00001 //===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This file defines the interfaces that ARM uses to lower LLVM code into a
00011 // selection DAG.
00012 //
00013 //===----------------------------------------------------------------------===//
00014 
00015 #include "ARMISelLowering.h"
00016 #include "ARMCallingConv.h"
00017 #include "ARMConstantPoolValue.h"
00018 #include "ARMMachineFunctionInfo.h"
00019 #include "ARMPerfectShuffle.h"
00020 #include "ARMSubtarget.h"
00021 #include "ARMTargetMachine.h"
00022 #include "ARMTargetObjectFile.h"
00023 #include "MCTargetDesc/ARMAddressingModes.h"
00024 #include "llvm/ADT/Statistic.h"
00025 #include "llvm/ADT/StringExtras.h"
00026 #include "llvm/ADT/StringSwitch.h"
00027 #include "llvm/CodeGen/CallingConvLower.h"
00028 #include "llvm/CodeGen/IntrinsicLowering.h"
00029 #include "llvm/CodeGen/MachineBasicBlock.h"
00030 #include "llvm/CodeGen/MachineFrameInfo.h"
00031 #include "llvm/CodeGen/MachineFunction.h"
00032 #include "llvm/CodeGen/MachineInstrBuilder.h"
00033 #include "llvm/CodeGen/MachineJumpTableInfo.h"
00034 #include "llvm/CodeGen/MachineModuleInfo.h"
00035 #include "llvm/CodeGen/MachineRegisterInfo.h"
00036 #include "llvm/CodeGen/SelectionDAG.h"
00037 #include "llvm/IR/CallingConv.h"
00038 #include "llvm/IR/Constants.h"
00039 #include "llvm/IR/Function.h"
00040 #include "llvm/IR/GlobalValue.h"
00041 #include "llvm/IR/IRBuilder.h"
00042 #include "llvm/IR/Instruction.h"
00043 #include "llvm/IR/Instructions.h"
00044 #include "llvm/IR/IntrinsicInst.h"
00045 #include "llvm/IR/Intrinsics.h"
00046 #include "llvm/IR/Type.h"
00047 #include "llvm/MC/MCSectionMachO.h"
00048 #include "llvm/Support/CommandLine.h"
00049 #include "llvm/Support/Debug.h"
00050 #include "llvm/Support/ErrorHandling.h"
00051 #include "llvm/Support/MathExtras.h"
00052 #include "llvm/Support/raw_ostream.h"
00053 #include "llvm/Target/TargetOptions.h"
00054 #include <utility>
00055 using namespace llvm;
00056 
00057 #define DEBUG_TYPE "arm-isel"
00058 
00059 STATISTIC(NumTailCalls, "Number of tail calls");
00060 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
00061 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
00062 
00063 cl::opt<bool>
00064 EnableARMLongCalls("arm-long-calls", cl::Hidden,
00065   cl::desc("Generate calls via indirect call instructions"),
00066   cl::init(false));
00067 
00068 static cl::opt<bool>
00069 ARMInterworking("arm-interworking", cl::Hidden,
00070   cl::desc("Enable / disable ARM interworking (for debugging only)"),
00071   cl::init(true));
00072 
00073 namespace {
00074   class ARMCCState : public CCState {
00075   public:
00076     ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
00077                SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
00078                ParmContext PC)
00079         : CCState(CC, isVarArg, MF, locs, C) {
00080       assert(((PC == Call) || (PC == Prologue)) &&
00081              "ARMCCState users must specify whether their context is call "
00082              "or prologue generation.");
00083       CallOrPrologue = PC;
00084     }
00085   };
00086 } // end anonymous namespace
00087 
00088 // The APCS parameter registers.
00089 static const MCPhysReg GPRArgRegs[] = {
00090   ARM::R0, ARM::R1, ARM::R2, ARM::R3
00091 };
00092 
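// addTypeForNEON - Register the operation actions shared by all NEON vector
// types. Loads/stores of VT are promoted to PromotedLdStVT and bitwise ops to
// PromotedBitwiseVT when those differ from VT; operations NEON cannot perform
// directly (divide, remainder, select, ...) are marked Expand or Custom.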
00093 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
00094                                        MVT PromotedBitwiseVT) {
00095   if (VT != PromotedLdStVT) {
00096     setOperationAction(ISD::LOAD, VT, Promote);
00097     AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
00098 
00099     setOperationAction(ISD::STORE, VT, Promote);
00100     AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
00101   }
00102 
00103   MVT ElemTy = VT.getVectorElementType();
00104   if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
00105     setOperationAction(ISD::SETCC, VT, Custom);
00106   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
00107   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
00108   if (ElemTy == MVT::i32) {
00109     setOperationAction(ISD::SINT_TO_FP, VT, Custom);
00110     setOperationAction(ISD::UINT_TO_FP, VT, Custom);
00111     setOperationAction(ISD::FP_TO_SINT, VT, Custom);
00112     setOperationAction(ISD::FP_TO_UINT, VT, Custom);
00113   } else {
00114     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
00115     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
00116     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
00117     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
00118   }
00119   setOperationAction(ISD::BUILD_VECTOR,      VT, Custom);
00120   setOperationAction(ISD::VECTOR_SHUFFLE,    VT, Custom);
00121   setOperationAction(ISD::CONCAT_VECTORS,    VT, Legal);
00122   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
00123   setOperationAction(ISD::SELECT,            VT, Expand);
00124   setOperationAction(ISD::SELECT_CC,         VT, Expand);
00125   setOperationAction(ISD::VSELECT,           VT, Expand);
00126   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
00127   if (VT.isInteger()) {
00128     setOperationAction(ISD::SHL, VT, Custom);
00129     setOperationAction(ISD::SRA, VT, Custom);
00130     setOperationAction(ISD::SRL, VT, Custom);
00131   }
00132 
00133   // Promote all bit-wise operations.
00134   if (VT.isInteger() && VT != PromotedBitwiseVT) {
00135     setOperationAction(ISD::AND, VT, Promote);
00136     AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
00137     setOperationAction(ISD::OR,  VT, Promote);
00138     AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
00139     setOperationAction(ISD::XOR, VT, Promote);
00140     AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
00141   }
00142 
00143   // Neon does not support vector divide/remainder operations.
00144   setOperationAction(ISD::SDIV, VT, Expand);
00145   setOperationAction(ISD::UDIV, VT, Expand);
00146   setOperationAction(ISD::FDIV, VT, Expand);
00147   setOperationAction(ISD::SREM, VT, Expand);
00148   setOperationAction(ISD::UREM, VT, Expand);
00149   setOperationAction(ISD::FREM, VT, Expand);
00150 }
00151 
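// 64-bit NEON vector types live in D registers and 128-bit types in D-register
// pairs; the two helpers below register the appropriate register class and then
// apply the common NEON operation actions via addTypeForNEON.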
00152 void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
00153   addRegisterClass(VT, &ARM::DPRRegClass);
00154   addTypeForNEON(VT, MVT::f64, MVT::v2i32);
00155 }
00156 
00157 void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
00158   addRegisterClass(VT, &ARM::DPairRegClass);
00159   addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
00160 }
00161 
00162 ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
00163                                      const ARMSubtarget &STI)
00164     : TargetLowering(TM), Subtarget(&STI) {
00165   RegInfo = Subtarget->getRegisterInfo();
00166   Itins = Subtarget->getInstrItineraryData();
00167 
00168   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
00169 
00170   if (Subtarget->isTargetMachO()) {
00171     // Uses VFP for Thumb libfuncs if available.
00172     if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
00173         Subtarget->hasARMOps() && !TM.Options.UseSoftFloat) {
00174       // Single-precision floating-point arithmetic.
00175       setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
00176       setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
00177       setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
00178       setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");
00179 
00180       // Double-precision floating-point arithmetic.
00181       setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
00182       setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
00183       setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
00184       setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");
00185 
00186       // Single-precision comparisons.
00187       setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
00188       setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
00189       setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
00190       setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
00191       setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
00192       setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
00193       setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
00194       setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");
00195 
00196       setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
00197       setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
00198       setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
00199       setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
00200       setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
00201       setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
00202       setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
00203       setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);
00204 
00205       // Double-precision comparisons.
00206       setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
00207       setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
00208       setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
00209       setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
00210       setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
00211       setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
00212       setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
00213       setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");
00214 
00215       setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
00216       setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
00217       setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
00218       setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
00219       setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
00220       setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
00221       setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
00222       setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
00223 
00224       // Floating-point to integer conversions.
00225       // i64 conversions are done via library routines even when generating VFP
00226       // instructions, so use the same ones.
00227       setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
00228       setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
00229       setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
00230       setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");
00231 
00232       // Conversions between floating types.
00233       setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
00234       setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");
00235 
00236       // Integer to floating-point conversions.
00237       // i64 conversions are done via library routines even when generating VFP
00238       // instructions, so use the same ones.
00239       // FIXME: There appears to be some naming inconsistency in ARM libgcc:
00240       // e.g., __floatunsidf vs. __floatunssidfvfp.
00241       setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
00242       setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
00243       setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
00244       setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
00245     }
00246   }
00247 
00248   // These libcalls are not available on 32-bit targets.
00249   setLibcallName(RTLIB::SHL_I128, nullptr);
00250   setLibcallName(RTLIB::SRL_I128, nullptr);
00251   setLibcallName(RTLIB::SRA_I128, nullptr);
00252 
00253   if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetMachO() &&
00254       !Subtarget->isTargetWindows()) {
00255     static const struct {
00256       const RTLIB::Libcall Op;
00257       const char * const Name;
00258       const CallingConv::ID CC;
00259       const ISD::CondCode Cond;
00260     } LibraryCalls[] = {
00261       // Double-precision floating-point arithmetic helper functions
00262       // RTABI chapter 4.1.2, Table 2
00263       { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00264       { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00265       { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00266       { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00267 
00268       // Double-precision floating-point comparison helper functions
00269       // RTABI chapter 4.1.2, Table 3
00270       { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
00271       { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
00272       { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
00273       { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
00274       { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
00275       { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
00276       { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
00277       { RTLIB::O_F64,   "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
00278 
00279       // Single-precision floating-point arithmetic helper functions
00280       // RTABI chapter 4.1.2, Table 4
00281       { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00282       { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00283       { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00284       { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00285 
00286       // Single-precision floating-point comparison helper functions
00287       // RTABI chapter 4.1.2, Table 5
00288       { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
00289       { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
00290       { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
00291       { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
00292       { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
00293       { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
00294       { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
00295       { RTLIB::O_F32,   "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
00296 
00297       // Floating-point to integer conversions.
00298       // RTABI chapter 4.1.2, Table 6
00299       { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00300       { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00301       { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00302       { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00303       { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00304       { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00305       { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00306       { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00307 
00308       // Conversions between floating types.
00309       // RTABI chapter 4.1.2, Table 7
00310       { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00311       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00312       { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00313 
00314       // Integer to floating-point conversions.
00315       // RTABI chapter 4.1.2, Table 8
00316       { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00317       { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00318       { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00319       { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00320       { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00321       { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00322       { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00323       { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00324 
00325       // Long long helper functions
00326       // RTABI chapter 4.2, Table 9
00327       { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00328       { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00329       { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00330       { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00331 
00332       // Integer division functions
00333       // RTABI chapter 4.3.1
00334       { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00335       { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00336       { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00337       { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00338       { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00339       { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00340       { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00341       { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00342 
00343       // Memory operations
00344       // RTABI chapter 4.3.4
00345       { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00346       { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00347       { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00348     };
00349 
00350     for (const auto &LC : LibraryCalls) {
00351       setLibcallName(LC.Op, LC.Name);
00352       setLibcallCallingConv(LC.Op, LC.CC);
00353       if (LC.Cond != ISD::SETCC_INVALID)
00354         setCmpLibcallCC(LC.Op, LC.Cond);
00355     }
00356   }
00357 
00358   if (Subtarget->isTargetWindows()) {
00359     static const struct {
00360       const RTLIB::Libcall Op;
00361       const char * const Name;
00362       const CallingConv::ID CC;
00363     } LibraryCalls[] = {
00364       { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
00365       { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
00366       { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
00367       { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
00368       { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
00369       { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
00370       { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
00371       { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
00372     };
00373 
00374     for (const auto &LC : LibraryCalls) {
00375       setLibcallName(LC.Op, LC.Name);
00376       setLibcallCallingConv(LC.Op, LC.CC);
00377     }
00378   }
00379 
00380   // Use divmod compiler-rt calls for iOS 5.0 and later.
00381   if (Subtarget->getTargetTriple().isiOS() &&
00382       !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
00383     setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
00384     setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
00385   }
00386 
00387   // The half <-> float conversion functions are always soft-float, but are
00388   // needed for some targets which use a hard-float calling convention by
00389   // default.
00390   if (Subtarget->isAAPCS_ABI()) {
00391     setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
00392     setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
00393     setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
00394   } else {
00395     setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
00396     setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
00397     setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
00398   }
00399 
00400   if (Subtarget->isThumb1Only())
00401     addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
00402   else
00403     addRegisterClass(MVT::i32, &ARM::GPRRegClass);
00404   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
00405       !Subtarget->isThumb1Only()) {
00406     addRegisterClass(MVT::f32, &ARM::SPRRegClass);
00407     addRegisterClass(MVT::f64, &ARM::DPRRegClass);
00408   }
00409 
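  // Establish a conservative baseline for every vector type: no extending
  // loads, no truncating stores, and no high-half/widening multiplies or byte
  // swaps. NEON-specific legality is layered on top below.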
00410   for (MVT VT : MVT::vector_valuetypes()) {
00411     for (MVT InnerVT : MVT::vector_valuetypes()) {
00412       setTruncStoreAction(VT, InnerVT, Expand);
00413       setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
00414       setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
00415       setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
00416     }
00417 
00418     setOperationAction(ISD::MULHS, VT, Expand);
00419     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
00420     setOperationAction(ISD::MULHU, VT, Expand);
00421     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
00422 
00423     setOperationAction(ISD::BSWAP, VT, Expand);
00424   }
00425 
00426   setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
00427   setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
00428 
00429   if (Subtarget->hasNEON()) {
00430     addDRTypeForNEON(MVT::v2f32);
00431     addDRTypeForNEON(MVT::v8i8);
00432     addDRTypeForNEON(MVT::v4i16);
00433     addDRTypeForNEON(MVT::v2i32);
00434     addDRTypeForNEON(MVT::v1i64);
00435 
00436     addQRTypeForNEON(MVT::v4f32);
00437     addQRTypeForNEON(MVT::v2f64);
00438     addQRTypeForNEON(MVT::v16i8);
00439     addQRTypeForNEON(MVT::v8i16);
00440     addQRTypeForNEON(MVT::v4i32);
00441     addQRTypeForNEON(MVT::v2i64);
00442 
00443     // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
00444     // neither NEON nor VFP supports any arithmetic operations on it.
00445     // The same goes for v4f32, except that vadd, vsub and vmul are natively
00446     // supported for v4f32.
00447     setOperationAction(ISD::FADD, MVT::v2f64, Expand);
00448     setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
00449     setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
00450     // FIXME: Code duplication: FDIV and FREM are expanded always, see
00451     // ARMTargetLowering::addTypeForNEON method for details.
00452     setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
00453     setOperationAction(ISD::FREM, MVT::v2f64, Expand);
00454     // FIXME: Create unittest.
00455     // In other words, find a case where "copysign" appears in the DAG with
00456     // vector operands.
00457     setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
00458     // FIXME: Code duplication: SETCC has custom operation action, see
00459     // ARMTargetLowering::addTypeForNEON method for details.
00460     setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
00461     // FIXME: Create unittest for FNEG and for FABS.
00462     setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
00463     setOperationAction(ISD::FABS, MVT::v2f64, Expand);
00464     setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
00465     setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
00466     setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
00467     setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
00468     setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
00469     setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
00470     setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
00471     setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
00472     setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
00473     setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
00474     // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
00475     setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
00476     setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
00477     setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
00478     setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
00479     setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
00480     setOperationAction(ISD::FMA, MVT::v2f64, Expand);
00481 
00482     setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
00483     setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
00484     setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
00485     setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
00486     setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
00487     setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
00488     setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
00489     setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
00490     setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
00491     setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
00492     setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
00493     setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
00494     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
00495     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
00496     setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
00497 
00498     // Mark v2f32 intrinsics.
00499     setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
00500     setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
00501     setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
00502     setOperationAction(ISD::FPOWI, MVT::v2f32, Expand);
00503     setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
00504     setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
00505     setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
00506     setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
00507     setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
00508     setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
00509     setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
00510     setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
00511     setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
00512     setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
00513     setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
00514 
00515     // Neon does not support some operations on v1i64 and v2i64 types.
00516     setOperationAction(ISD::MUL, MVT::v1i64, Expand);
00517     // Custom handling for some quad-vector types to detect VMULL.
00518     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
00519     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
00520     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
00521     // Custom handling for some vector types to avoid expensive expansions
00522     setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
00523     setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
00524     setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
00525     setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
00526     setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
00527     setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
00528     // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
00529     // a destination type that is wider than the source, nor does it have a
00530     // FP_TO_[SU]INT instruction with a narrower destination than its
00531     // source.
00532     setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
00533     setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
00534     setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
00535     setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
00536 
00537     setOperationAction(ISD::FP_ROUND,   MVT::v2f32, Expand);
00538     setOperationAction(ISD::FP_EXTEND,  MVT::v2f64, Expand);
00539 
00540     // NEON does not have single instruction CTPOP for vectors with element
00541     // types wider than 8-bits.  However, custom lowering can leverage the
00542     // v8i8/v16i8 vcnt instruction.
00543     setOperationAction(ISD::CTPOP,      MVT::v2i32, Custom);
00544     setOperationAction(ISD::CTPOP,      MVT::v4i32, Custom);
00545     setOperationAction(ISD::CTPOP,      MVT::v4i16, Custom);
00546     setOperationAction(ISD::CTPOP,      MVT::v8i16, Custom);
00547 
00548     // NEON only has FMA instructions as of VFP4.
00549     if (!Subtarget->hasVFP4()) {
00550       setOperationAction(ISD::FMA, MVT::v2f32, Expand);
00551       setOperationAction(ISD::FMA, MVT::v4f32, Expand);
00552     }
00553 
00554     setTargetDAGCombine(ISD::INTRINSIC_VOID);
00555     setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
00556     setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
00557     setTargetDAGCombine(ISD::SHL);
00558     setTargetDAGCombine(ISD::SRL);
00559     setTargetDAGCombine(ISD::SRA);
00560     setTargetDAGCombine(ISD::SIGN_EXTEND);
00561     setTargetDAGCombine(ISD::ZERO_EXTEND);
00562     setTargetDAGCombine(ISD::ANY_EXTEND);
00563     setTargetDAGCombine(ISD::SELECT_CC);
00564     setTargetDAGCombine(ISD::BUILD_VECTOR);
00565     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
00566     setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
00567     setTargetDAGCombine(ISD::STORE);
00568     setTargetDAGCombine(ISD::FP_TO_SINT);
00569     setTargetDAGCombine(ISD::FP_TO_UINT);
00570     setTargetDAGCombine(ISD::FDIV);
00571     setTargetDAGCombine(ISD::LOAD);
00572 
00573     // It is legal to extload from v4i8 to v4i16 or v4i32.
00574     for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
00575                    MVT::v2i32}) {
00576       for (MVT VT : MVT::integer_vector_valuetypes()) {
00577         setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
00578         setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
00579         setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
00580       }
00581     }
00582   }
00583 
00584   // ARM and Thumb2 support UMLAL/SMLAL.
00585   if (!Subtarget->isThumb1Only())
00586     setTargetDAGCombine(ISD::ADDC);
00587 
00588   if (Subtarget->isFPOnlySP()) {
00589     // When targeting a floating-point unit with only single-precision
00590     // operations, f64 is legal for the few double-precision instructions that
00591     // are present. However, no double-precision operations other than moves,
00592     // loads and stores are provided by the hardware.
00593     setOperationAction(ISD::FADD,       MVT::f64, Expand);
00594     setOperationAction(ISD::FSUB,       MVT::f64, Expand);
00595     setOperationAction(ISD::FMUL,       MVT::f64, Expand);
00596     setOperationAction(ISD::FMA,        MVT::f64, Expand);
00597     setOperationAction(ISD::FDIV,       MVT::f64, Expand);
00598     setOperationAction(ISD::FREM,       MVT::f64, Expand);
00599     setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
00600     setOperationAction(ISD::FGETSIGN,   MVT::f64, Expand);
00601     setOperationAction(ISD::FNEG,       MVT::f64, Expand);
00602     setOperationAction(ISD::FABS,       MVT::f64, Expand);
00603     setOperationAction(ISD::FSQRT,      MVT::f64, Expand);
00604     setOperationAction(ISD::FSIN,       MVT::f64, Expand);
00605     setOperationAction(ISD::FCOS,       MVT::f64, Expand);
00606     setOperationAction(ISD::FPOWI,      MVT::f64, Expand);
00607     setOperationAction(ISD::FPOW,       MVT::f64, Expand);
00608     setOperationAction(ISD::FLOG,       MVT::f64, Expand);
00609     setOperationAction(ISD::FLOG2,      MVT::f64, Expand);
00610     setOperationAction(ISD::FLOG10,     MVT::f64, Expand);
00611     setOperationAction(ISD::FEXP,       MVT::f64, Expand);
00612     setOperationAction(ISD::FEXP2,      MVT::f64, Expand);
00613     setOperationAction(ISD::FCEIL,      MVT::f64, Expand);
00614     setOperationAction(ISD::FTRUNC,     MVT::f64, Expand);
00615     setOperationAction(ISD::FRINT,      MVT::f64, Expand);
00616     setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
00617     setOperationAction(ISD::FFLOOR,     MVT::f64, Expand);
00618     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
00619     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
00620     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
00621     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
00622     setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
00623     setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
00624     setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
00625     setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
00626   }
00627 
00628   computeRegisterProperties(Subtarget->getRegisterInfo());
00629 
00630   // ARM does not have floating-point extending loads.
00631   for (MVT VT : MVT::fp_valuetypes()) {
00632     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
00633     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
00634   }
00635 
00636   // ... or truncating stores
00637   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
00638   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
00639   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
00640 
00641   // ARM does not have i1 sign extending load.
00642   for (MVT VT : MVT::integer_valuetypes())
00643     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
00644 
00645   // ARM supports all 4 flavors of integer indexed load / store.
00646   if (!Subtarget->isThumb1Only()) {
00647     for (unsigned im = (unsigned)ISD::PRE_INC;
00648          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
00649       setIndexedLoadAction(im,  MVT::i1,  Legal);
00650       setIndexedLoadAction(im,  MVT::i8,  Legal);
00651       setIndexedLoadAction(im,  MVT::i16, Legal);
00652       setIndexedLoadAction(im,  MVT::i32, Legal);
00653       setIndexedStoreAction(im, MVT::i1,  Legal);
00654       setIndexedStoreAction(im, MVT::i8,  Legal);
00655       setIndexedStoreAction(im, MVT::i16, Legal);
00656       setIndexedStoreAction(im, MVT::i32, Legal);
00657     }
00658   }
00659 
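  // Overflow-checked arithmetic (e.g. llvm.sadd.with.overflow) is custom
  // lowered so the overflow result can be taken from the ARM status flags
  // rather than recomputed.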
00660   setOperationAction(ISD::SADDO, MVT::i32, Custom);
00661   setOperationAction(ISD::UADDO, MVT::i32, Custom);
00662   setOperationAction(ISD::SSUBO, MVT::i32, Custom);
00663   setOperationAction(ISD::USUBO, MVT::i32, Custom);
00664 
00665   // i64 operation support.
00666   setOperationAction(ISD::MUL,     MVT::i64, Expand);
00667   setOperationAction(ISD::MULHU,   MVT::i32, Expand);
00668   if (Subtarget->isThumb1Only()) {
00669     setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
00670     setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
00671   }
00672   if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
00673       || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
00674     setOperationAction(ISD::MULHS, MVT::i32, Expand);
00675 
00676   setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
00677   setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
00678   setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
00679   setOperationAction(ISD::SRL,       MVT::i64, Custom);
00680   setOperationAction(ISD::SRA,       MVT::i64, Custom);
00681 
00682   if (!Subtarget->isThumb1Only()) {
00683     // FIXME: We should do this for Thumb1 as well.
00684     setOperationAction(ISD::ADDC,    MVT::i32, Custom);
00685     setOperationAction(ISD::ADDE,    MVT::i32, Custom);
00686     setOperationAction(ISD::SUBC,    MVT::i32, Custom);
00687     setOperationAction(ISD::SUBE,    MVT::i32, Custom);
00688   }
00689 
00690   // ARM does not have ROTL.
00691   setOperationAction(ISD::ROTL,  MVT::i32, Expand);
00692   setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
00693   setOperationAction(ISD::CTPOP, MVT::i32, Expand);
00694   if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
00695     setOperationAction(ISD::CTLZ, MVT::i32, Expand);
00696 
00697   // These just redirect to CTTZ and CTLZ on ARM.
00698   setOperationAction(ISD::CTTZ_ZERO_UNDEF  , MVT::i32  , Expand);
00699   setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i32  , Expand);
00700 
00701   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
00702 
00703   // Only ARMv6 has BSWAP.
00704   if (!Subtarget->hasV6Ops())
00705     setOperationAction(ISD::BSWAP, MVT::i32, Expand);
00706 
00707   if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) &&
00708       !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) {
00709     // These are expanded into libcalls if the CPU doesn't have a hardware divider.
00710     setOperationAction(ISD::SDIV,  MVT::i32, Expand);
00711     setOperationAction(ISD::UDIV,  MVT::i32, Expand);
00712   }
00713 
00714   // FIXME: Also set divmod for SREM on EABI
00715   setOperationAction(ISD::SREM,  MVT::i32, Expand);
00716   setOperationAction(ISD::UREM,  MVT::i32, Expand);
00717   // Register based DivRem for AEABI (RTABI 4.2)
00718   if (Subtarget->isTargetAEABI()) {
00719     setLibcallName(RTLIB::SDIVREM_I8,  "__aeabi_idivmod");
00720     setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod");
00721     setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod");
00722     setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod");
00723     setLibcallName(RTLIB::UDIVREM_I8,  "__aeabi_uidivmod");
00724     setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod");
00725     setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod");
00726     setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod");
00727 
00728     setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS);
00729     setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS);
00730     setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS);
00731     setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS);
00732     setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::ARM_AAPCS);
00733     setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS);
00734     setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS);
00735     setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS);
00736 
00737     setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
00738     setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
00739   } else {
00740     setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
00741     setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
00742   }
00743 
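  // Addresses of globals, constant pools, the GOT, TLS variables and block
  // addresses all need ARM-specific wrapping (constant-pool or movw/movt
  // materialization), so they are custom lowered.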
00744   setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
00745   setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
00746   setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
00747   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
00748   setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
00749 
00750   setOperationAction(ISD::TRAP, MVT::Other, Legal);
00751 
00752   // Use the default implementation.
00753   setOperationAction(ISD::VASTART,            MVT::Other, Custom);
00754   setOperationAction(ISD::VAARG,              MVT::Other, Expand);
00755   setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
00756   setOperationAction(ISD::VAEND,              MVT::Other, Expand);
00757   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
00758   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
00759 
00760   if (!Subtarget->isTargetMachO()) {
00761     // Non-MachO platforms may return values in these registers via the
00762     // personality function.
00763     setExceptionPointerRegister(ARM::R0);
00764     setExceptionSelectorRegister(ARM::R1);
00765   }
00766 
00767   if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
00768     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
00769   else
00770     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
00771 
00772   // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
00773   // the default expansion. If we are targeting a single threaded system,
00774   // then set them all for expand so we can lower them later into their
00775   // non-atomic form.
00776   if (TM.Options.ThreadModel == ThreadModel::Single)
00777     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other, Expand);
00778   else if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only()) {
00779     // ATOMIC_FENCE needs custom lowering; the others should have been expanded
00780     // to ldrex/strex loops already.
00781     setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Custom);
00782 
00783     // On v8, we have particularly efficient implementations of atomic fences
00784     // if they can be combined with nearby atomic loads and stores.
00785     if (!Subtarget->hasV8Ops()) {
00786       // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
00787       setInsertFencesForAtomic(true);
00788     }
00789   } else {
00790     // If there's anything we can use as a barrier, go through custom lowering
00791     // for ATOMIC_FENCE.
00792     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other,
00793                        Subtarget->hasAnyDataBarrier() ? Custom : Expand);
00794 
00795     // Set them all for expansion, which will force libcalls.
00796     setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
00797     setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
00798     setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
00799     setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
00800     setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
00801     setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
00802     setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
00803     setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
00804     setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
00805     setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
00806     setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
00807     setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
00808     // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
00809     // Unordered/Monotonic case.
00810     setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
00811     setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
00812   }
00813 
00814   setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);
00815 
00816   // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
00817   if (!Subtarget->hasV6Ops()) {
00818     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
00819     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
00820   }
00821   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
00822 
00823   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
00824       !Subtarget->isThumb1Only()) {
00825     // Turn f64->i64 into VMOVRRD and i64->f64 into VMOVDRR,
00826     // iff the target supports VFP2.
00827     setOperationAction(ISD::BITCAST, MVT::i64, Custom);
00828     setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
00829   }
00830 
00831   // We want to custom lower some of our intrinsics.
00832   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
00833   if (Subtarget->isTargetDarwin()) {
00834     setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
00835     setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
00836     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
00837   }
00838 
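  // i32/f32/f64 comparisons and selects are built from a compare that sets the
  // condition flags plus conditional moves/branches, so SETCC is expanded and
  // SELECT/SELECT_CC are custom lowered.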
00839   setOperationAction(ISD::SETCC,     MVT::i32, Expand);
00840   setOperationAction(ISD::SETCC,     MVT::f32, Expand);
00841   setOperationAction(ISD::SETCC,     MVT::f64, Expand);
00842   setOperationAction(ISD::SELECT,    MVT::i32, Custom);
00843   setOperationAction(ISD::SELECT,    MVT::f32, Custom);
00844   setOperationAction(ISD::SELECT,    MVT::f64, Custom);
00845   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
00846   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
00847   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
00848 
00849   setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
00850   setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
00851   setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
00852   setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
00853   setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
00854 
00855   // We don't support sin/cos/fmod/copysign/pow
00856   setOperationAction(ISD::FSIN,      MVT::f64, Expand);
00857   setOperationAction(ISD::FSIN,      MVT::f32, Expand);
00858   setOperationAction(ISD::FCOS,      MVT::f32, Expand);
00859   setOperationAction(ISD::FCOS,      MVT::f64, Expand);
00860   setOperationAction(ISD::FSINCOS,   MVT::f64, Expand);
00861   setOperationAction(ISD::FSINCOS,   MVT::f32, Expand);
00862   setOperationAction(ISD::FREM,      MVT::f64, Expand);
00863   setOperationAction(ISD::FREM,      MVT::f32, Expand);
00864   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
00865       !Subtarget->isThumb1Only()) {
00866     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
00867     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
00868   }
00869   setOperationAction(ISD::FPOW,      MVT::f64, Expand);
00870   setOperationAction(ISD::FPOW,      MVT::f32, Expand);
00871 
00872   if (!Subtarget->hasVFP4()) {
00873     setOperationAction(ISD::FMA, MVT::f64, Expand);
00874     setOperationAction(ISD::FMA, MVT::f32, Expand);
00875   }
00876 
00877   // Various VFP goodness
00878   if (!TM.Options.UseSoftFloat && !Subtarget->isThumb1Only()) {
00879     // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
00880     if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) {
00881       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
00882       setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
00883     }
00884 
00885     // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
00886     if (!Subtarget->hasFP16()) {
00887       setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
00888       setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
00889     }
00890   }
00891 
00892   // Combine sin / cos into one node or libcall if possible.
00893   if (Subtarget->hasSinCos()) {
00894     setLibcallName(RTLIB::SINCOS_F32, "sincosf");
00895     setLibcallName(RTLIB::SINCOS_F64, "sincos");
00896     if (Subtarget->getTargetTriple().isiOS()) {
00897       // For iOS, we don't want the normal expansion of a libcall to
00898       // sincos. We want to issue a libcall to __sincos_stret.
00899       setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
00900       setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
00901     }
00902   }
00903 
00904   // FP-ARMv8 implements a lot of rounding-like FP operations.
00905   if (Subtarget->hasFPARMv8()) {
00906     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
00907     setOperationAction(ISD::FCEIL, MVT::f32, Legal);
00908     setOperationAction(ISD::FROUND, MVT::f32, Legal);
00909     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
00910     setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
00911     setOperationAction(ISD::FRINT, MVT::f32, Legal);
00912     if (!Subtarget->isFPOnlySP()) {
00913       setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
00914       setOperationAction(ISD::FCEIL, MVT::f64, Legal);
00915       setOperationAction(ISD::FROUND, MVT::f64, Legal);
00916       setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
00917       setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
00918       setOperationAction(ISD::FRINT, MVT::f64, Legal);
00919     }
00920   }
00921   // We have target-specific DAG combine patterns for the following nodes:
00922   // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
00923   setTargetDAGCombine(ISD::ADD);
00924   setTargetDAGCombine(ISD::SUB);
00925   setTargetDAGCombine(ISD::MUL);
00926   setTargetDAGCombine(ISD::AND);
00927   setTargetDAGCombine(ISD::OR);
00928   setTargetDAGCombine(ISD::XOR);
00929 
00930   if (Subtarget->hasV6Ops())
00931     setTargetDAGCombine(ISD::SRL);
00932 
00933   setStackPointerRegisterToSaveRestore(ARM::SP);
00934 
00935   if (TM.Options.UseSoftFloat || Subtarget->isThumb1Only() ||
00936       !Subtarget->hasVFP2())
00937     setSchedulingPreference(Sched::RegPressure);
00938   else
00939     setSchedulingPreference(Sched::Hybrid);
00940 
00941   //// temporary - rewrite interface to use type
00942   MaxStoresPerMemset = 8;
00943   MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
00944   MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
00945   MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
00946   MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
00947   MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
00948 
00949   // On ARM, arguments smaller than 4 bytes are extended, so all arguments
00950   // are at least 4-byte aligned.
00951   setMinStackArgumentAlignment(4);
00952 
00953   // Prefer likely predicted branches to selects on out-of-order cores.
00954   PredictableSelectIsExpensive = Subtarget->isLikeA9();
00955 
00956   setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
00957 }
00958 
00959 // FIXME: It might make sense to define the representative register class as the
00960 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is
00961 // a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
00962 // SPR's representative would be DPR_VFP2. This should work well if register
00963 // pressure tracking were modified such that a register use would increment the
00964 // pressure of the register class's representative and all of its super
00965 // classes' representatives transitively. We have not implemented this because
00966 // of the difficulty, prior to coalescing, of modeling operand register classes
00967 // due to the common occurrence of cross-class copies and subregister insertions
00968 // and extractions.
00969 std::pair<const TargetRegisterClass *, uint8_t>
00970 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
00971                                            MVT VT) const {
00972   const TargetRegisterClass *RRC = nullptr;
00973   uint8_t Cost = 1;
00974   switch (VT.SimpleTy) {
00975   default:
00976     return TargetLowering::findRepresentativeClass(TRI, VT);
00977   // Use DPR as representative register class for all floating point
00978   // and vector types. There are 32 SPR registers and 32 DPR registers, so
00979   // the cost is 1 for both f32 and f64.
00980   case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
00981   case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
00982     RRC = &ARM::DPRRegClass;
00983     // When NEON is used for SP, only half of the register file is available
00984     // because operations that define both SP and DP results will be constrained
00985     // to the VFP2 class (D0-D15). We currently model this constraint prior to
00986     // coalescing by double-counting the SP regs. See the FIXME above.
00987     if (Subtarget->useNEONForSinglePrecisionFP())
00988       Cost = 2;
00989     break;
00990   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
00991   case MVT::v4f32: case MVT::v2f64:
00992     RRC = &ARM::DPRRegClass;
00993     Cost = 2;
00994     break;
00995   case MVT::v4i64:
00996     RRC = &ARM::DPRRegClass;
00997     Cost = 4;
00998     break;
00999   case MVT::v8i64:
01000     RRC = &ARM::DPRRegClass;
01001     Cost = 8;
01002     break;
01003   }
01004   return std::make_pair(RRC, Cost);
01005 }
01006 
01007 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
01008   switch (Opcode) {
01009   default: return nullptr;
01010   case ARMISD::Wrapper:       return "ARMISD::Wrapper";
01011   case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
01012   case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
01013   case ARMISD::CALL:          return "ARMISD::CALL";
01014   case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
01015   case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
01016   case ARMISD::tCALL:         return "ARMISD::tCALL";
01017   case ARMISD::BRCOND:        return "ARMISD::BRCOND";
01018   case ARMISD::BR_JT:         return "ARMISD::BR_JT";
01019   case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
01020   case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
01021   case ARMISD::INTRET_FLAG:   return "ARMISD::INTRET_FLAG";
01022   case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
01023   case ARMISD::CMP:           return "ARMISD::CMP";
01024   case ARMISD::CMN:           return "ARMISD::CMN";
01025   case ARMISD::CMPZ:          return "ARMISD::CMPZ";
01026   case ARMISD::CMPFP:         return "ARMISD::CMPFP";
01027   case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
01028   case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
01029   case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
01030 
01031   case ARMISD::CMOV:          return "ARMISD::CMOV";
01032 
01033   case ARMISD::RBIT:          return "ARMISD::RBIT";
01034 
01035   case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
01036   case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
01037   case ARMISD::RRX:           return "ARMISD::RRX";
01038 
01039   case ARMISD::ADDC:          return "ARMISD::ADDC";
01040   case ARMISD::ADDE:          return "ARMISD::ADDE";
01041   case ARMISD::SUBC:          return "ARMISD::SUBC";
01042   case ARMISD::SUBE:          return "ARMISD::SUBE";
01043 
01044   case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
01045   case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";
01046 
01047   case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
01048   case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
01049 
01050   case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";
01051 
01052   case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
01053 
01054   case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";
01055 
01056   case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
01057 
01058   case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";
01059 
01060   case ARMISD::WIN__CHKSTK:   return "ARMISD::WIN__CHKSTK";
01061 
01062   case ARMISD::VCEQ:          return "ARMISD::VCEQ";
01063   case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
01064   case ARMISD::VCGE:          return "ARMISD::VCGE";
01065   case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
01066   case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
01067   case ARMISD::VCGEU:         return "ARMISD::VCGEU";
01068   case ARMISD::VCGT:          return "ARMISD::VCGT";
01069   case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
01070   case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
01071   case ARMISD::VCGTU:         return "ARMISD::VCGTU";
01072   case ARMISD::VTST:          return "ARMISD::VTST";
01073 
01074   case ARMISD::VSHL:          return "ARMISD::VSHL";
01075   case ARMISD::VSHRs:         return "ARMISD::VSHRs";
01076   case ARMISD::VSHRu:         return "ARMISD::VSHRu";
01077   case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
01078   case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
01079   case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
01080   case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
01081   case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
01082   case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
01083   case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
01084   case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
01085   case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
01086   case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
01087   case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
01088   case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
01089   case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
01090   case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
01091   case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
01092   case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
01093   case ARMISD::VMOVFPIMM:     return "ARMISD::VMOVFPIMM";
01094   case ARMISD::VDUP:          return "ARMISD::VDUP";
01095   case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
01096   case ARMISD::VEXT:          return "ARMISD::VEXT";
01097   case ARMISD::VREV64:        return "ARMISD::VREV64";
01098   case ARMISD::VREV32:        return "ARMISD::VREV32";
01099   case ARMISD::VREV16:        return "ARMISD::VREV16";
01100   case ARMISD::VZIP:          return "ARMISD::VZIP";
01101   case ARMISD::VUZP:          return "ARMISD::VUZP";
01102   case ARMISD::VTRN:          return "ARMISD::VTRN";
01103   case ARMISD::VTBL1:         return "ARMISD::VTBL1";
01104   case ARMISD::VTBL2:         return "ARMISD::VTBL2";
01105   case ARMISD::VMULLs:        return "ARMISD::VMULLs";
01106   case ARMISD::VMULLu:        return "ARMISD::VMULLu";
01107   case ARMISD::UMLAL:         return "ARMISD::UMLAL";
01108   case ARMISD::SMLAL:         return "ARMISD::SMLAL";
01109   case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
01110   case ARMISD::FMAX:          return "ARMISD::FMAX";
01111   case ARMISD::FMIN:          return "ARMISD::FMIN";
01112   case ARMISD::VMAXNM:        return "ARMISD::VMAXNM";
01113   case ARMISD::VMINNM:        return "ARMISD::VMINNM";
01114   case ARMISD::BFI:           return "ARMISD::BFI";
01115   case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
01116   case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
01117   case ARMISD::VBSL:          return "ARMISD::VBSL";
01118   case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
01119   case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
01120   case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
01121   case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
01122   case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
01123   case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
01124   case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
01125   case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
01126   case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
01127   case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
01128   case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
01129   case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
01130   case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
01131   case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
01132   case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
01133   case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
01134   case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
01135   case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
01136   case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
01137   case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
01138   }
01139 }
01140 
01141 EVT ARMTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
01142   if (!VT.isVector()) return getPointerTy();
01143   return VT.changeVectorElementTypeToInteger();
01144 }
01145 
01146 /// getRegClassFor - Return the register class that should be used for the
01147 /// specified value type.
01148 const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
01149   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
01150   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
01151   // load / store 4 to 8 consecutive D registers.
01152   if (Subtarget->hasNEON()) {
01153     if (VT == MVT::v4i64)
01154       return &ARM::QQPRRegClass;
01155     if (VT == MVT::v8i64)
01156       return &ARM::QQQQPRRegClass;
01157   }
01158   return TargetLowering::getRegClassFor(VT);
01159 }
01160 
01161 // memcpy and other memory intrinsics typically try to use LDM/STM if the
01162 // source/dest is aligned and the copy size is large enough. We therefore want
01163 // to align such objects passed to memory intrinsics.
01164 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
01165                                                unsigned &PrefAlign) const {
01166   if (!isa<MemIntrinsic>(CI))
01167     return false;
01168   MinSize = 8;
01169   // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
01170   // cycle faster than 4-byte aligned LDM.
01171   PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
01172   return true;
01173 }
01174 
01175 // Create a fast isel object.
01176 FastISel *
01177 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
01178                                   const TargetLibraryInfo *libInfo) const {
01179   return ARM::createFastISel(funcInfo, libInfo);
01180 }
01181 
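      /// getSchedulingPreference - Prefer ILP scheduling for nodes that produce
      /// floating-point or vector values, or whose first result has a long
      /// latency; otherwise schedule for register pressure.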
01182 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
01183   unsigned NumVals = N->getNumValues();
01184   if (!NumVals)
01185     return Sched::RegPressure;
01186 
01187   for (unsigned i = 0; i != NumVals; ++i) {
01188     EVT VT = N->getValueType(i);
01189     if (VT == MVT::Glue || VT == MVT::Other)
01190       continue;
01191     if (VT.isFloatingPoint() || VT.isVector())
01192       return Sched::ILP;
01193   }
01194 
01195   if (!N->isMachineOpcode())
01196     return Sched::RegPressure;
01197 
01198   // Loads are scheduled for latency even if the instruction itinerary
01199   // is not available.
01200   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
01201   const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
01202 
01203   if (MCID.getNumDefs() == 0)
01204     return Sched::RegPressure;
01205   if (!Itins->isEmpty() &&
01206       Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
01207     return Sched::ILP;
01208 
01209   return Sched::RegPressure;
01210 }
01211 
01212 //===----------------------------------------------------------------------===//
01213 // Lowering Code
01214 //===----------------------------------------------------------------------===//
01215 
01216 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
01217 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
01218   switch (CC) {
01219   default: llvm_unreachable("Unknown condition code!");
01220   case ISD::SETNE:  return ARMCC::NE;
01221   case ISD::SETEQ:  return ARMCC::EQ;
01222   case ISD::SETGT:  return ARMCC::GT;
01223   case ISD::SETGE:  return ARMCC::GE;
01224   case ISD::SETLT:  return ARMCC::LT;
01225   case ISD::SETLE:  return ARMCC::LE;
01226   case ISD::SETUGT: return ARMCC::HI;
01227   case ISD::SETUGE: return ARMCC::HS;
01228   case ISD::SETULT: return ARMCC::LO;
01229   case ISD::SETULE: return ARMCC::LS;
01230   }
01231 }
01232 
01233 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
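      /// Some conditions (e.g. SETONE, SETUEQ) cannot be tested with a single ARM
      /// condition after a VFP compare, so a second condition is returned in
      /// CondCode2; ARMCC::AL there means no second check is needed.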
01234 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
01235                         ARMCC::CondCodes &CondCode2) {
01236   CondCode2 = ARMCC::AL;
01237   switch (CC) {
01238   default: llvm_unreachable("Unknown FP condition!");
01239   case ISD::SETEQ:
01240   case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
01241   case ISD::SETGT:
01242   case ISD::SETOGT: CondCode = ARMCC::GT; break;
01243   case ISD::SETGE:
01244   case ISD::SETOGE: CondCode = ARMCC::GE; break;
01245   case ISD::SETOLT: CondCode = ARMCC::MI; break;
01246   case ISD::SETOLE: CondCode = ARMCC::LS; break;
01247   case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
01248   case ISD::SETO:   CondCode = ARMCC::VC; break;
01249   case ISD::SETUO:  CondCode = ARMCC::VS; break;
01250   case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
01251   case ISD::SETUGT: CondCode = ARMCC::HI; break;
01252   case ISD::SETUGE: CondCode = ARMCC::PL; break;
01253   case ISD::SETLT:
01254   case ISD::SETULT: CondCode = ARMCC::LT; break;
01255   case ISD::SETLE:
01256   case ISD::SETULE: CondCode = ARMCC::LE; break;
01257   case ISD::SETNE:
01258   case ISD::SETUNE: CondCode = ARMCC::NE; break;
01259   }
01260 }
01261 
01262 //===----------------------------------------------------------------------===//
01263 //                      Calling Convention Implementation
01264 //===----------------------------------------------------------------------===//
01265 
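      // ARMGenCallingConv.inc is generated by TableGen from ARMCallingConv.td and
      // provides the CC_* / RetCC_* assignment functions used by CCAssignFnForNode
      // below.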
01266 #include "ARMGenCallingConv.inc"
01267 
01268 /// getEffectiveCallingConv - Get the effective calling convention, taking into
01269 /// account presence of floating point hardware and calling convention
01270 /// limitations, such as support for variadic functions.
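      /// For example, a variadic call with the C calling convention on an AAPCS
      /// hard-float target is lowered using ARM_AAPCS rather than ARM_AAPCS_VFP,
      /// since variadic arguments are never passed in VFP registers.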
01271 CallingConv::ID
01272 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
01273                                            bool isVarArg) const {
01274   switch (CC) {
01275   default:
01276     llvm_unreachable("Unsupported calling convention");
01277   case CallingConv::ARM_AAPCS:
01278   case CallingConv::ARM_APCS:
01279   case CallingConv::GHC:
01280     return CC;
01281   case CallingConv::ARM_AAPCS_VFP:
01282     return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
01283   case CallingConv::C:
01284     if (!Subtarget->isAAPCS_ABI())
01285       return CallingConv::ARM_APCS;
01286     else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() &&
01287              getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
01288              !isVarArg)
01289       return CallingConv::ARM_AAPCS_VFP;
01290     else
01291       return CallingConv::ARM_AAPCS;
01292   case CallingConv::Fast:
01293     if (!Subtarget->isAAPCS_ABI()) {
01294       if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
01295         return CallingConv::Fast;
01296       return CallingConv::ARM_APCS;
01297     } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
01298       return CallingConv::ARM_AAPCS_VFP;
01299     else
01300       return CallingConv::ARM_AAPCS;
01301   }
01302 }
01303 
01304 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
01305 /// CallingConvention.
01306 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
01307                                                  bool Return,
01308                                                  bool isVarArg) const {
01309   switch (getEffectiveCallingConv(CC, isVarArg)) {
01310   default:
01311     llvm_unreachable("Unsupported calling convention");
01312   case CallingConv::ARM_APCS:
01313     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
01314   case CallingConv::ARM_AAPCS:
01315     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
01316   case CallingConv::ARM_AAPCS_VFP:
01317     return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
01318   case CallingConv::Fast:
01319     return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
01320   case CallingConv::GHC:
01321     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
01322   }
01323 }
01324 
01325 /// LowerCallResult - Lower the result values of a call into the
01326 /// appropriate copies out of appropriate physical registers.
01327 SDValue
01328 ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
01329                                    CallingConv::ID CallConv, bool isVarArg,
01330                                    const SmallVectorImpl<ISD::InputArg> &Ins,
01331                                    SDLoc dl, SelectionDAG &DAG,
01332                                    SmallVectorImpl<SDValue> &InVals,
01333                                    bool isThisReturn, SDValue ThisVal) const {
01334 
01335   // Assign locations to each value returned by this call.
01336   SmallVector<CCValAssign, 16> RVLocs;
01337   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
01338                     *DAG.getContext(), Call);
01339   CCInfo.AnalyzeCallResult(Ins,
01340                            CCAssignFnForNode(CallConv, /* Return*/ true,
01341                                              isVarArg));
01342 
01343   // Copy all of the result registers out of their specified physreg.
01344   for (unsigned i = 0; i != RVLocs.size(); ++i) {
01345     CCValAssign VA = RVLocs[i];
01346 
01347     // Pass the 'this' value directly from the argument to the return value,
01348     // to avoid register unit interference.
01349     if (i == 0 && isThisReturn) {
01350       assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
01351              "unexpected return calling convention register assignment");
01352       InVals.push_back(ThisVal);
01353       continue;
01354     }
01355 
01356     SDValue Val;
01357     if (VA.needsCustom()) {
01358       // Handle f64 or half of a v2f64.
01359       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
01360                                       InFlag);
01361       Chain = Lo.getValue(1);
01362       InFlag = Lo.getValue(2);
01363       VA = RVLocs[++i]; // skip ahead to next loc
01364       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
01365                                       InFlag);
01366       Chain = Hi.getValue(1);
01367       InFlag = Hi.getValue(2);
01368       if (!Subtarget->isLittle())
01369         std::swap (Lo, Hi);
01370       Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
01371 
01372       if (VA.getLocVT() == MVT::v2f64) {
01373         SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
01374         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
01375                           DAG.getConstant(0, MVT::i32));
01376 
01377         VA = RVLocs[++i]; // skip ahead to next loc
01378         Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
01379         Chain = Lo.getValue(1);
01380         InFlag = Lo.getValue(2);
01381         VA = RVLocs[++i]; // skip ahead to next loc
01382         Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
01383         Chain = Hi.getValue(1);
01384         InFlag = Hi.getValue(2);
01385         if (!Subtarget->isLittle())
01386           std::swap (Lo, Hi);
01387         Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
01388         Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
01389                           DAG.getConstant(1, MVT::i32));
01390       }
01391     } else {
01392       Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
01393                                InFlag);
01394       Chain = Val.getValue(1);
01395       InFlag = Val.getValue(2);
01396     }
01397 
01398     switch (VA.getLocInfo()) {
01399     default: llvm_unreachable("Unknown loc info!");
01400     case CCValAssign::Full: break;
01401     case CCValAssign::BCvt:
01402       Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
01403       break;
01404     }
01405 
01406     InVals.push_back(Val);
01407   }
01408 
01409   return Chain;
01410 }
01411 
01412 /// LowerMemOpCallTo - Store the argument to the stack.
01413 SDValue
01414 ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
01415                                     SDValue StackPtr, SDValue Arg,
01416                                     SDLoc dl, SelectionDAG &DAG,
01417                                     const CCValAssign &VA,
01418                                     ISD::ArgFlagsTy Flags) const {
01419   unsigned LocMemOffset = VA.getLocMemOffset();
01420   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
01421   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
01422   return DAG.getStore(Chain, dl, Arg, PtrOff,
01423                       MachinePointerInfo::getStack(LocMemOffset),
01424                       false, false, 0);
01425 }
01426 
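      /// PassF64ArgInRegs - Split an f64 argument into a pair of i32 values with
      /// VMOVRRD and record them as register copies (or as a stack store when the
      /// second half is assigned to a memory location).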
01427 void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
01428                                          SDValue Chain, SDValue &Arg,
01429                                          RegsToPassVector &RegsToPass,
01430                                          CCValAssign &VA, CCValAssign &NextVA,
01431                                          SDValue &StackPtr,
01432                                          SmallVectorImpl<SDValue> &MemOpChains,
01433                                          ISD::ArgFlagsTy Flags) const {
01434 
01435   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
01436                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
01437   unsigned id = Subtarget->isLittle() ? 0 : 1;
01438   RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
01439 
01440   if (NextVA.isRegLoc())
01441     RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
01442   else {
01443     assert(NextVA.isMemLoc());
01444     if (!StackPtr.getNode())
01445       StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
01446 
01447     MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
01448                                            dl, DAG, NextVA,
01449                                            Flags));
01450   }
01451 }
01452 
01453 /// LowerCall - Lower a call into a callseq_start <-
01454 /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
01455 /// nodes.
01456 SDValue
01457 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
01458                              SmallVectorImpl<SDValue> &InVals) const {
01459   SelectionDAG &DAG                     = CLI.DAG;
01460   SDLoc &dl                          = CLI.DL;
01461   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
01462   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
01463   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
01464   SDValue Chain                         = CLI.Chain;
01465   SDValue Callee                        = CLI.Callee;
01466   bool &isTailCall                      = CLI.IsTailCall;
01467   CallingConv::ID CallConv              = CLI.CallConv;
01468   bool doesNotRet                       = CLI.DoesNotReturn;
01469   bool isVarArg                         = CLI.IsVarArg;
01470 
01471   MachineFunction &MF = DAG.getMachineFunction();
01472   bool isStructRet    = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
01473   bool isThisReturn   = false;
01474   bool isSibCall      = false;
01475 
01476   // Disable tail calls if they're not supported.
01477   if (!Subtarget->supportsTailCall() || MF.getTarget().Options.DisableTailCalls)
01478     isTailCall = false;
01479 
01480   if (isTailCall) {
01481     // Check if it's really possible to do a tail call.
01482     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
01483                     isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
01484                                                    Outs, OutVals, Ins, DAG);
01485     if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
01486       report_fatal_error("failed to perform tail call elimination on a call "
01487                          "site marked musttail");
01488     // We don't support GuaranteedTailCallOpt for ARM, only automatically
01489     // detected sibcalls.
01490     if (isTailCall) {
01491       ++NumTailCalls;
01492       isSibCall = true;
01493     }
01494   }
01495 
01496   // Analyze operands of the call, assigning locations to each operand.
01497   SmallVector<CCValAssign, 16> ArgLocs;
01498   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
01499                     *DAG.getContext(), Call);
01500   CCInfo.AnalyzeCallOperands(Outs,
01501                              CCAssignFnForNode(CallConv, /* Return*/ false,
01502                                                isVarArg));
01503 
01504   // Get a count of how many bytes are to be pushed on the stack.
01505   unsigned NumBytes = CCInfo.getNextStackOffset();
01506 
01507   // For tail calls, memory operands are available in our caller's stack.
01508   if (isSibCall)
01509     NumBytes = 0;
01510 
01511   // Adjust the stack pointer for the new arguments...
01512   // These operations are automatically eliminated by the prolog/epilog pass
01513   if (!isSibCall)
01514     Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
01515                                  dl);
01516 
01517   SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
01518 
01519   RegsToPassVector RegsToPass;
01520   SmallVector<SDValue, 8> MemOpChains;
01521 
01522   // Walk the register/memloc assignments, inserting copies/loads.  In the case
01523   // of tail call optimization, arguments are handled later.
01524   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
01525        i != e;
01526        ++i, ++realArgIdx) {
01527     CCValAssign &VA = ArgLocs[i];
01528     SDValue Arg = OutVals[realArgIdx];
01529     ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
01530     bool isByVal = Flags.isByVal();
01531 
01532     // Promote the value if needed.
01533     switch (VA.getLocInfo()) {
01534     default: llvm_unreachable("Unknown loc info!");
01535     case CCValAssign::Full: break;
01536     case CCValAssign::SExt:
01537       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
01538       break;
01539     case CCValAssign::ZExt:
01540       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
01541       break;
01542     case CCValAssign::AExt:
01543       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
01544       break;
01545     case CCValAssign::BCvt:
01546       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
01547       break;
01548     }
01549 
01550     // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
01551     if (VA.needsCustom()) {
01552       if (VA.getLocVT() == MVT::v2f64) {
01553         SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
01554                                   DAG.getConstant(0, MVT::i32));
01555         SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
01556                                   DAG.getConstant(1, MVT::i32));
01557 
01558         PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
01559                          VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
01560 
01561         VA = ArgLocs[++i]; // skip ahead to next loc
01562         if (VA.isRegLoc()) {
01563           PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
01564                            VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
01565         } else {
01566           assert(VA.isMemLoc());
01567 
01568           MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
01569                                                  dl, DAG, VA, Flags));
01570         }
01571       } else {
01572         PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
01573                          StackPtr, MemOpChains, Flags);
01574       }
01575     } else if (VA.isRegLoc()) {
01576       if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) {
01577         assert(VA.getLocVT() == MVT::i32 &&
01578                "unexpected calling convention register assignment");
01579         assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
01580                "unexpected use of 'returned'");
01581         isThisReturn = true;
01582       }
01583       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
01584     } else if (isByVal) {
01585       assert(VA.isMemLoc());
01586       unsigned offset = 0;
01587 
01588       // True if this byval aggregate will be split between registers
01589       // and memory.
01590       unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
01591       unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
01592 
01593       if (CurByValIdx < ByValArgsCount) {
01594 
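              // The in-register portion of this byval lives in [RegBegin, RegEnd);
              // load it 4 bytes at a time and pass the pieces in those registers.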
01595         unsigned RegBegin, RegEnd;
01596         CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
01597 
01598         EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
01599         unsigned int i, j;
01600         for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
01601           SDValue Const = DAG.getConstant(4*i, MVT::i32);
01602           SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
01603           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
01604                                      MachinePointerInfo(),
01605                                      false, false, false,
01606                                      DAG.InferPtrAlignment(AddArg));
01607           MemOpChains.push_back(Load.getValue(1));
01608           RegsToPass.push_back(std::make_pair(j, Load));
01609         }
01610 
01611         // If the parameter size exceeds the register area, the "offset" value
01612         // helps us compute the stack slot for the remaining part properly.
01613         offset = RegEnd - RegBegin;
01614 
01615         CCInfo.nextInRegsParam();
01616       }
01617 
01618       if (Flags.getByValSize() > 4*offset) {
01619         unsigned LocMemOffset = VA.getLocMemOffset();
01620         SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
01621         SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
01622                                   StkPtrOff);
01623         SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
01624         SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
01625         SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
01626                                            MVT::i32);
01627         SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), MVT::i32);
01628 
01629         SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
01630         SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
01631         MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
01632                                           Ops));
01633       }
01634     } else if (!isSibCall) {
01635       assert(VA.isMemLoc());
01636 
01637       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
01638                                              dl, DAG, VA, Flags));
01639     }
01640   }
01641 
01642   if (!MemOpChains.empty())
01643     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
01644 
01645   // Build a sequence of copy-to-reg nodes chained together with token chain
01646   // and flag operands which copy the outgoing args into the appropriate regs.
01647   SDValue InFlag;
01648   // Tail call byval lowering might overwrite argument registers so in case of
01649   // tail call optimization the copies to registers are lowered later.
01650   if (!isTailCall)
01651     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
01652       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
01653                                RegsToPass[i].second, InFlag);
01654       InFlag = Chain.getValue(1);
01655     }
01656 
01657   // For tail calls lower the arguments to the 'real' stack slot.
01658   if (isTailCall) {
01659     // Force all the incoming stack arguments to be loaded from the stack
01660     // before any new outgoing arguments are stored to the stack, because the
01661     // outgoing stack slots may alias the incoming argument stack slots, and
01662     // the alias isn't otherwise explicit. This is slightly more conservative
01663     // than necessary, because it means that each store effectively depends
01664     // on every argument instead of just those arguments it would clobber.
01665 
01666     // Do not flag preceding copytoreg stuff together with the following stuff.
01667     InFlag = SDValue();
01668     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
01669       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
01670                                RegsToPass[i].second, InFlag);
01671       InFlag = Chain.getValue(1);
01672     }
01673     InFlag = SDValue();
01674   }
01675 
01676   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
01677   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
01678   // node so that legalize doesn't hack it.
01679   bool isDirect = false;
01680   bool isARMFunc = false;
01681   bool isLocalARMFunc = false;
01682   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
01683 
01684   if (EnableARMLongCalls) {
01685     assert((Subtarget->isTargetWindows() ||
01686             getTargetMachine().getRelocationModel() == Reloc::Static) &&
01687            "long-calls with non-static relocation model!");
01688     // Handle a global address or an external symbol. If it's not one of
01689     // those, the target's already in a register, so we don't need to do
01690     // anything extra.
01691     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
01692       const GlobalValue *GV = G->getGlobal();
01693       // Create a constant pool entry for the callee address
01694       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
01695       ARMConstantPoolValue *CPV =
01696         ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
01697 
01698       // Get the address of the callee into a register
01699       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
01700       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
01701       Callee = DAG.getLoad(getPointerTy(), dl,
01702                            DAG.getEntryNode(), CPAddr,
01703                            MachinePointerInfo::getConstantPool(),
01704                            false, false, false, 0);
01705     } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
01706       const char *Sym = S->getSymbol();
01707 
01708       // Create a constant pool entry for the callee address
01709       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
01710       ARMConstantPoolValue *CPV =
01711         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
01712                                       ARMPCLabelIndex, 0);
01713       // Get the address of the callee into a register
01714       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
01715       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
01716       Callee = DAG.getLoad(getPointerTy(), dl,
01717                            DAG.getEntryNode(), CPAddr,
01718                            MachinePointerInfo::getConstantPool(),
01719                            false, false, false, 0);
01720     }
01721   } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
01722     const GlobalValue *GV = G->getGlobal();
01723     isDirect = true;
01724     bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
01725     bool isStub = (isExt && Subtarget->isTargetMachO()) &&
01726                    getTargetMachine().getRelocationModel() != Reloc::Static;
01727     isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
01728     // ARM call to a local ARM function is predicable.
01729     isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
01730     // tBX takes a register source operand.
01731     if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
01732       assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
01733       Callee = DAG.getNode(ARMISD::WrapperPIC, dl, getPointerTy(),
01734                            DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
01735                                                       0, ARMII::MO_NONLAZY));
01736       Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
01737                            MachinePointerInfo::getGOT(), false, false, true, 0);
01738     } else if (Subtarget->isTargetCOFF()) {
01739       assert(Subtarget->isTargetWindows() &&
01740              "Windows is the only supported COFF target");
01741       unsigned TargetFlags = GV->hasDLLImportStorageClass()
01742                                  ? ARMII::MO_DLLIMPORT
01743                                  : ARMII::MO_NO_FLAG;
01744       Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), /*Offset=*/0,
01745                                           TargetFlags);
01746       if (GV->hasDLLImportStorageClass())
01747         Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
01748                              DAG.getNode(ARMISD::Wrapper, dl, getPointerTy(),
01749                                          Callee), MachinePointerInfo::getGOT(),
01750                              false, false, false, 0);
01751     } else {
01752       // On ELF targets for PIC code, direct calls should go through the PLT
01753       unsigned OpFlags = 0;
01754       if (Subtarget->isTargetELF() &&
01755           getTargetMachine().getRelocationModel() == Reloc::PIC_)
01756         OpFlags = ARMII::MO_PLT;
01757       Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
01758     }
01759   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
01760     isDirect = true;
01761     bool isStub = Subtarget->isTargetMachO() &&
01762                   getTargetMachine().getRelocationModel() != Reloc::Static;
01763     isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
01764     // tBX takes a register source operand.
01765     const char *Sym = S->getSymbol();
01766     if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
01767       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
01768       ARMConstantPoolValue *CPV =
01769         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
01770                                       ARMPCLabelIndex, 4);
01771       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
01772       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
01773       Callee = DAG.getLoad(getPointerTy(), dl,
01774                            DAG.getEntryNode(), CPAddr,
01775                            MachinePointerInfo::getConstantPool(),
01776                            false, false, false, 0);
01777       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
01778       Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
01779                            getPointerTy(), Callee, PICLabel);
01780     } else {
01781       unsigned OpFlags = 0;
01782       // On ELF targets for PIC code, direct calls should go through the PLT
01783       if (Subtarget->isTargetELF() &&
01784                   getTargetMachine().getRelocationModel() == Reloc::PIC_)
01785         OpFlags = ARMII::MO_PLT;
01786       Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags);
01787     }
01788   }
01789 
01790   // FIXME: handle tail calls differently.
01791   unsigned CallOpc;
01792   bool HasMinSizeAttr = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
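        // Pre-ARMv5T cores have no BLX, so indirect calls (and, in Thumb mode,
        // calls that may target ARM code) must use CALL_NOLINK, which sets up LR
        // manually instead of relying on a branch-and-link.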
01793   if (Subtarget->isThumb()) {
01794     if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
01795       CallOpc = ARMISD::CALL_NOLINK;
01796     else
01797       CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
01798   } else {
01799     if (!isDirect && !Subtarget->hasV5TOps())
01800       CallOpc = ARMISD::CALL_NOLINK;
01801     else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
01802                // Emit regular call when code size is the priority
01803                !HasMinSizeAttr)
01804       // "mov lr, pc; b _foo" to avoid confusing the RSP
01805       CallOpc = ARMISD::CALL_NOLINK;
01806     else
01807       CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
01808   }
01809 
01810   std::vector<SDValue> Ops;
01811   Ops.push_back(Chain);
01812   Ops.push_back(Callee);
01813 
01814   // Add argument registers to the end of the list so that they are known live
01815   // into the call.
01816   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
01817     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
01818                                   RegsToPass[i].second.getValueType()));
01819 
01820   // Add a register mask operand representing the call-preserved registers.
01821   if (!isTailCall) {
01822     const uint32_t *Mask;
01823     const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
01824     if (isThisReturn) {
01825       // For 'this' returns, use the R0-preserving mask if applicable
01826       Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
01827       if (!Mask) {
01828         // Set isThisReturn to false if the calling convention is not one that
01829         // allows 'returned' to be modeled in this way, so LowerCallResult does
01830         // not try to pass 'this' straight through
01831         isThisReturn = false;
01832         Mask = ARI->getCallPreservedMask(MF, CallConv);
01833       }
01834     } else
01835       Mask = ARI->getCallPreservedMask(MF, CallConv);
01836 
01837     assert(Mask && "Missing call preserved mask for calling convention");
01838     Ops.push_back(DAG.getRegisterMask(Mask));
01839   }
01840 
01841   if (InFlag.getNode())
01842     Ops.push_back(InFlag);
01843 
01844   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
01845   if (isTailCall)
01846     return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
01847 
01848   // Returns a chain and a flag for retval copy to use.
01849   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
01850   InFlag = Chain.getValue(1);
01851 
01852   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
01853                              DAG.getIntPtrConstant(0, true), InFlag, dl);
01854   if (!Ins.empty())
01855     InFlag = Chain.getValue(1);
01856 
01857   // Handle result values, copying them out of physregs into vregs that we
01858   // return.
01859   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
01860                          InVals, isThisReturn,
01861                          isThisReturn ? OutVals[0] : SDValue());
01862 }
01863 
01864 /// HandleByVal - Every parameter *after* a byval parameter is passed
01865 /// on the stack.  Remember the next parameter register to allocate,
01866 /// and then confiscate the rest of the parameter registers to ensure
01867 /// this.
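      /// For example (assuming the AAPCS argument registers r0-r3 and no arguments
      /// already passed on the stack): an 8-byte-aligned byval of 16 bytes arriving
      /// when r1 is the next free register wastes r1, is assigned r2-r3, and leaves
      /// the remaining 8 bytes to be passed on the stack.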
01868 void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
01869                                     unsigned Align) const {
01870   assert((State->getCallOrPrologue() == Prologue ||
01871           State->getCallOrPrologue() == Call) &&
01872          "unhandled ParmContext");
01873 
01874   // Byval (as with any stack) slots are always at least 4 byte aligned.
01875   Align = std::max(Align, 4U);
01876 
01877   unsigned Reg = State->AllocateReg(GPRArgRegs);
01878   if (!Reg)
01879     return;
01880 
01881   unsigned AlignInRegs = Align / 4;
01882   unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
01883   for (unsigned i = 0; i < Waste; ++i)
01884     Reg = State->AllocateReg(GPRArgRegs);
01885 
01886   if (!Reg)
01887     return;
01888 
01889   unsigned Excess = 4 * (ARM::R4 - Reg);
01890 
01891   // Special case when NSAA != SP and the parameter size is greater than the
01892   // size of all remaining GPR regs. In that case we can't split the parameter;
01893   // we must send it all to the stack. We also must set the NCRN to R4, so
01894   // waste all remaining registers.
01895   const unsigned NSAAOffset = State->getNextStackOffset();
01896   if (NSAAOffset != 0 && Size > Excess) {
01897     while (State->AllocateReg(GPRArgRegs))
01898       ;
01899     return;
01900   }
01901 
01902   // The first register for a byval parameter is the first register that
01903   // wasn't allocated before this call, i.e. "Reg".
01904   // If the parameter is small enough to fit in the range [Reg, r4), then
01905   // the end (one past the last) register is Reg + param-size-in-regs;
01906   // otherwise the parameter is split between registers and the stack, and
01907   // the end register is r4.
01908   unsigned ByValRegBegin = Reg;
01909   unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
01910   State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
01911   // Note, the first register was already allocated at the beginning of this
01912   // function; allocate the remaining registers we need.
01913   for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
01914     State->AllocateReg(GPRArgRegs);
01915   // A byval parameter that is split between registers and memory needs its
01916   // size truncated here.
01917   // In the case where the entire structure fits in registers, we set the
01918   // size in memory to zero.
01919   Size = std::max<int>(Size - Excess, 0);
01920 }
01921 
01922 
01923 /// MatchingStackOffset - Return true if the given stack call argument is
01924 /// already available in the same position (relatively) of the caller's
01925 /// incoming argument stack.
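      /// Tail-call eligibility checking uses this to verify that stack arguments
      /// are already laid out where the callee expects them.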
01926 static
01927 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
01928                          MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
01929                          const TargetInstrInfo *TII) {
01930   unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
01931   int FI = INT_MAX;
01932   if (Arg.getOpcode() == ISD::CopyFromReg) {
01933     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
01934     if (!TargetRegisterInfo::isVirtualRegister(VR))
01935       return false;
01936     MachineInstr *Def = MRI->getVRegDef(VR);
01937     if (!Def)
01938       return false;
01939     if (!Flags.isByVal()) {
01940       if (!TII->isLoadFromStackSlot(Def, FI))
01941         return false;
01942     } else {
01943       return false;
01944     }
01945   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
01946     if (Flags.isByVal())
01947       // ByVal argument is passed in as a pointer but it's now being
01948       // dereferenced. e.g.
01949       // define @foo(%struct.X* %A) {
01950       //   tail call @bar(%struct.X* byval %A)
01951       // }
01952       return false;
01953     SDValue Ptr = Ld->getBasePtr();
01954     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
01955     if (!FINode)
01956       return false;
01957     FI = FINode->getIndex();
01958   } else
01959     return false;
01960 
01961   assert(FI != INT_MAX);
01962   if (!MFI->isFixedObjectIndex(FI))
01963     return false;
01964   return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
01965 }
01966 
01967 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
01968 /// for tail call optimization. Targets which want to do tail call
01969 /// optimization should implement this function.
01970 bool
01971 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
01972                                                      CallingConv::ID CalleeCC,
01973                                                      bool isVarArg,
01974                                                      bool isCalleeStructRet,
01975                                                      bool isCallerStructRet,
01976                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
01977                                     const SmallVectorImpl<SDValue> &OutVals,
01978                                     const SmallVectorImpl<ISD::InputArg> &Ins,
01979                                                      SelectionDAG& DAG) const {
01980   const Function *CallerF = DAG.getMachineFunction().getFunction();
01981   CallingConv::ID CallerCC = CallerF->getCallingConv();
01982   bool CCMatch = CallerCC == CalleeCC;
01983 
01984   // Look for obvious safe cases to perform tail call optimization that do not
01985   // require ABI changes. This is what gcc calls sibcall.
01986 
01987   // Do not sibcall-optimize vararg calls unless the call site passes no
01988   // arguments.
01989   if (isVarArg && !Outs.empty())
01990     return false;
01991 
01992   // Exception-handling functions need a special set of instructions to indicate
01993   // a return to the hardware. Tail-calling another function would probably
01994   // break this.
01995   if (CallerF->hasFnAttribute("interrupt"))
01996     return false;
01997 
01998   // Also avoid sibcall optimization if either caller or callee uses struct
01999   // return semantics.
02000   if (isCalleeStructRet || isCallerStructRet)
02001     return false;
02002 
02003   // FIXME: Completely disable sibcall for Thumb1 since ThumbRegisterInfo::
02004   // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
02005   // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
02006   // support in the assembler and linker to be used. This would need to be
02007   // fixed to fully support tail calls in Thumb1.
02008   //
02009   // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
02010   // LR.  This means if we need to reload LR, it takes an extra instruction,
02011   // which outweighs the value of the tail call; but here we don't know yet
02012   // whether LR is going to be used.  Probably the right approach is to
02013   // generate the tail call here and turn it back into CALL/RET in
02014   // emitEpilogue if LR is used.
02015 
02016   // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
02017   // but we need to make sure there are enough registers; the only valid
02018   // registers are the 4 used for parameters.  We don't currently handle this
02019   // case.
02020   if (Subtarget->isThumb1Only())
02021     return false;
02022 
02023   // Externally-defined functions with weak linkage should not be
02024   // tail-called on ARM when the OS does not support dynamic
02025   // pre-emption of symbols, as the AAELF spec requires normal calls
02026   // to undefined weak functions to be replaced with a NOP or jump to the
02027   // next instruction. The behaviour of branch instructions in this
02028   // situation (as used for tail calls) is implementation-defined, so we
02029   // cannot rely on the linker replacing the tail call with a return.
02030   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
02031     const GlobalValue *GV = G->getGlobal();
02032     const Triple TT(getTargetMachine().getTargetTriple());
02033     if (GV->hasExternalWeakLinkage() &&
02034         (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
02035       return false;
02036   }
02037 
02038   // If the calling conventions do not match, then we'd better make sure the
02039   // results are returned in the same way as what the caller expects.
02040   if (!CCMatch) {
02041     SmallVector<CCValAssign, 16> RVLocs1;
02042     ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
02043                        *DAG.getContext(), Call);
02044     CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
02045 
02046     SmallVector<CCValAssign, 16> RVLocs2;
02047     ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
02048                        *DAG.getContext(), Call);
02049     CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
02050 
02051     if (RVLocs1.size() != RVLocs2.size())
02052       return false;
02053     for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
02054       if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
02055         return false;
02056       if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
02057         return false;
02058       if (RVLocs1[i].isRegLoc()) {
02059         if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
02060           return false;
02061       } else {
02062         if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
02063           return false;
02064       }
02065     }
02066   }
02067 
02068   // If Caller's vararg or byval argument has been split between registers and
02069   // stack, do not perform tail call, since part of the argument is in caller's
02070   // local frame.
02071   const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction().
02072                                       getInfo<ARMFunctionInfo>();
02073   if (AFI_Caller->getArgRegsSaveSize())
02074     return false;
02075 
02076   // If the callee takes no arguments then go on to check the results of the
02077   // call.
02078   if (!Outs.empty()) {
02079     // Check if stack adjustment is needed. For now, do not do this if any
02080     // argument is passed on the stack.
02081     SmallVector<CCValAssign, 16> ArgLocs;
02082     ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
02083                       *DAG.getContext(), Call);
02084     CCInfo.AnalyzeCallOperands(Outs,
02085                                CCAssignFnForNode(CalleeCC, false, isVarArg));
02086     if (CCInfo.getNextStackOffset()) {
02087       MachineFunction &MF = DAG.getMachineFunction();
02088 
02089       // Check if the arguments are already laid out in the right way as
02090       // the caller's fixed stack objects.
02091       MachineFrameInfo *MFI = MF.getFrameInfo();
02092       const MachineRegisterInfo *MRI = &MF.getRegInfo();
02093       const TargetInstrInfo *TII = Subtarget->getInstrInfo();
02094       for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
02095            i != e;
02096            ++i, ++realArgIdx) {
02097         CCValAssign &VA = ArgLocs[i];
02098         EVT RegVT = VA.getLocVT();
02099         SDValue Arg = OutVals[realArgIdx];
02100         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
02101         if (VA.getLocInfo() == CCValAssign::Indirect)
02102           return false;
02103         if (VA.needsCustom()) {
02104           // f64 and vector types are split into multiple registers or
02105           // register/stack-slot combinations.  The types will not match
02106           // the registers; give up on memory f64 refs until we figure
02107           // out what to do about this.
02108           if (!VA.isRegLoc())
02109             return false;
02110           if (!ArgLocs[++i].isRegLoc())
02111             return false;
02112           if (RegVT == MVT::v2f64) {
02113             if (!ArgLocs[++i].isRegLoc())
02114               return false;
02115             if (!ArgLocs[++i].isRegLoc())
02116               return false;
02117           }
02118         } else if (!VA.isRegLoc()) {
02119           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
02120                                    MFI, MRI, TII))
02121             return false;
02122         }
02123       }
02124     }
02125   }
02126 
02127   return true;
02128 }
02129 
02130 bool
02131 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
02132                                   MachineFunction &MF, bool isVarArg,
02133                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
02134                                   LLVMContext &Context) const {
02135   SmallVector<CCValAssign, 16> RVLocs;
02136   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
02137   return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true,
02138                                                     isVarArg));
02139 }
02140 
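      /// LowerInterruptReturn - Build the ARMISD::INTRET_FLAG return used by
      /// interrupt handlers, which later becomes the "subs pc, lr, #N" exception
      /// return sequence described below.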
02141 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
02142                                     SDLoc DL, SelectionDAG &DAG) {
02143   const MachineFunction &MF = DAG.getMachineFunction();
02144   const Function *F = MF.getFunction();
02145 
02146   StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();
02147 
02148   // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
02149   // version of the "preferred return address". These offsets affect the return
02150   // instruction if this is a return from PL1 without hypervisor extensions.
02151   //    IRQ/FIQ: +4     "subs pc, lr, #4"
02152   //    SWI:     0      "subs pc, lr, #0"
02153   //    ABORT:   +4     "subs pc, lr, #4"
02154   //    UNDEF:   +4/+2  "subs pc, lr, #0"
02155   // UNDEF varies depending on whether the exception came from ARM or Thumb
02156   // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
02157 
02158   int64_t LROffset;
02159   if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
02160       IntKind == "ABORT")
02161     LROffset = 4;
02162   else if (IntKind == "SWI" || IntKind == "UNDEF")
02163     LROffset = 0;
02164   else
02165     report_fatal_error("Unsupported interrupt attribute. If present, value "
02166                        "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
02167 
02168   RetOps.insert(RetOps.begin() + 1, DAG.getConstant(LROffset, MVT::i32, false));
02169 
02170   return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
02171 }
02172 
02173 SDValue
02174 ARMTargetLowering::LowerReturn(SDValue Chain,
02175                                CallingConv::ID CallConv, bool isVarArg,
02176                                const SmallVectorImpl<ISD::OutputArg> &Outs,
02177                                const SmallVectorImpl<SDValue> &OutVals,
02178                                SDLoc dl, SelectionDAG &DAG) const {
02179 
02180   // CCValAssign - represent the assignment of the return value to a location.
02181   SmallVector<CCValAssign, 16> RVLocs;
02182 
02183   // CCState - Info about the registers and stack slots.
02184   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
02185                     *DAG.getContext(), Call);
02186 
02187   // Analyze outgoing return values.
02188   CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
02189                                                isVarArg));
02190 
02191   SDValue Flag;
02192   SmallVector<SDValue, 4> RetOps;
02193   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
02194   bool isLittleEndian = Subtarget->isLittle();
02195 
02196   MachineFunction &MF = DAG.getMachineFunction();
02197   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02198   AFI->setReturnRegsCount(RVLocs.size());
02199 
02200   // Copy the result values into the output registers.
02201   for (unsigned i = 0, realRVLocIdx = 0;
02202        i != RVLocs.size();
02203        ++i, ++realRVLocIdx) {
02204     CCValAssign &VA = RVLocs[i];
02205     assert(VA.isRegLoc() && "Can only return in registers!");
02206 
02207     SDValue Arg = OutVals[realRVLocIdx];
02208 
02209     switch (VA.getLocInfo()) {
02210     default: llvm_unreachable("Unknown loc info!");
02211     case CCValAssign::Full: break;
02212     case CCValAssign::BCvt:
02213       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
02214       break;
02215     }
02216 
02217     if (VA.needsCustom()) {
02218       if (VA.getLocVT() == MVT::v2f64) {
02219         // Extract the first half and return it in two registers.
02220         SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
02221                                    DAG.getConstant(0, MVT::i32));
02222         SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
02223                                        DAG.getVTList(MVT::i32, MVT::i32), Half);
02224 
02225         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02226                                  HalfGPRs.getValue(isLittleEndian ? 0 : 1),
02227                                  Flag);
02228         Flag = Chain.getValue(1);
02229         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02230         VA = RVLocs[++i]; // skip ahead to next loc
02231         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02232                                  HalfGPRs.getValue(isLittleEndian ? 1 : 0),
02233                                  Flag);
02234         Flag = Chain.getValue(1);
02235         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02236         VA = RVLocs[++i]; // skip ahead to next loc
02237 
02238         // Extract the 2nd half and fall through to handle it as an f64 value.
02239         Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
02240                           DAG.getConstant(1, MVT::i32));
02241       }
02242       // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
02243       // available.
02244       SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
02245                                   DAG.getVTList(MVT::i32, MVT::i32), Arg);
02246       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02247                                fmrrd.getValue(isLittleEndian ? 0 : 1),
02248                                Flag);
02249       Flag = Chain.getValue(1);
02250       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02251       VA = RVLocs[++i]; // skip ahead to next loc
02252       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02253                                fmrrd.getValue(isLittleEndian ? 1 : 0),
02254                                Flag);
02255     } else
02256       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
02257 
02258     // Guarantee that all emitted copies are glued together so the scheduler
02259     // cannot separate them from the return.
02260     Flag = Chain.getValue(1);
02261     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02262   }
02263 
02264   // Update chain and glue.
02265   RetOps[0] = Chain;
02266   if (Flag.getNode())
02267     RetOps.push_back(Flag);
02268 
02269   // CPUs which aren't M-class use a special sequence to return from
02270   // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
02271   // though we use "subs pc, lr, #N").
02272   //
02273   // M-class CPUs actually use a normal return sequence with a special
02274   // (hardware-provided) value in LR, so the normal code path works.
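        // For example, an IRQ handler with this attribute typically returns with
        // something like "subs pc, lr, #4"; the exact immediate depends on the
        // exception type.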
02275   if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
02276       !Subtarget->isMClass()) {
02277     if (Subtarget->isThumb1Only())
02278       report_fatal_error("interrupt attribute is not supported in Thumb1");
02279     return LowerInterruptReturn(RetOps, dl, DAG);
02280   }
02281 
02282   return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
02283 }
02284 
02285 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
02286   if (N->getNumValues() != 1)
02287     return false;
02288   if (!N->hasNUsesOfValue(1, 0))
02289     return false;
02290 
02291   SDValue TCChain = Chain;
02292   SDNode *Copy = *N->use_begin();
02293   if (Copy->getOpcode() == ISD::CopyToReg) {
02294     // If the copy has a glue operand, we conservatively assume it isn't safe to
02295     // perform a tail call.
02296     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
02297       return false;
02298     TCChain = Copy->getOperand(0);
02299   } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
02300     SDNode *VMov = Copy;
02301     // f64 returned in a pair of GPRs.
02302     SmallPtrSet<SDNode*, 2> Copies;
02303     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
02304          UI != UE; ++UI) {
02305       if (UI->getOpcode() != ISD::CopyToReg)
02306         return false;
02307       Copies.insert(*UI);
02308     }
02309     if (Copies.size() > 2)
02310       return false;
02311 
02312     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
02313          UI != UE; ++UI) {
02314       SDValue UseChain = UI->getOperand(0);
02315       if (Copies.count(UseChain.getNode()))
02316         // Second CopyToReg
02317         Copy = *UI;
02318       else {
02319         // We are at the top of this chain.
02320         // If the copy has a glue operand, we conservatively assume it
02321         // isn't safe to perform a tail call.
02322         if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
02323           return false;
02324         // First CopyToReg
02325         TCChain = UseChain;
02326       }
02327     }
02328   } else if (Copy->getOpcode() == ISD::BITCAST) {
02329     // f32 returned in a single GPR.
02330     if (!Copy->hasOneUse())
02331       return false;
02332     Copy = *Copy->use_begin();
02333     if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
02334       return false;
02335     // If the copy has a glue operand, we conservatively assume it isn't safe to
02336     // perform a tail call.
02337     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
02338       return false;
02339     TCChain = Copy->getOperand(0);
02340   } else {
02341     return false;
02342   }
02343 
02344   bool HasRet = false;
02345   for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
02346        UI != UE; ++UI) {
02347     if (UI->getOpcode() != ARMISD::RET_FLAG &&
02348         UI->getOpcode() != ARMISD::INTRET_FLAG)
02349       return false;
02350     HasRet = true;
02351   }
02352 
02353   if (!HasRet)
02354     return false;
02355 
02356   Chain = TCChain;
02357   return true;
02358 }
02359 
02360 bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
02361   if (!Subtarget->supportsTailCall())
02362     return false;
02363 
02364   if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
02365     return false;
02366 
02367   return !Subtarget->isThumb1Only();
02368 }
02369 
02370 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
02371 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
02372 // one of the above-mentioned nodes. It has to be wrapped because otherwise
02373 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
02374 // be used to form addressing modes. These wrapped nodes will be selected
02375 // into MOVi.
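      // For example, a ConstantPool operand becomes roughly
      // (ARMISD::Wrapper (TargetConstantPool ...)), and isel then matches the
      // wrapped node when materializing the address.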
02376 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
02377   EVT PtrVT = Op.getValueType();
02378   // FIXME there is no actual debug info here
02379   SDLoc dl(Op);
02380   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
02381   SDValue Res;
02382   if (CP->isMachineConstantPoolEntry())
02383     Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
02384                                     CP->getAlignment());
02385   else
02386     Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
02387                                     CP->getAlignment());
02388   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
02389 }
02390 
02391 unsigned ARMTargetLowering::getJumpTableEncoding() const {
02392   return MachineJumpTableInfo::EK_Inline;
02393 }
02394 
02395 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
02396                                              SelectionDAG &DAG) const {
02397   MachineFunction &MF = DAG.getMachineFunction();
02398   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02399   unsigned ARMPCLabelIndex = 0;
02400   SDLoc DL(Op);
02401   EVT PtrVT = getPointerTy();
02402   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
02403   Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02404   SDValue CPAddr;
02405   if (RelocM == Reloc::Static) {
02406     CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
02407   } else {
02408     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
02409     ARMPCLabelIndex = AFI->createPICLabelUId();
02410     ARMConstantPoolValue *CPV =
02411       ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
02412                                       ARMCP::CPBlockAddress, PCAdj);
02413     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02414   }
02415   CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
02416   SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
02417                                MachinePointerInfo::getConstantPool(),
02418                                false, false, false, 0);
02419   if (RelocM == Reloc::Static)
02420     return Result;
02421   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02422   return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
02423 }
02424 
02425 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
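      // Roughly: form the address of a TLSGD constant-pool entry PC-relatively and
      // pass it to __tls_get_addr; the call's return value is the variable's address.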
02426 SDValue
02427 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
02428                                                  SelectionDAG &DAG) const {
02429   SDLoc dl(GA);
02430   EVT PtrVT = getPointerTy();
02431   unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
02432   MachineFunction &MF = DAG.getMachineFunction();
02433   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02434   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02435   ARMConstantPoolValue *CPV =
02436     ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
02437                                     ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
02438   SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02439   Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
02440   Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
02441                          MachinePointerInfo::getConstantPool(),
02442                          false, false, false, 0);
02443   SDValue Chain = Argument.getValue(1);
02444 
02445   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02446   Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
02447 
02448   // call __tls_get_addr.
02449   ArgListTy Args;
02450   ArgListEntry Entry;
02451   Entry.Node = Argument;
02452   Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
02453   Args.push_back(Entry);
02454 
02455   // FIXME: is there useful debug info available here?
02456   TargetLowering::CallLoweringInfo CLI(DAG);
02457   CLI.setDebugLoc(dl).setChain(Chain)
02458     .setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
02459                DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args),
02460                0);
02461 
02462   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
02463   return CallResult.first;
02464 }
02465 
02466 // Lower ISD::GlobalTLSAddress using the "initial exec" or
02467 // "local exec" model.
02468 SDValue
02469 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
02470                                         SelectionDAG &DAG,
02471                                         TLSModel::Model model) const {
02472   const GlobalValue *GV = GA->getGlobal();
02473   SDLoc dl(GA);
02474   SDValue Offset;
02475   SDValue Chain = DAG.getEntryNode();
02476   EVT PtrVT = getPointerTy();
02477   // Get the Thread Pointer
02478   SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
02479 
02480   if (model == TLSModel::InitialExec) {
02481     MachineFunction &MF = DAG.getMachineFunction();
02482     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02483     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02484     // Initial exec model.
02485     unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
02486     ARMConstantPoolValue *CPV =
02487       ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
02488                                       ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
02489                                       true);
02490     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02491     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
02492     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
02493                          MachinePointerInfo::getConstantPool(),
02494                          false, false, false, 0);
02495     Chain = Offset.getValue(1);
02496 
02497     SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02498     Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
02499 
02500     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
02501                          MachinePointerInfo::getConstantPool(),
02502                          false, false, false, 0);
02503   } else {
02504     // local exec model
02505     assert(model == TLSModel::LocalExec);
02506     ARMConstantPoolValue *CPV =
02507       ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
02508     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02509     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
02510     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
02511                          MachinePointerInfo::getConstantPool(),
02512                          false, false, false, 0);
02513   }
02514 
02515   // The address of the thread-local variable is the sum of the thread
02516   // pointer and the offset of the variable.
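        // E.g. for the local-exec model this is roughly addr = thread_pointer + TPOFF(GV).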
02517   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
02518 }
02519 
02520 SDValue
02521 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
02522   // TODO: implement the "local dynamic" model
02523   assert(Subtarget->isTargetELF() &&
02524          "TLS not implemented for non-ELF targets");
02525   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
02526 
02527   TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
02528 
02529   switch (model) {
02530     case TLSModel::GeneralDynamic:
02531     case TLSModel::LocalDynamic:
02532       return LowerToTLSGeneralDynamicModel(GA, DAG);
02533     case TLSModel::InitialExec:
02534     case TLSModel::LocalExec:
02535       return LowerToTLSExecModels(GA, DAG, model);
02536   }
02537   llvm_unreachable("bogus TLS model");
02538 }
02539 
02540 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
02541                                                  SelectionDAG &DAG) const {
02542   EVT PtrVT = getPointerTy();
02543   SDLoc dl(Op);
02544   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02545   if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
02546     bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
02547     ARMConstantPoolValue *CPV =
02548       ARMConstantPoolConstant::Create(GV,
02549                                       UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
02550     SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02551     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02552     SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
02553                                  CPAddr,
02554                                  MachinePointerInfo::getConstantPool(),
02555                                  false, false, false, 0);
02556     SDValue Chain = Result.getValue(1);
02557     SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
02558     Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
02559     if (!UseGOTOFF)
02560       Result = DAG.getLoad(PtrVT, dl, Chain, Result,
02561                            MachinePointerInfo::getGOT(),
02562                            false, false, false, 0);
02563     return Result;
02564   }
02565 
02566   // If we have T2 ops, we can materialize the address directly via a
02567   // movw/movt pair. This is always cheaper.
02568   if (Subtarget->useMovt(DAG.getMachineFunction())) {
02569     ++NumMovwMovt;
02570     // FIXME: Once remat is capable of dealing with instructions with register
02571     // operands, expand this into two nodes.
02572     return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
02573                        DAG.getTargetGlobalAddress(GV, dl, PtrVT));
02574   } else {
02575     SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
02576     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02577     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02578                        MachinePointerInfo::getConstantPool(),
02579                        false, false, false, 0);
02580   }
02581 }
02582 
02583 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
02584                                                     SelectionDAG &DAG) const {
02585   EVT PtrVT = getPointerTy();
02586   SDLoc dl(Op);
02587   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02588   Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02589 
02590   if (Subtarget->useMovt(DAG.getMachineFunction()))
02591     ++NumMovwMovt;
02592 
02593   // FIXME: Once remat is capable of dealing with instructions with register
02594   // operands, expand this into multiple nodes.
02595   unsigned Wrapper =
02596       RelocM == Reloc::PIC_ ? ARMISD::WrapperPIC : ARMISD::Wrapper;
02597 
02598   SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
02599   SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
02600 
02601   if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
02602     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
02603                          MachinePointerInfo::getGOT(), false, false, false, 0);
02604   return Result;
02605 }
02606 
02607 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
02608                                                      SelectionDAG &DAG) const {
02609   assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
02610   assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
02611          "Windows on ARM expects to use movw/movt");
02612 
02613   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02614   const ARMII::TOF TargetFlags =
02615     (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG);
02616   EVT PtrVT = getPointerTy();
02617   SDValue Result;
02618   SDLoc DL(Op);
02619 
02620   ++NumMovwMovt;
02621 
02622   // FIXME: Once remat is capable of dealing with instructions with register
02623   // operands, expand this into two nodes.
02624   Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
02625                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
02626                                                   TargetFlags));
02627   if (GV->hasDLLImportStorageClass())
02628     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
02629                          MachinePointerInfo::getGOT(), false, false, false, 0);
02630   return Result;
02631 }
02632 
02633 SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
02634                                                     SelectionDAG &DAG) const {
02635   assert(Subtarget->isTargetELF() &&
02636          "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
02637   MachineFunction &MF = DAG.getMachineFunction();
02638   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02639   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02640   EVT PtrVT = getPointerTy();
02641   SDLoc dl(Op);
02642   unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
02643   ARMConstantPoolValue *CPV =
02644     ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_",
02645                                   ARMPCLabelIndex, PCAdj);
02646   SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02647   CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02648   SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02649                                MachinePointerInfo::getConstantPool(),
02650                                false, false, false, 0);
02651   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02652   return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
02653 }
02654 
02655 SDValue
02656 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
02657   SDLoc dl(Op);
02658   SDValue Val = DAG.getConstant(0, MVT::i32);
02659   return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
02660                      DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
02661                      Op.getOperand(1), Val);
02662 }
02663 
02664 SDValue
02665 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
02666   SDLoc dl(Op);
02667   return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
02668                      Op.getOperand(1), DAG.getConstant(0, MVT::i32));
02669 }
02670 
02671 SDValue
02672 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
02673                                           const ARMSubtarget *Subtarget) const {
02674   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
02675   SDLoc dl(Op);
02676   switch (IntNo) {
02677   default: return SDValue();    // Don't custom lower most intrinsics.
02678   case Intrinsic::arm_rbit: {
02679     assert(Op.getOperand(1).getValueType() == MVT::i32 &&
02680            "RBIT intrinsic must have i32 type!");
02681     return DAG.getNode(ARMISD::RBIT, dl, MVT::i32, Op.getOperand(1));
02682   }
02683   case Intrinsic::arm_thread_pointer: {
02684     EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02685     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
02686   }
02687   case Intrinsic::eh_sjlj_lsda: {
02688     MachineFunction &MF = DAG.getMachineFunction();
02689     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02690     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02691     EVT PtrVT = getPointerTy();
02692     Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02693     SDValue CPAddr;
02694     unsigned PCAdj = (RelocM != Reloc::PIC_)
02695       ? 0 : (Subtarget->isThumb() ? 4 : 8);
02696     ARMConstantPoolValue *CPV =
02697       ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
02698                                       ARMCP::CPLSDA, PCAdj);
02699     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02700     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02701     SDValue Result =
02702       DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02703                   MachinePointerInfo::getConstantPool(),
02704                   false, false, false, 0);
02705 
02706     if (RelocM == Reloc::PIC_) {
02707       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02708       Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
02709     }
02710     return Result;
02711   }
02712   case Intrinsic::arm_neon_vmulls:
02713   case Intrinsic::arm_neon_vmullu: {
02714     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
02715       ? ARMISD::VMULLs : ARMISD::VMULLu;
02716     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
02717                        Op.getOperand(1), Op.getOperand(2));
02718   }
02719   }
02720 }
02721 
02722 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
02723                                  const ARMSubtarget *Subtarget) {
02724   // FIXME: handle "fence singlethread" more efficiently.
02725   SDLoc dl(Op);
02726   if (!Subtarget->hasDataBarrier()) {
02727     // Some ARMv6 CPUs can support data barriers with an mcr instruction.
02728     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
02729     // here.
02730     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
02731            "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
02732     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
02733                        DAG.getConstant(0, MVT::i32));
02734   }
02735 
02736   ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
02737   AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
02738   ARM_MB::MemBOpt Domain = ARM_MB::ISH;
02739   if (Subtarget->isMClass()) {
02740     // Only a full system barrier exists in the M-class architectures.
02741     Domain = ARM_MB::SY;
02742   } else if (Subtarget->isSwift() && Ord == Release) {
02743     // Swift happens to implement ISHST barriers in a way that's compatible with
02744     // Release semantics but weaker than ISH so we'd be fools not to use
02745     // it. Beware: other processors probably don't!
02746     Domain = ARM_MB::ISHST;
02747   }
02748 
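        // The fence is emitted as a dmb in the chosen domain, e.g. roughly "dmb ish"
        // in the common case or "dmb sy" on M-class cores.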
02749   return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
02750                      DAG.getConstant(Intrinsic::arm_dmb, MVT::i32),
02751                      DAG.getConstant(Domain, MVT::i32));
02752 }
02753 
02754 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
02755                              const ARMSubtarget *Subtarget) {
02756   // Pre-v5TE ARM and Thumb1 do not have preload instructions.
02757   if (!(Subtarget->isThumb2() ||
02758         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
02759     // Just preserve the chain.
02760     return Op.getOperand(0);
02761 
02762   SDLoc dl(Op);
02763   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
02764   if (!isRead &&
02765       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
02766     // ARMv7 with MP extension has PLDW.
02767     return Op.getOperand(0);
02768 
02769   unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
02770   if (Subtarget->isThumb()) {
02771     // Invert the bits.
02772     isRead = ~isRead & 1;
02773     isData = ~isData & 1;
02774   }
02775 
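        // ARMISD::PRELOAD is later matched to (roughly) pld, pldw, or pli depending
        // on the isRead and isData operands.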
02776   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
02777                      Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
02778                      DAG.getConstant(isData, MVT::i32));
02779 }
02780 
02781 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
02782   MachineFunction &MF = DAG.getMachineFunction();
02783   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
02784 
02785   // vastart just stores the address of the VarArgsFrameIndex slot into the
02786   // memory location argument.
02787   SDLoc dl(Op);
02788   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02789   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
02790   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
02791   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
02792                       MachinePointerInfo(SV), false, false, 0);
02793 }
02794 
02795 SDValue
02796 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
02797                                         SDValue &Root, SelectionDAG &DAG,
02798                                         SDLoc dl) const {
02799   MachineFunction &MF = DAG.getMachineFunction();
02800   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02801 
02802   const TargetRegisterClass *RC;
02803   if (AFI->isThumb1OnlyFunction())
02804     RC = &ARM::tGPRRegClass;
02805   else
02806     RC = &ARM::GPRRegClass;
02807 
02808   // Transform the arguments stored in physical registers into virtual ones.
02809   unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
02810   SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
02811 
02812   SDValue ArgValue2;
02813   if (NextVA.isMemLoc()) {
02814     MachineFrameInfo *MFI = MF.getFrameInfo();
02815     int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);
02816 
02817     // Create load node to retrieve arguments from the stack.
02818     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
02819     ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
02820                             MachinePointerInfo::getFixedStack(FI),
02821                             false, false, false, 0);
02822   } else {
02823     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
02824     ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
02825   }
02826   if (!Subtarget->isLittle())
02827     std::swap (ArgValue, ArgValue2);
02828   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
02829 }
02830 
02831 // The remaining GPRs hold either the beginning of variable-argument
02832 // data, or the beginning of an aggregate passed by value (usually
02833 // byval).  Either way, we allocate stack slots adjacent to the data
02834 // provided by our caller, and store the unallocated registers there.
02835 // If this is a variadic function, the va_list pointer will begin with
02836 // these values; otherwise, this reassembles a (byval) structure that
02837 // was split between registers and memory.
02838 // Return: The frame index that the registers were stored into.
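      // For example, a byval aggregate split as r2+r3 plus stack has r2 and r3 stored
      // into a fixed stack object contiguous with the caller-provided part, so the
      // returned frame index addresses the reassembled aggregate (illustrative).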
02839 int
02840 ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
02841                                   SDLoc dl, SDValue &Chain,
02842                                   const Value *OrigArg,
02843                                   unsigned InRegsParamRecordIdx,
02844                                   int ArgOffset,
02845                                   unsigned ArgSize) const {
02846   // Currently, two use cases are possible:
02847   // Case #1. A non-varargs function, and we meet the first byval parameter.
02848   //          Set up the first unallocated register as the first byval register
02849   //          and consume all remaining registers
02850   //          (these two actions are performed by the HandleByVal method).
02851   //          Then, here, we initialize the stack frame with
02852   //          "store-reg" instructions.
02853   // Case #2. A varargs function that doesn't contain byval parameters.
02854   //          The same: consume all remaining unallocated registers and
02855   //          initialize the stack frame.
02856 
02857   MachineFunction &MF = DAG.getMachineFunction();
02858   MachineFrameInfo *MFI = MF.getFrameInfo();
02859   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02860   unsigned RBegin, REnd;
02861   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
02862     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
02863   } else {
02864     unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
02865     RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
02866     REnd = ARM::R4;
02867   }
02868 
02869   if (REnd != RBegin)
02870     ArgOffset = -4 * (ARM::R4 - RBegin);
02871 
02872   int FrameIndex = MFI->CreateFixedObject(ArgSize, ArgOffset, false);
02873   SDValue FIN = DAG.getFrameIndex(FrameIndex, getPointerTy());
02874 
02875   SmallVector<SDValue, 4> MemOps;
02876   const TargetRegisterClass *RC =
02877       AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
02878 
02879   for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
02880     unsigned VReg = MF.addLiveIn(Reg, RC);
02881     SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
02882     SDValue Store =
02883         DAG.getStore(Val.getValue(1), dl, Val, FIN,
02884                      MachinePointerInfo(OrigArg, 4 * i), false, false, 0);
02885     MemOps.push_back(Store);
02886     FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
02887                       DAG.getConstant(4, getPointerTy()));
02888   }
02889 
02890   if (!MemOps.empty())
02891     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
02892   return FrameIndex;
02893 }
02894 
02895 // Set up the stack frame that the va_list pointer will start from.
02896 void
02897 ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
02898                                         SDLoc dl, SDValue &Chain,
02899                                         unsigned ArgOffset,
02900                                         unsigned TotalArgRegsSaveSize,
02901                                         bool ForceMutable) const {
02902   MachineFunction &MF = DAG.getMachineFunction();
02903   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02904 
02905   // Try to store any remaining integer argument regs
02906   // to their spots on the stack so that they may be loaded by dereferencing
02907   // the result of va_next.
02908   // If there are no regs to be stored, just point the address past the last
02909   // argument passed via the stack.
02910   int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
02911                                   CCInfo.getInRegsParamsCount(),
02912                                   CCInfo.getNextStackOffset(), 4);
02913   AFI->setVarArgsFrameIndex(FrameIndex);
02914 }
02915 
02916 SDValue
02917 ARMTargetLowering::LowerFormalArguments(SDValue Chain,
02918                                         CallingConv::ID CallConv, bool isVarArg,
02919                                         const SmallVectorImpl<ISD::InputArg>
02920                                           &Ins,
02921                                         SDLoc dl, SelectionDAG &DAG,
02922                                         SmallVectorImpl<SDValue> &InVals)
02923                                           const {
02924   MachineFunction &MF = DAG.getMachineFunction();
02925   MachineFrameInfo *MFI = MF.getFrameInfo();
02926 
02927   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02928 
02929   // Assign locations to all of the incoming arguments.
02930   SmallVector<CCValAssign, 16> ArgLocs;
02931   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
02932                     *DAG.getContext(), Prologue);
02933   CCInfo.AnalyzeFormalArguments(Ins,
02934                                 CCAssignFnForNode(CallConv, /* Return*/ false,
02935                                                   isVarArg));
02936 
02937   SmallVector<SDValue, 16> ArgValues;
02938   SDValue ArgValue;
02939   Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
02940   unsigned CurArgIdx = 0;
02941 
02942   // Initially ArgRegsSaveSize is zero.
02943   // Then we increase this value each time we meet a byval parameter.
02944   // We also increase this value in the case of a varargs function.
02945   AFI->setArgRegsSaveSize(0);
02946 
02947   // Calculate the amount of stack space that we need to allocate to store
02948   // byval and variadic arguments that are passed in registers.
02949   // We need to know this before we allocate the first byval or variadic
02950   // argument, as they will be allocated a stack slot below the CFA (Canonical
02951   // Frame Address, the stack pointer at entry to the function).
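        // E.g. if the first register that must be saved is r2, this reserves
        // 4 * (r4 - r2) = 8 bytes for r2 and r3 below the CFA (illustrative).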
02952   unsigned ArgRegBegin = ARM::R4;
02953   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
02954     if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
02955       break;
02956 
02957     CCValAssign &VA = ArgLocs[i];
02958     unsigned Index = VA.getValNo();
02959     ISD::ArgFlagsTy Flags = Ins[Index].Flags;
02960     if (!Flags.isByVal())
02961       continue;
02962 
02963     assert(VA.isMemLoc() && "unexpected byval pointer in reg");
02964     unsigned RBegin, REnd;
02965     CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
02966     ArgRegBegin = std::min(ArgRegBegin, RBegin);
02967 
02968     CCInfo.nextInRegsParam();
02969   }
02970   CCInfo.rewindByValRegsInfo();
02971 
02972   int lastInsIndex = -1;
02973   if (isVarArg && MFI->hasVAStart()) {
02974     unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
02975     if (RegIdx != array_lengthof(GPRArgRegs))
02976       ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
02977   }
02978 
02979   unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
02980   AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
02981 
02982   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
02983     CCValAssign &VA = ArgLocs[i];
02984     if (Ins[VA.getValNo()].isOrigArg()) {
02985       std::advance(CurOrigArg,
02986                    Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
02987       CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
02988     }
02989     // Arguments stored in registers.
02990     if (VA.isRegLoc()) {
02991       EVT RegVT = VA.getLocVT();
02992 
02993       if (VA.needsCustom()) {
02994         // f64 and vector types are split up into multiple registers or
02995         // combinations of registers and stack slots.
02996         if (VA.getLocVT() == MVT::v2f64) {
02997           SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
02998                                                    Chain, DAG, dl);
02999           VA = ArgLocs[++i]; // skip ahead to next loc
03000           SDValue ArgValue2;
03001           if (VA.isMemLoc()) {
03002             int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
03003             SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
03004             ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
03005                                     MachinePointerInfo::getFixedStack(FI),
03006                                     false, false, false, 0);
03007           } else {
03008             ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
03009                                              Chain, DAG, dl);
03010           }
03011           ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
03012           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
03013                                  ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
03014           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
03015                                  ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
03016         } else
03017           ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
03018 
03019       } else {
03020         const TargetRegisterClass *RC;
03021 
03022         if (RegVT == MVT::f32)
03023           RC = &ARM::SPRRegClass;
03024         else if (RegVT == MVT::f64)
03025           RC = &ARM::DPRRegClass;
03026         else if (RegVT == MVT::v2f64)
03027           RC = &ARM::QPRRegClass;
03028         else if (RegVT == MVT::i32)
03029           RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
03030                                            : &ARM::GPRRegClass;
03031         else
03032           llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
03033 
03034         // Transform the arguments in physical registers into virtual ones.
03035         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
03036         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
03037       }
03038 
03039       // If this is an 8 or 16-bit value, it is really passed promoted
03040       // to 32 bits.  Insert an assert[sz]ext to capture this, then
03041       // truncate to the right size.
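            // E.g. an i8 argument arrives promoted in a 32-bit register; we emit an
            // AssertSext/AssertZext on the i32 value and then truncate back to i8.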
03042       switch (VA.getLocInfo()) {
03043       default: llvm_unreachable("Unknown loc info!");
03044       case CCValAssign::Full: break;
03045       case CCValAssign::BCvt:
03046         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
03047         break;
03048       case CCValAssign::SExt:
03049         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
03050                                DAG.getValueType(VA.getValVT()));
03051         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
03052         break;
03053       case CCValAssign::ZExt:
03054         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
03055                                DAG.getValueType(VA.getValVT()));
03056         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
03057         break;
03058       }
03059 
03060       InVals.push_back(ArgValue);
03061 
03062     } else { // VA.isRegLoc()
03063 
03064       // sanity check
03065       assert(VA.isMemLoc());
03066       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
03067 
03068       int index = VA.getValNo();
03069 
03070       // Some Ins[] entries become multiple ArgLoc[] entries.
03071       // Process them only once.
03072       if (index != lastInsIndex)
03073         {
03074           ISD::ArgFlagsTy Flags = Ins[index].Flags;
03075           // FIXME: For now, all byval parameter objects are marked mutable.
03076           // This can be changed with more analysis.
03077           // In the case of tail call optimization, mark all arguments mutable,
03078           // since they could be overwritten by the lowering of arguments for
03079           // a tail call.
03080           if (Flags.isByVal()) {
03081             assert(Ins[index].isOrigArg() &&
03082                    "Byval arguments cannot be implicit");
03083             unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
03084 
03085             int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, CurOrigArg,
03086                                             CurByValIndex, VA.getLocMemOffset(),
03087                                             Flags.getByValSize());
03088             InVals.push_back(DAG.getFrameIndex(FrameIndex, getPointerTy()));
03089             CCInfo.nextInRegsParam();
03090           } else {
03091             unsigned FIOffset = VA.getLocMemOffset();
03092             int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
03093                                             FIOffset, true);
03094 
03095             // Create load nodes to retrieve arguments from the stack.
03096             SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
03097             InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
03098                                          MachinePointerInfo::getFixedStack(FI),
03099                                          false, false, false, 0));
03100           }
03101           lastInsIndex = index;
03102         }
03103     }
03104   }
03105 
03106   // varargs
03107   if (isVarArg && MFI->hasVAStart())
03108     VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
03109                          CCInfo.getNextStackOffset(),
03110                          TotalArgRegsSaveSize);
03111 
03112   AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
03113 
03114   return Chain;
03115 }
03116 
03117 /// isFloatingPointZero - Return true if this is +0.0.
03118 static bool isFloatingPointZero(SDValue Op) {
03119   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
03120     return CFP->getValueAPF().isPosZero();
03121   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
03122     // Maybe this has already been legalized into the constant pool?
03123     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
03124       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
03125       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
03126         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
03127           return CFP->getValueAPF().isPosZero();
03128     }
03129   } else if (Op->getOpcode() == ISD::BITCAST &&
03130              Op->getValueType(0) == MVT::f64) {
03131     // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
03132     // created by LowerConstantFP().
03133     SDValue BitcastOp = Op->getOperand(0);
03134     if (BitcastOp->getOpcode() == ARMISD::VMOVIMM) {
03135       SDValue MoveOp = BitcastOp->getOperand(0);
03136       if (MoveOp->getOpcode() == ISD::TargetConstant &&
03137           cast<ConstantSDNode>(MoveOp)->getZExtValue() == 0) {
03138         return true;
03139       }
03140     }
03141   }
03142   return false;
03143 }
03144 
03145 /// Returns an appropriate ARM CMP (cmp) and corresponding condition code for
03146 /// the given operands.
03147 SDValue
03148 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
03149                              SDValue &ARMcc, SelectionDAG &DAG,
03150                              SDLoc dl) const {
03151   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
03152     unsigned C = RHSC->getZExtValue();
03153     if (!isLegalICmpImmediate(C)) {
03154       // The constant does not fit; try adjusting it by one.
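            // For example (illustrative), if 0x101 is not a legal compare immediate
            // but 0x100 is, (x u< 0x101) is rewritten below as (x u<= 0x100).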
03155       switch (CC) {
03156       default: break;
03157       case ISD::SETLT:
03158       case ISD::SETGE:
03159         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
03160           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
03161           RHS = DAG.getConstant(C-1, MVT::i32);
03162         }
03163         break;
03164       case ISD::SETULT:
03165       case ISD::SETUGE:
03166         if (C != 0 && isLegalICmpImmediate(C-1)) {
03167           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
03168           RHS = DAG.getConstant(C-1, MVT::i32);
03169         }
03170         break;
03171       case ISD::SETLE:
03172       case ISD::SETGT:
03173         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
03174           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
03175           RHS = DAG.getConstant(C+1, MVT::i32);
03176         }
03177         break;
03178       case ISD::SETULE:
03179       case ISD::SETUGT:
03180         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
03181           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
03182           RHS = DAG.getConstant(C+1, MVT::i32);
03183         }
03184         break;
03185       }
03186     }
03187   }
03188 
03189   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03190   ARMISD::NodeType CompareType;
03191   switch (CondCode) {
03192   default:
03193     CompareType = ARMISD::CMP;
03194     break;
03195   case ARMCC::EQ:
03196   case ARMCC::NE:
03197     // Uses only Z Flag
03198     CompareType = ARMISD::CMPZ;
03199     break;
03200   }
03201   ARMcc = DAG.getConstant(CondCode, MVT::i32);
03202   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
03203 }
03204 
03205 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
03206 SDValue
03207 ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
03208                              SDLoc dl) const {
03209   assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
03210   SDValue Cmp;
03211   if (!isFloatingPointZero(RHS))
03212     Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
03213   else
03214     Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
03215   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
03216 }
03217 
03218 /// duplicateCmp - Glue values can have only one use, so this function
03219 /// duplicates a comparison node.
03220 SDValue
03221 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
03222   unsigned Opc = Cmp.getOpcode();
03223   SDLoc DL(Cmp);
03224   if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
03225     return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
03226 
03227   assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
03228   Cmp = Cmp.getOperand(0);
03229   Opc = Cmp.getOpcode();
03230   if (Opc == ARMISD::CMPFP)
03231     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
03232   else {
03233     assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
03234     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
03235   }
03236   return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
03237 }
03238 
03239 std::pair<SDValue, SDValue>
03240 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
03241                                  SDValue &ARMcc) const {
03242   assert(Op.getValueType() == MVT::i32 &&  "Unsupported value type");
03243 
03244   SDValue Value, OverflowCmp;
03245   SDValue LHS = Op.getOperand(0);
03246   SDValue RHS = Op.getOperand(1);
03247 
03248 
03249   // FIXME: We are currently always generating CMPs because we don't support
03250   // generating CMN through the backend. This is not as good as the natural
03251   // CMP case because it causes a register dependency and cannot be folded
03252   // later.
03253 
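        // For example, ISD::SADDO below is lowered roughly as
        //   Value = add LHS, RHS; cmp Value, LHS
        // and the V flag of that compare (condition VC/VS) indicates whether the
        // signed addition overflowed.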
03254   switch (Op.getOpcode()) {
03255   default:
03256     llvm_unreachable("Unknown overflow instruction!");
03257   case ISD::SADDO:
03258     ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
03259     Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
03260     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
03261     break;
03262   case ISD::UADDO:
03263     ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
03264     Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
03265     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
03266     break;
03267   case ISD::SSUBO:
03268     ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
03269     Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
03270     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
03271     break;
03272   case ISD::USUBO:
03273     ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
03274     Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
03275     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
03276     break;
03277   } // switch (...)
03278 
03279   return std::make_pair(Value, OverflowCmp);
03280 }
03281 
03282 
03283 SDValue
03284 ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
03285   // Let legalize expand this if it isn't a legal type yet.
03286   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
03287     return SDValue();
03288 
03289   SDValue Value, OverflowCmp;
03290   SDValue ARMcc;
03291   std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
03292   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03293   // We use 0 and 1 as false and true values.
03294   SDValue TVal = DAG.getConstant(1, MVT::i32);
03295   SDValue FVal = DAG.getConstant(0, MVT::i32);
03296   EVT VT = Op.getValueType();
03297 
03298   SDValue Overflow = DAG.getNode(ARMISD::CMOV, SDLoc(Op), VT, TVal, FVal,
03299                                  ARMcc, CCR, OverflowCmp);
03300 
03301   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
03302   return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), VTs, Value, Overflow);
03303 }
03304 
03305 
03306 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
03307   SDValue Cond = Op.getOperand(0);
03308   SDValue SelectTrue = Op.getOperand(1);
03309   SDValue SelectFalse = Op.getOperand(2);
03310   SDLoc dl(Op);
03311   unsigned Opc = Cond.getOpcode();
03312 
03313   if (Cond.getResNo() == 1 &&
03314       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
03315        Opc == ISD::USUBO)) {
03316     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
03317       return SDValue();
03318 
03319     SDValue Value, OverflowCmp;
03320     SDValue ARMcc;
03321     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
03322     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03323     EVT VT = Op.getValueType();
03324 
03325     return getCMOV(SDLoc(Op), VT, SelectTrue, SelectFalse, ARMcc, CCR,
03326                    OverflowCmp, DAG);
03327   }
03328 
03329   // Convert:
03330   //
03331   //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
03332   //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
03333   //
03334   if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
03335     const ConstantSDNode *CMOVTrue =
03336       dyn_cast<ConstantSDNode>(Cond.getOperand(0));
03337     const ConstantSDNode *CMOVFalse =
03338       dyn_cast<ConstantSDNode>(Cond.getOperand(1));
03339 
03340     if (CMOVTrue && CMOVFalse) {
03341       unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
03342       unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
03343 
03344       SDValue True;
03345       SDValue False;
03346       if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
03347         True = SelectTrue;
03348         False = SelectFalse;
03349       } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
03350         True = SelectFalse;
03351         False = SelectTrue;
03352       }
03353 
03354       if (True.getNode() && False.getNode()) {
03355         EVT VT = Op.getValueType();
03356         SDValue ARMcc = Cond.getOperand(2);
03357         SDValue CCR = Cond.getOperand(3);
03358         SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
03359         assert(True.getValueType() == VT);
03360         return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
03361       }
03362     }
03363   }
03364 
03365   // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
03366   // undefined bits before doing a full-word comparison with zero.
03367   Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
03368                      DAG.getConstant(1, Cond.getValueType()));
03369 
03370   return DAG.getSelectCC(dl, Cond,
03371                          DAG.getConstant(0, Cond.getValueType()),
03372                          SelectTrue, SelectFalse, ISD::SETNE);
03373 }
03374 
03375 static ISD::CondCode getInverseCCForVSEL(ISD::CondCode CC) {
03376   if (CC == ISD::SETNE)
03377     return ISD::SETEQ;
03378   return ISD::getSetCCInverse(CC, true);
03379 }
03380 
03381 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
03382                                  bool &swpCmpOps, bool &swpVselOps) {
03383   // Start by selecting the GE condition code for opcodes that return true for
03384   // 'equality'
03385   if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
03386       CC == ISD::SETULE)
03387     CondCode = ARMCC::GE;
03388 
03389   // and GT for opcodes that return false for 'equality'.
03390   else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
03391            CC == ISD::SETULT)
03392     CondCode = ARMCC::GT;
03393 
03394   // Since we are constrained to GE/GT, if the opcode contains 'less', we need
03395   // to swap the compare operands.
03396   if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
03397       CC == ISD::SETULT)
03398     swpCmpOps = true;
03399 
03400   // Both GT and GE are ordered comparisons, and return false for 'unordered'.
03401   // If we have an unordered opcode, we need to swap the operands to the VSEL
03402   // instruction (effectively negating the condition).
03403   //
03404   // This also has the effect of swapping which one of 'less' or 'greater'
03405   // returns true, so we also swap the compare operands. It also switches
03406   // whether we return true for 'equality', so we compensate by picking the
03407   // opposite condition code to our original choice.
03408   if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
03409       CC == ISD::SETUGT) {
03410     swpCmpOps = !swpCmpOps;
03411     swpVselOps = !swpVselOps;
03412     CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
03413   }
03414 
03415   // 'ordered' is 'anything but unordered', so use the VS condition code and
03416   // swap the VSEL operands.
03417   if (CC == ISD::SETO) {
03418     CondCode = ARMCC::VS;
03419     swpVselOps = true;
03420   }
03421 
03422   // 'unordered or not equal' is 'anything but equal', so use the EQ condition
03423   // code and swap the VSEL operands.
03424   if (CC == ISD::SETUNE) {
03425     CondCode = ARMCC::EQ;
03426     swpVselOps = true;
03427   }
03428 }
03429 
03430 SDValue ARMTargetLowering::getCMOV(SDLoc dl, EVT VT, SDValue FalseVal,
03431                                    SDValue TrueVal, SDValue ARMcc, SDValue CCR,
03432                                    SDValue Cmp, SelectionDAG &DAG) const {
03433   if (Subtarget->isFPOnlySP() && VT == MVT::f64) {
03434     FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
03435                            DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
03436     TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
03437                           DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
03438 
03439     SDValue TrueLow = TrueVal.getValue(0);
03440     SDValue TrueHigh = TrueVal.getValue(1);
03441     SDValue FalseLow = FalseVal.getValue(0);
03442     SDValue FalseHigh = FalseVal.getValue(1);
03443 
03444     SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
03445                               ARMcc, CCR, Cmp);
03446     SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
03447                                ARMcc, CCR, duplicateCmp(Cmp, DAG));
03448 
03449     return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
03450   } else {
03451     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
03452                        Cmp);
03453   }
03454 }
03455 
03456 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
03457   EVT VT = Op.getValueType();
03458   SDValue LHS = Op.getOperand(0);
03459   SDValue RHS = Op.getOperand(1);
03460   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
03461   SDValue TrueVal = Op.getOperand(2);
03462   SDValue FalseVal = Op.getOperand(3);
03463   SDLoc dl(Op);
03464 
03465   if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
03466     DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
03467                                                     dl);
03468 
03469     // If softenSetCCOperands only returned one value, we should compare it to
03470     // zero.
03471     if (!RHS.getNode()) {
03472       RHS = DAG.getConstant(0, LHS.getValueType());
03473       CC = ISD::SETNE;
03474     }
03475   }
03476 
03477   if (LHS.getValueType() == MVT::i32) {
03478     // Try to generate VSEL on ARMv8.
03479     // The VSEL instruction can't use all the usual ARM condition
03480     // codes: it only has two bits to select the condition code, so it's
03481     // constrained to use only GE, GT, VS and EQ.
03482     //
03483     // To implement all the various ISD::SETXXX opcodes, we sometimes need to
03484     // swap the operands of the previous compare instruction (effectively
03485     // inverting the compare condition, swapping 'less' and 'greater') and
03486     // sometimes need to swap the operands to the VSEL (which inverts the
03487     // condition in the sense of firing whenever the previous condition didn't)
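          // For example, a signed 'less than' select can illustratively be handled
          // as GE with the true and false operands swapped.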
03488     if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
03489                                     TrueVal.getValueType() == MVT::f64)) {
03490       ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03491       if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
03492           CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
03493         CC = getInverseCCForVSEL(CC);
03494         std::swap(TrueVal, FalseVal);
03495       }
03496     }
03497 
03498     SDValue ARMcc;
03499     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03500     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
03501     return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
03502   }
03503 
03504   ARMCC::CondCodes CondCode, CondCode2;
03505   FPCCToARMCC(CC, CondCode, CondCode2);
03506 
03507   // Try to generate VSEL on ARMv8.
03508   if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
03509                                   TrueVal.getValueType() == MVT::f64)) {
03510     // We can select VMAXNM/VMINNM from a compare followed by a select with the
03511     // same operands, as follows:
03512     //   c = fcmp [ogt, olt, ugt, ult] a, b
03513     //   select c, a, b
03514     // We only do this in unsafe-fp-math, because signed zeros and NaNs are
03515     // handled differently than the original code sequence.
03516     if (getTargetMachine().Options.UnsafeFPMath) {
03517       if (LHS == TrueVal && RHS == FalseVal) {
03518         if (CC == ISD::SETOGT || CC == ISD::SETUGT)
03519           return DAG.getNode(ARMISD::VMAXNM, dl, VT, TrueVal, FalseVal);
03520         if (CC == ISD::SETOLT || CC == ISD::SETULT)
03521           return DAG.getNode(ARMISD::VMINNM, dl, VT, TrueVal, FalseVal);
03522       } else if (LHS == FalseVal && RHS == TrueVal) {
03523         if (CC == ISD::SETOLT || CC == ISD::SETULT)
03524           return DAG.getNode(ARMISD::VMAXNM, dl, VT, TrueVal, FalseVal);
03525         if (CC == ISD::SETOGT || CC == ISD::SETUGT)
03526           return DAG.getNode(ARMISD::VMINNM, dl, VT, TrueVal, FalseVal);
03527       }
03528     }
03529 
03530     bool swpCmpOps = false;
03531     bool swpVselOps = false;
03532     checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
03533 
03534     if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
03535         CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
03536       if (swpCmpOps)
03537         std::swap(LHS, RHS);
03538       if (swpVselOps)
03539         std::swap(TrueVal, FalseVal);
03540     }
03541   }
03542 
03543   SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
03544   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
03545   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03546   SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
03547   if (CondCode2 != ARMCC::AL) {
03548     SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32);
03549     // FIXME: Needs another CMP because flag can have but one use.
03550     SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
03551     Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
03552   }
03553   return Result;
03554 }
03555 
03556 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
03557 /// to morph to an integer compare sequence.
03558 static bool canChangeToInt(SDValue Op, bool &SeenZero,
03559                            const ARMSubtarget *Subtarget) {
03560   SDNode *N = Op.getNode();
03561   if (!N->hasOneUse())
03562     // Otherwise it requires moving the value from fp to integer registers.
03563     return false;
03564   if (!N->getNumValues())
03565     return false;
03566   EVT VT = Op.getValueType();
03567   if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
03568     // f32 case is generally profitable. f64 case only makes sense when vcmpe +
03569     // vmrs are very slow, e.g. cortex-a8.
03570     return false;
03571 
03572   if (isFloatingPointZero(Op)) {
03573     SeenZero = true;
03574     return true;
03575   }
03576   return ISD::isNormalLoad(N);
03577 }
03578 
03579 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
03580   if (isFloatingPointZero(Op))
03581     return DAG.getConstant(0, MVT::i32);
03582 
03583   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
03584     return DAG.getLoad(MVT::i32, SDLoc(Op),
03585                        Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
03586                        Ld->isVolatile(), Ld->isNonTemporal(),
03587                        Ld->isInvariant(), Ld->getAlignment());
03588 
03589   llvm_unreachable("Unknown VFP cmp argument!");
03590 }
03591 
03592 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
03593                            SDValue &RetVal1, SDValue &RetVal2) {
03594   if (isFloatingPointZero(Op)) {
03595     RetVal1 = DAG.getConstant(0, MVT::i32);
03596     RetVal2 = DAG.getConstant(0, MVT::i32);
03597     return;
03598   }
03599 
03600   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
03601     SDValue Ptr = Ld->getBasePtr();
03602     RetVal1 = DAG.getLoad(MVT::i32, SDLoc(Op),
03603                           Ld->getChain(), Ptr,
03604                           Ld->getPointerInfo(),
03605                           Ld->isVolatile(), Ld->isNonTemporal(),
03606                           Ld->isInvariant(), Ld->getAlignment());
03607 
03608     EVT PtrType = Ptr.getValueType();
03609     unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
03610     SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(Op),
03611                                  PtrType, Ptr, DAG.getConstant(4, PtrType));
03612     RetVal2 = DAG.getLoad(MVT::i32, SDLoc(Op),
03613                           Ld->getChain(), NewPtr,
03614                           Ld->getPointerInfo().getWithOffset(4),
03615                           Ld->isVolatile(), Ld->isNonTemporal(),
03616                           Ld->isInvariant(), NewAlign);
03617     return;
03618   }
03619 
03620   llvm_unreachable("Unknown VFP cmp argument!");
03621 }
03622 
03623 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
03624 /// f32 and even f64 comparisons to integer ones.
03625 SDValue
03626 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
03627   SDValue Chain = Op.getOperand(0);
03628   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
03629   SDValue LHS = Op.getOperand(2);
03630   SDValue RHS = Op.getOperand(3);
03631   SDValue Dest = Op.getOperand(4);
03632   SDLoc dl(Op);
03633 
03634   bool LHSSeenZero = false;
03635   bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
03636   bool RHSSeenZero = false;
03637   bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
03638   if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
03639     // If unsafe fp math optimization is enabled and there are no other uses of
03640     // the CMP operands, and the condition code is EQ or NE, we can optimize it
03641     // to an integer comparison.
03642     if (CC == ISD::SETOEQ)
03643       CC = ISD::SETEQ;
03644     else if (CC == ISD::SETUNE)
03645       CC = ISD::SETNE;
03646 
03647     SDValue Mask = DAG.getConstant(0x7fffffff, MVT::i32);
03648     SDValue ARMcc;
03649     if (LHS.getValueType() == MVT::f32) {
03650       LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
03651                         bitcastf32Toi32(LHS, DAG), Mask);
03652       RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
03653                         bitcastf32Toi32(RHS, DAG), Mask);
03654       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
03655       SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03656       return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
03657                          Chain, Dest, ARMcc, CCR, Cmp);
03658     }
03659 
03660     SDValue LHS1, LHS2;
03661     SDValue RHS1, RHS2;
03662     expandf64Toi32(LHS, DAG, LHS1, LHS2);
03663     expandf64Toi32(RHS, DAG, RHS1, RHS2);
03664     LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
03665     RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
03666     ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03667     ARMcc = DAG.getConstant(CondCode, MVT::i32);
03668     SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
03669     SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
03670     return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
03671   }
03672 
03673   return SDValue();
03674 }
03675 
03676 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
03677   SDValue Chain = Op.getOperand(0);
03678   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
03679   SDValue LHS = Op.getOperand(2);
03680   SDValue RHS = Op.getOperand(3);
03681   SDValue Dest = Op.getOperand(4);
03682   SDLoc dl(Op);
03683 
03684   if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
03685     DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
03686                                                     dl);
03687 
03688     // If softenSetCCOperands only returned one value, we should compare it to
03689     // zero.
03690     if (!RHS.getNode()) {
03691       RHS = DAG.getConstant(0, LHS.getValueType());
03692       CC = ISD::SETNE;
03693     }
03694   }
03695 
03696   if (LHS.getValueType() == MVT::i32) {
03697     SDValue ARMcc;
03698     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
03699     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03700     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
03701                        Chain, Dest, ARMcc, CCR, Cmp);
03702   }
03703 
03704   assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
03705 
03706   if (getTargetMachine().Options.UnsafeFPMath &&
03707       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
03708        CC == ISD::SETNE || CC == ISD::SETUNE)) {
03709     SDValue Result = OptimizeVFPBrcond(Op, DAG);
03710     if (Result.getNode())
03711       return Result;
03712   }
03713 
03714   ARMCC::CondCodes CondCode, CondCode2;
03715   FPCCToARMCC(CC, CondCode, CondCode2);
03716 
03717   SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
03718   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
03719   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03720   SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
03721   SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
03722   SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
03723   if (CondCode2 != ARMCC::AL) {
03724     ARMcc = DAG.getConstant(CondCode2, MVT::i32);
03725     SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
03726     Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
03727   }
03728   return Res;
03729 }
03730 
03731 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
03732   SDValue Chain = Op.getOperand(0);
03733   SDValue Table = Op.getOperand(1);
03734   SDValue Index = Op.getOperand(2);
03735   SDLoc dl(Op);
03736 
03737   EVT PTy = getPointerTy();
03738   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
03739   ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
03740   SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
03741   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
03742   Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
03743   Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
03744   SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
03745   if (Subtarget->isThumb2()) {
03746     // Thumb2 uses a two-level jump. That is, it jumps into the jump table
03747     // which does another jump to the destination. This also makes it easier
03748     // to translate it to TBB / TBH later.
03749     // FIXME: This might not work if the function is extremely large.
03750     return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
03751                        Addr, Op.getOperand(2), JTI, UId);
03752   }
03753   if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
03754     Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
03755                        MachinePointerInfo::getJumpTable(),
03756                        false, false, false, 0);
03757     Chain = Addr.getValue(1);
03758     Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
03759     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
03760   } else {
03761     Addr = DAG.getLoad(PTy, dl, Chain, Addr,
03762                        MachinePointerInfo::getJumpTable(),
03763                        false, false, false, 0);
03764     Chain = Addr.getValue(1);
03765     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
03766   }
03767 }
03768 
03769 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
03770   EVT VT = Op.getValueType();
03771   SDLoc dl(Op);
03772 
03773   if (Op.getValueType().getVectorElementType() == MVT::i32) {
03774     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
03775       return Op;
03776     return DAG.UnrollVectorOp(Op.getNode());
03777   }
03778 
03779   assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
03780          "Invalid type for custom lowering!");
03781   if (VT != MVT::v4i16)
03782     return DAG.UnrollVectorOp(Op.getNode());
03783 
03784   Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0));
03785   return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
03786 }
03787 
03788 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
03789   EVT VT = Op.getValueType();
03790   if (VT.isVector())
03791     return LowerVectorFP_TO_INT(Op, DAG);
03792   if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) {
03793     RTLIB::Libcall LC;
03794     if (Op.getOpcode() == ISD::FP_TO_SINT)
03795       LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(),
03796                               Op.getValueType());
03797     else
03798       LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(),
03799                               Op.getValueType());
03800     return makeLibCall(DAG, LC, Op.getValueType(), &Op.getOperand(0), 1,
03801                        /*isSigned*/ false, SDLoc(Op)).first;
03802   }
03803 
03804   return Op;
03805 }
03806 
03807 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
03808   EVT VT = Op.getValueType();
03809   SDLoc dl(Op);
03810 
03811   if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
03812     if (VT.getVectorElementType() == MVT::f32)
03813       return Op;
03814     return DAG.UnrollVectorOp(Op.getNode());
03815   }
03816 
03817   assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
03818          "Invalid type for custom lowering!");
03819   if (VT != MVT::v4f32)
03820     return DAG.UnrollVectorOp(Op.getNode());
03821 
03822   unsigned CastOpc;
03823   unsigned Opc;
03824   switch (Op.getOpcode()) {
03825   default: llvm_unreachable("Invalid opcode!");
03826   case ISD::SINT_TO_FP:
03827     CastOpc = ISD::SIGN_EXTEND;
03828     Opc = ISD::SINT_TO_FP;
03829     break;
03830   case ISD::UINT_TO_FP:
03831     CastOpc = ISD::ZERO_EXTEND;
03832     Opc = ISD::UINT_TO_FP;
03833     break;
03834   }
03835 
03836   Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
03837   return DAG.getNode(Opc, dl, VT, Op);
03838 }
03839 
03840 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
03841   EVT VT = Op.getValueType();
03842   if (VT.isVector())
03843     return LowerVectorINT_TO_FP(Op, DAG);
03844   if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) {
03845     RTLIB::Libcall LC;
03846     if (Op.getOpcode() == ISD::SINT_TO_FP)
03847       LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
03848                               Op.getValueType());
03849     else
03850       LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
03851                               Op.getValueType());
03852     return makeLibCall(DAG, LC, Op.getValueType(), &Op.getOperand(0), 1,
03853                        /*isSigned*/ false, SDLoc(Op)).first;
03854   }
03855 
03856   return Op;
03857 }
03858 
03859 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
03860   // Implement fcopysign by transferring the sign bit of Tmp1 into Tmp0 using
        // bit operations (a NEON bit-select when profitable, integer ops otherwise).
03861   SDValue Tmp0 = Op.getOperand(0);
03862   SDValue Tmp1 = Op.getOperand(1);
03863   SDLoc dl(Op);
03864   EVT VT = Op.getValueType();
03865   EVT SrcVT = Tmp1.getValueType();
03866   bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
03867     Tmp0.getOpcode() == ARMISD::VMOVDRR;
03868   bool UseNEON = !InGPR && Subtarget->hasNEON();
03869 
03870   if (UseNEON) {
03871     // Use VBSL to copy the sign bit.
03872     unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
03873     SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
03874                                DAG.getTargetConstant(EncodedVal, MVT::i32));
03875     EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
03876     if (VT == MVT::f64)
03877       Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
03878                          DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
03879                          DAG.getConstant(32, MVT::i32));
03880     else /*if (VT == MVT::f32)*/
03881       Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
03882     if (SrcVT == MVT::f32) {
03883       Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
03884       if (VT == MVT::f64)
03885         Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
03886                            DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
03887                            DAG.getConstant(32, MVT::i32));
03888     } else if (VT == MVT::f32)
03889       Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
03890                          DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
03891                          DAG.getConstant(32, MVT::i32));
03892     Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
03893     Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
03894 
03895     SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
03896                                             MVT::i32);
03897     AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
03898     SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
03899                                   DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
03900 
03901     SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
03902                               DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
03903                               DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
03904     if (VT == MVT::f32) {
03905       Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
03906       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
03907                         DAG.getConstant(0, MVT::i32));
03908     } else {
03909       Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
03910     }
03911 
03912     return Res;
03913   }
03914 
03915   // Bitcast operand 1 to i32.
03916   if (SrcVT == MVT::f64)
03917     Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
03918                        Tmp1).getValue(1);
03919   Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
03920 
03921   // Or in the signbit with integer operations.
03922   SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32);
03923   SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32);
03924   Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
03925   if (VT == MVT::f32) {
03926     Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
03927                        DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
03928     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
03929                        DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
03930   }
03931 
03932   // f64: OR the sign bit into the high part, then recombine the two parts.
03933   Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
03934                      Tmp0);
03935   SDValue Lo = Tmp0.getValue(0);
03936   SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
03937   Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
03938   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
03939 }
03940 
03941 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
03942   MachineFunction &MF = DAG.getMachineFunction();
03943   MachineFrameInfo *MFI = MF.getFrameInfo();
03944   MFI->setReturnAddressIsTaken(true);
03945 
03946   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
03947     return SDValue();
03948 
03949   EVT VT = Op.getValueType();
03950   SDLoc dl(Op);
03951   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
03952   if (Depth) {
03953     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
03954     SDValue Offset = DAG.getConstant(4, MVT::i32);
03955     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
03956                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
03957                        MachinePointerInfo(), false, false, false, 0);
03958   }
03959 
03960   // Return LR, which contains the return address. Mark it an implicit live-in.
03961   unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
03962   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
03963 }
03964 
03965 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
03966   const ARMBaseRegisterInfo &ARI =
03967     *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
03968   MachineFunction &MF = DAG.getMachineFunction();
03969   MachineFrameInfo *MFI = MF.getFrameInfo();
03970   MFI->setFrameAddressIsTaken(true);
03971 
03972   EVT VT = Op.getValueType();
03973   SDLoc dl(Op);  // FIXME probably not meaningful
03974   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
03975   unsigned FrameReg = ARI.getFrameRegister(MF);
03976   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
03977   while (Depth--)
03978     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
03979                             MachinePointerInfo(),
03980                             false, false, false, 0);
03981   return FrameAddr;
03982 }
03983 
03984 // FIXME? Maybe this could be a TableGen attribute on some registers and
03985 // this table could be generated automatically from RegInfo.
03986 unsigned ARMTargetLowering::getRegisterByName(const char* RegName,
03987                                               EVT VT) const {
03988   unsigned Reg = StringSwitch<unsigned>(RegName)
03989                        .Case("sp", ARM::SP)
03990                        .Default(0);
03991   if (Reg)
03992     return Reg;
03993   report_fatal_error("Invalid register name global variable");
03994 }
03995 
03996 /// ExpandBITCAST - If the target supports VFP, this function is called to
03997 /// expand a bit convert where either the source or destination type is i64 to
03998 /// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
03999 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
04000 /// vectors), since the legalizer won't know what to do with that.
04001 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
04002   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
04003   SDLoc dl(N);
04004   SDValue Op = N->getOperand(0);
04005 
04006   // This function is only supposed to be called for i64 types, either as the
04007   // source or destination of the bit convert.
04008   EVT SrcVT = Op.getValueType();
04009   EVT DstVT = N->getValueType(0);
04010   assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
04011          "ExpandBITCAST called for non-i64 type");
04012 
04013   // Turn i64->f64 into VMOVDRR.
04014   if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
04015     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
04016                              DAG.getConstant(0, MVT::i32));
04017     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
04018                              DAG.getConstant(1, MVT::i32));
04019     return DAG.getNode(ISD::BITCAST, dl, DstVT,
04020                        DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
04021   }
04022 
04023   // Turn f64->i64 into VMOVRRD.
04024   if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
04025     SDValue Cvt;
04026     if (TLI.isBigEndian() && SrcVT.isVector() &&
04027         SrcVT.getVectorNumElements() > 1)
04028       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
04029                         DAG.getVTList(MVT::i32, MVT::i32),
04030                         DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
04031     else
04032       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
04033                         DAG.getVTList(MVT::i32, MVT::i32), Op);
04034     // Merge the pieces into a single i64 value.
04035     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
04036   }
04037 
04038   return SDValue();
04039 }
04040 
04041 /// getZeroVector - Returns a vector of specified type with all zero elements.
04042 /// Zero vectors are used to represent vector negation and in those cases
04043 /// will be implemented with the NEON VNEG instruction.  However, VNEG does
04044 /// not support i64 elements, so sometimes the zero vectors will need to be
04045 /// explicitly constructed.  Regardless, use a canonical VMOV to create the
04046 /// zero vector.
04047 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) {
04048   assert(VT.isVector() && "Expected a vector type");
04049   // The canonical modified immediate encoding of a zero vector is....0!
04050   SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
04051   EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
04052   SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
04053   return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
04054 }
04055 
04056 /// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
04057 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
04058 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
04059                                                 SelectionDAG &DAG) const {
04060   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
04061   EVT VT = Op.getValueType();
04062   unsigned VTBits = VT.getSizeInBits();
04063   SDLoc dl(Op);
04064   SDValue ShOpLo = Op.getOperand(0);
04065   SDValue ShOpHi = Op.getOperand(1);
04066   SDValue ShAmt  = Op.getOperand(2);
04067   SDValue ARMcc;
04068   unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
04069 
04070   assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
04071 
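        // For shift amounts below VTBits the low result is
        // (ShOpLo >> ShAmt) | (ShOpHi << (VTBits - ShAmt)); for larger amounts
        // it is ShOpHi shifted by (ShAmt - VTBits). The CMOV below selects
        // between the two forms based on the sign of ExtraShAmt.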
04072   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
04073                                  DAG.getConstant(VTBits, MVT::i32), ShAmt);
04074   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
04075   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
04076                                    DAG.getConstant(VTBits, MVT::i32));
04077   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
04078   SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
04079   SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
04080 
04081   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
04082   SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
04083                           ARMcc, DAG, dl);
04084   SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
04085   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
04086                            CCR, Cmp);
04087 
04088   SDValue Ops[2] = { Lo, Hi };
04089   return DAG.getMergeValues(Ops, dl);
04090 }
04091 
04092 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
04093 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
04094 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
04095                                                SelectionDAG &DAG) const {
04096   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
04097   EVT VT = Op.getValueType();
04098   unsigned VTBits = VT.getSizeInBits();
04099   SDLoc dl(Op);
04100   SDValue ShOpLo = Op.getOperand(0);
04101   SDValue ShOpHi = Op.getOperand(1);
04102   SDValue ShAmt  = Op.getOperand(2);
04103   SDValue ARMcc;
04104 
04105   assert(Op.getOpcode() == ISD::SHL_PARTS);
04106   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
04107                                  DAG.getConstant(VTBits, MVT::i32), ShAmt);
04108   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
04109   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
04110                                    DAG.getConstant(VTBits, MVT::i32));
04111   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
04112   SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
04113 
04114   SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
04115   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
04116   SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
04117                           ARMcc, DAG, dl);
04118   SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
04119   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
04120                            CCR, Cmp);
04121 
04122   SDValue Ops[2] = { Lo, Hi };
04123   return DAG.getMergeValues(Ops, dl);
04124 }
04125 
04126 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
04127                                             SelectionDAG &DAG) const {
04128   // The rounding mode is in bits 23:22 of the FPSCR.
04129   // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0.
04130   // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
04131   // so that the shift and the AND get folded into a bitfield extract.
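        // For example, if FPSCR[23:22] == 0b11 (round toward zero), adding
        // 1 << 22 carries out of the two-bit field, and the extracted bits are
        // 0b00, the FLT_ROUNDS value for round-toward-zero.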
04132   SDLoc dl(Op);
04133   SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
04134                               DAG.getConstant(Intrinsic::arm_get_fpscr,
04135                                               MVT::i32));
04136   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
04137                                   DAG.getConstant(1U << 22, MVT::i32));
04138   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
04139                               DAG.getConstant(22, MVT::i32));
04140   return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
04141                      DAG.getConstant(3, MVT::i32));
04142 }
04143 
04144 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
04145                          const ARMSubtarget *ST) {
04146   EVT VT = N->getValueType(0);
04147   SDLoc dl(N);
04148 
04149   if (!ST->hasV6T2Ops())
04150     return SDValue();
04151 
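        // Count trailing zeros by reversing the bits (RBIT, available from
        // ARMv6T2) and counting the leading zeros of the result.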
04152   SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
04153   return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
04154 }
04155 
04156 /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count
04157 /// for each 16-bit element from operand, repeated.  The basic idea is to
04158 /// leverage vcnt to get the 8-bit counts, gather and add the results.
04159 ///
04160 /// Trace for v4i16:
04161 /// input    = [v0    v1    v2    v3   ] (vi 16-bit element)
04162 /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element)
04163 /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi)
04164 /// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6]
04165 ///            [b0 b1 b2 b3 b4 b5 b6 b7]
04166 ///           +[b1 b0 b3 b2 b5 b4 b7 b6]
04167 /// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0,
04168 /// vuzp:    = [k0 k1 k2 k3 k0 k1 k2 k3]  each ki is 8-bits)
04169 static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
04170   EVT VT = N->getValueType(0);
04171   SDLoc DL(N);
04172 
04173   EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
04174   SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0));
04175   SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0);
04176   SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1);
04177   SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2);
04178   return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3);
04179 }
04180 
04181 /// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the
04182 /// bit-count for each 16-bit element from the operand.  We need slightly
04183 /// different sequencing for v4i16 and v8i16 to stay within NEON's available
04184 /// 64/128-bit registers.
04185 ///
04186 /// Trace for v4i16:
04187 /// input           = [v0    v1    v2    v3    ] (vi 16-bit element)
04188 /// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi)
04189 /// v8i16:Extended  = [k0    k1    k2    k3    k0    k1    k2    k3    ]
04190 /// v4i16:Extracted = [k0    k1    k2    k3    ]
04191 static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
04192   EVT VT = N->getValueType(0);
04193   SDLoc DL(N);
04194 
04195   SDValue BitCounts = getCTPOP16BitCounts(N, DAG);
04196   if (VT.is64BitVector()) {
04197     SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts);
04198     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended,
04199                        DAG.getIntPtrConstant(0));
04200   } else {
04201     SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8,
04202                                     BitCounts, DAG.getIntPtrConstant(0));
04203     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted);
04204   }
04205 }
04206 
04207 /// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the
04208 /// bit-count for each 32-bit element from the operand.  The idea here is
04209 /// to split the vector into 16-bit elements, leverage the 16-bit count
04210 /// routine, and then combine the results.
04211 ///
04212 /// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged):
04213 /// input    = [v0    v1    ] (vi: 32-bit elements)
04214 /// Bitcast  = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1])
04215 /// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi)
04216 /// vrev: N0 = [k1 k0 k3 k2 ]
04217 ///            [k0 k1 k2 k3 ]
04218 ///       N1 =+[k1 k0 k3 k2 ]
04219 ///            [k0 k2 k1 k3 ]
04220 ///       N2 =+[k1 k3 k0 k2 ]
04221 ///            [k0    k2    k1    k3    ]
04222 /// Extended =+[k1    k3    k0    k2    ]
04223 ///            [k0    k2    ]
04224 /// Extracted=+[k1    k3    ]
04225 ///
04226 static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) {
04227   EVT VT = N->getValueType(0);
04228   SDLoc DL(N);
04229 
04230   EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
04231 
04232   SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0));
04233   SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG);
04234   SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16);
04235   SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0);
04236   SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1);
04237 
04238   if (VT.is64BitVector()) {
04239     SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2);
04240     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended,
04241                        DAG.getIntPtrConstant(0));
04242   } else {
04243     SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2,
04244                                     DAG.getIntPtrConstant(0));
04245     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted);
04246   }
04247 }
04248 
04249 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
04250                           const ARMSubtarget *ST) {
04251   EVT VT = N->getValueType(0);
04252 
04253   assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
04254   assert((VT == MVT::v2i32 || VT == MVT::v4i32 ||
04255           VT == MVT::v4i16 || VT == MVT::v8i16) &&
04256          "Unexpected type for custom ctpop lowering");
04257 
04258   if (VT.getVectorElementType() == MVT::i32)
04259     return lowerCTPOP32BitElements(N, DAG);
04260   else
04261     return lowerCTPOP16BitElements(N, DAG);
04262 }
04263 
04264 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
04265                           const ARMSubtarget *ST) {
04266   EVT VT = N->getValueType(0);
04267   SDLoc dl(N);
04268 
04269   if (!VT.isVector())
04270     return SDValue();
04271 
04272   // Lower vector shifts on NEON to use VSHL.
04273   assert(ST->hasNEON() && "unexpected vector shift");
04274 
04275   // Left shifts translate directly to the vshiftu intrinsic.
04276   if (N->getOpcode() == ISD::SHL)
04277     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
04278                        DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
04279                        N->getOperand(0), N->getOperand(1));
04280 
04281   assert((N->getOpcode() == ISD::SRA ||
04282           N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
04283 
04284   // NEON uses the same intrinsics for both left and right shifts.  For
04285   // right shifts, the shift amounts are negative, so negate the vector of
04286   // shift amounts.
04287   EVT ShiftVT = N->getOperand(1).getValueType();
04288   SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
04289                                      getZeroVector(ShiftVT, DAG, dl),
04290                                      N->getOperand(1));
04291   Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
04292                              Intrinsic::arm_neon_vshifts :
04293                              Intrinsic::arm_neon_vshiftu);
04294   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
04295                      DAG.getConstant(vshiftInt, MVT::i32),
04296                      N->getOperand(0), NegatedCount);
04297 }
04298 
04299 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
04300                                 const ARMSubtarget *ST) {
04301   EVT VT = N->getValueType(0);
04302   SDLoc dl(N);
04303 
04304   // We can get here for a node like i32 = ISD::SHL i32, i64
04305   if (VT != MVT::i64)
04306     return SDValue();
04307 
04308   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
04309          "Unknown shift to lower!");
04310 
04311   // We only lower SRA, SRL of 1 here, all others use generic lowering.
04312   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
04313       cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
04314     return SDValue();
04315 
04316   // If we are in thumb mode, we don't have RRX.
04317   if (ST->isThumb1Only()) return SDValue();
04318 
04319   // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
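        // For example, assuming the value lives in an illustrative r1:r0 pair
        // (r1 = high word), a 64-bit logical shift right by one can be emitted as
        //   lsrs r1, r1, #1   @ shift the high word; its low bit goes to the carry
        //   rrx  r0, r0       @ rotate the carry into the top bit of the low word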
04320   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
04321                            DAG.getConstant(0, MVT::i32));
04322   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
04323                            DAG.getConstant(1, MVT::i32));
04324 
04325   // First, build an SRA_FLAG/SRL_FLAG op, which shifts the high part right by
04326   // one and leaves the shifted-out bit in the carry flag.
04327   unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
04328   Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);
04329 
04330   // The low part is an ARMISD::RRX operand, which shifts the carry in.
04331   Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
04332 
04333   // Merge the pieces into a single i64 value.
04334   return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
04335 }
04336 
04337 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
04338   SDValue TmpOp0, TmpOp1;
04339   bool Invert = false;
04340   bool Swap = false;
04341   unsigned Opc = 0;
04342 
04343   SDValue Op0 = Op.getOperand(0);
04344   SDValue Op1 = Op.getOperand(1);
04345   SDValue CC = Op.getOperand(2);
04346   EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
04347   EVT VT = Op.getValueType();
04348   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
04349   SDLoc dl(Op);
04350 
04351   if (Op1.getValueType().isFloatingPoint()) {
04352     switch (SetCCOpcode) {
04353     default: llvm_unreachable("Illegal FP comparison");
04354     case ISD::SETUNE:
04355     case ISD::SETNE:  Invert = true; // Fallthrough
04356     case ISD::SETOEQ:
04357     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
04358     case ISD::SETOLT:
04359     case ISD::SETLT: Swap = true; // Fallthrough
04360     case ISD::SETOGT:
04361     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
04362     case ISD::SETOLE:
04363     case ISD::SETLE:  Swap = true; // Fallthrough
04364     case ISD::SETOGE:
04365     case ISD::SETGE: Opc = ARMISD::VCGE; break;
04366     case ISD::SETUGE: Swap = true; // Fallthrough
04367     case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
04368     case ISD::SETUGT: Swap = true; // Fallthrough
04369     case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
04370     case ISD::SETUEQ: Invert = true; // Fallthrough
04371     case ISD::SETONE:
04372       // Expand this to (OLT | OGT).
04373       TmpOp0 = Op0;
04374       TmpOp1 = Op1;
04375       Opc = ISD::OR;
04376       Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
04377       Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
04378       break;
04379     case ISD::SETUO: Invert = true; // Fallthrough
04380     case ISD::SETO:
04381       // Expand this to (OLT | OGE).
04382       TmpOp0 = Op0;
04383       TmpOp1 = Op1;
04384       Opc = ISD::OR;
04385       Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
04386       Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1);
04387       break;
04388     }
04389   } else {
04390     // Integer comparisons.
04391     switch (SetCCOpcode) {
04392     default: llvm_unreachable("Illegal integer comparison");
04393     case ISD::SETNE:  Invert = true;
04394     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
04395     case ISD::SETLT:  Swap = true;
04396     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
04397     case ISD::SETLE:  Swap = true;
04398     case ISD::SETGE:  Opc = ARMISD::VCGE; break;
04399     case ISD::SETULT: Swap = true;
04400     case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
04401     case ISD::SETULE: Swap = true;
04402     case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
04403     }
04404 
04405     // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
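          // For example, "icmp ne (and a, b), 0" becomes "VTST a, b"; the eq
          // form is handled by flipping the Invert flag so the VTST result is
          // negated afterwards.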
04406     if (Opc == ARMISD::VCEQ) {
04407 
04408       SDValue AndOp;
04409       if (ISD::isBuildVectorAllZeros(Op1.getNode()))
04410         AndOp = Op0;
04411       else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
04412         AndOp = Op1;
04413 
04414       // Ignore bitconvert.
04415       if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
04416         AndOp = AndOp.getOperand(0);
04417 
04418       if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
04419         Opc = ARMISD::VTST;
04420         Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
04421         Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
04422         Invert = !Invert;
04423       }
04424     }
04425   }
04426 
04427   if (Swap)
04428     std::swap(Op0, Op1);
04429 
04430   // If one of the operands is a constant vector zero, attempt to fold the
04431   // comparison to a specialized compare-against-zero form.
04432   SDValue SingleOp;
04433   if (ISD::isBuildVectorAllZeros(Op1.getNode()))
04434     SingleOp = Op0;
04435   else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
04436     if (Opc == ARMISD::VCGE)
04437       Opc = ARMISD::VCLEZ;
04438     else if (Opc == ARMISD::VCGT)
04439       Opc = ARMISD::VCLTZ;
04440     SingleOp = Op1;
04441   }
04442 
04443   SDValue Result;
04444   if (SingleOp.getNode()) {
04445     switch (Opc) {
04446     case ARMISD::VCEQ:
04447       Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break;
04448     case ARMISD::VCGE:
04449       Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break;
04450     case ARMISD::VCLEZ:
04451       Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break;
04452     case ARMISD::VCGT:
04453       Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break;
04454     case ARMISD::VCLTZ:
04455       Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break;
04456     default:
04457       Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
04458     }
04459   } else {
04460     Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
04461   }
04462 
04463   Result = DAG.getSExtOrTrunc(Result, dl, VT);
04464 
04465   if (Invert)
04466     Result = DAG.getNOT(dl, Result, VT);
04467 
04468   return Result;
04469 }
04470 
04471 /// isNEONModifiedImm - Check if the specified splat value corresponds to a
04472 /// valid vector constant for a NEON instruction with a "modified immediate"
04473 /// operand (e.g., VMOV).  If so, return the encoded value.
04474 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
04475                                  unsigned SplatBitSize, SelectionDAG &DAG,
04476                                  EVT &VT, bool is128Bits, NEONModImmType type) {
04477   unsigned OpCmode, Imm;
04478 
04479   // SplatBitSize is set to the smallest size that splats the vector, so a
04480   // zero vector will always have SplatBitSize == 8.  However, NEON modified
04481   // immediate instructions other than VMOV do not support the 8-bit encoding
04482   // of a zero vector, and the default encoding of zero is supposed to be the
04483   // 32-bit version.
04484   if (SplatBits == 0)
04485     SplatBitSize = 32;
04486 
04487   switch (SplatBitSize) {
04488   case 8:
04489     if (type != VMOVModImm)
04490       return SDValue();
04491     // Any 1-byte value is OK.  Op=0, Cmode=1110.
04492     assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
04493     OpCmode = 0xe;
04494     Imm = SplatBits;
04495     VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
04496     break;
04497 
04498   case 16:
04499     // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
04500     VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
04501     if ((SplatBits & ~0xff) == 0) {
04502       // Value = 0x00nn: Op=x, Cmode=100x.
04503       OpCmode = 0x8;
04504       Imm = SplatBits;
04505       break;
04506     }
04507     if ((SplatBits & ~0xff00) == 0) {
04508       // Value = 0xnn00: Op=x, Cmode=101x.
04509       OpCmode = 0xa;
04510       Imm = SplatBits >> 8;
04511       break;
04512     }
04513     return SDValue();
04514 
04515   case 32:
04516     // NEON's 32-bit VMOV supports splat values where:
04517     // * only one byte is nonzero, or
04518     // * the least significant byte is 0xff and the second byte is nonzero, or
04519     // * the least significant 2 bytes are 0xff and the third is nonzero.
04520     VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
04521     if ((SplatBits & ~0xff) == 0) {
04522       // Value = 0x000000nn: Op=x, Cmode=000x.
04523       OpCmode = 0;
04524       Imm = SplatBits;
04525       break;
04526     }
04527     if ((SplatBits & ~0xff00) == 0) {
04528       // Value = 0x0000nn00: Op=x, Cmode=001x.
04529       OpCmode = 0x2;
04530       Imm = SplatBits >> 8;
04531       break;
04532     }
04533     if ((SplatBits & ~0xff0000) == 0) {
04534       // Value = 0x00nn0000: Op=x, Cmode=010x.
04535       OpCmode = 0x4;
04536       Imm = SplatBits >> 16;
04537       break;
04538     }
04539     if ((SplatBits & ~0xff000000) == 0) {
04540       // Value = 0xnn000000: Op=x, Cmode=011x.
04541       OpCmode = 0x6;
04542       Imm = SplatBits >> 24;
04543       break;
04544     }
04545 
04546     // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
04547     if (type == OtherModImm) return SDValue();
04548 
04549     if ((SplatBits & ~0xffff) == 0 &&
04550         ((SplatBits | SplatUndef) & 0xff) == 0xff) {
04551       // Value = 0x0000nnff: Op=x, Cmode=1100.
04552       OpCmode = 0xc;
04553       Imm = SplatBits >> 8;
04554       break;
04555     }
04556 
04557     if ((SplatBits & ~0xffffff) == 0 &&
04558         ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
04559       // Value = 0x00nnffff: Op=x, Cmode=1101.
04560       OpCmode = 0xd;
04561       Imm = SplatBits >> 16;
04562       break;
04563     }
04564 
04565     // Note: there are a few 32-bit splat values (specifically: 00ffff00,
04566     // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
04567     // VMOV.I32.  A (very) minor optimization would be to replicate the value
04568     // and fall through here to test for a valid 64-bit splat.  But, then the
04569     // caller would also need to check and handle the change in size.
04570     return SDValue();
04571 
04572   case 64: {
04573     if (type != VMOVModImm)
04574       return SDValue();
04575     // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
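          // For example, the splat value 0x00ff00ff00ff00ff is encoded with
          // Imm == 0b01010101; each set bit of Imm selects an all-ones byte.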
04576     uint64_t BitMask = 0xff;
04577     uint64_t Val = 0;
04578     unsigned ImmMask = 1;
04579     Imm = 0;
04580     for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
04581       if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
04582         Val |= BitMask;
04583         Imm |= ImmMask;
04584       } else if ((SplatBits & BitMask) != 0) {
04585         return SDValue();
04586       }
04587       BitMask <<= 8;
04588       ImmMask <<= 1;
04589     }
04590 
04591     if (DAG.getTargetLoweringInfo().isBigEndian())
04592       // Swap the higher and lower 32-bit words.
04593       Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
04594 
04595     // Op=1, Cmode=1110.
04596     OpCmode = 0x1e;
04597     VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
04598     break;
04599   }
04600 
04601   default:
04602     llvm_unreachable("unexpected size for isNEONModifiedImm");
04603   }
04604 
04605   unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
04606   return DAG.getTargetConstant(EncodedVal, MVT::i32);
04607 }
04608 
04609 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
04610                                            const ARMSubtarget *ST) const {
04611   if (!ST->hasVFP3())
04612     return SDValue();
04613 
04614   bool IsDouble = Op.getValueType() == MVT::f64;
04615   ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
04616 
04617   // Use the default (constant pool) lowering for double constants when we have
04618   // an SP-only FPU
04619   if (IsDouble && Subtarget->isFPOnlySP())
04620     return SDValue();
04621 
04622   // Try splatting with a VMOV.f32...
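        // (VMOV.f32/.f64 immediates are limited to values of roughly the form
        // +/- n/16 * 2^e with n in [16,31] and e in [-3,4]; getFP32Imm /
        // getFP64Imm return -1 for anything that cannot be encoded.)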
04623   APFloat FPVal = CFP->getValueAPF();
04624   int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
04625 
04626   if (ImmVal != -1) {
04627     if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
04628       // We have code in place to select a valid ConstantFP already, no need to
04629       // do any mangling.
04630       return Op;
04631     }
04632 
04633     // It's a float and we are trying to use NEON operations where
04634     // possible. Lower it to a splat followed by an extract.
04635     SDLoc DL(Op);
04636     SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32);
04637     SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
04638                                       NewVal);
04639     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
04640                        DAG.getConstant(0, MVT::i32));
04641   }
04642 
04643   // The rest of our options are NEON-only; make sure that's allowed before
04644   // proceeding.
04645   if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
04646     return SDValue();
04647 
04648   EVT VMovVT;
04649   uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
04650 
04651   // It wouldn't really be worth bothering for doubles except for one very
04652   // important value, which does happen to match: 0.0. So make sure we don't do
04653   // anything stupid.
04654   if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
04655     return SDValue();
04656 
04657   // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
04658   SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
04659                                      false, VMOVModImm);
04660   if (NewVal != SDValue()) {
04661     SDLoc DL(Op);
04662     SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
04663                                       NewVal);
04664     if (IsDouble)
04665       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
04666 
04667     // It's a float: cast and extract a vector element.
04668     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
04669                                        VecConstant);
04670     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
04671                        DAG.getConstant(0, MVT::i32));
04672   }
04673 
04674   // Finally, try a VMVN.i32
04675   NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
04676                              false, VMVNModImm);
04677   if (NewVal != SDValue()) {
04678     SDLoc DL(Op);
04679     SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
04680 
04681     if (IsDouble)
04682       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
04683 
04684     // It's a float: cast and extract a vector element.
04685     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
04686                                        VecConstant);
04687     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
04688                        DAG.getConstant(0, MVT::i32));
04689   }
04690 
04691   return SDValue();
04692 }
04693 
04694 // Check if a VEXT instruction can handle the shuffle mask when the
04695 // vector sources of the shuffle are the same.
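      // For example, for v4i16 the mask <1, 2, 3, 0> is a singleton VEXT with
      // Imm == 1: the result is the source rotated down by one element.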
04696 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
04697   unsigned NumElts = VT.getVectorNumElements();
04698 
04699   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
04700   if (M[0] < 0)
04701     return false;
04702 
04703   Imm = M[0];
04704 
04705   // If this is a VEXT shuffle, the immediate value is the index of the first
04706   // element.  The other shuffle indices must be the successive elements after
04707   // the first one.
04708   unsigned ExpectedElt = Imm;
04709   for (unsigned i = 1; i < NumElts; ++i) {
04710     // Increment the expected index.  If it wraps around, just follow it
04711     // back to index zero and keep going.
04712     ++ExpectedElt;
04713     if (ExpectedElt == NumElts)
04714       ExpectedElt = 0;
04715 
04716     if (M[i] < 0) continue; // ignore UNDEF indices
04717     if (ExpectedElt != static_cast<unsigned>(M[i]))
04718       return false;
04719   }
04720 
04721   return true;
04722 }
04723 
04724 
04725 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
04726                        bool &ReverseVEXT, unsigned &Imm) {
04727   unsigned NumElts = VT.getVectorNumElements();
04728   ReverseVEXT = false;
04729 
04730   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
04731   if (M[0] < 0)
04732     return false;
04733 
04734   Imm = M[0];
04735 
04736   // If this is a VEXT shuffle, the immediate value is the index of the first
04737   // element.  The other shuffle indices must be the successive elements after
04738   // the first one.
04739   unsigned ExpectedElt = Imm;
04740   for (unsigned i = 1; i < NumElts; ++i) {
04741     // Increment the expected index.  If it wraps around, it may still be
04742     // a VEXT but the source vectors must be swapped.
04743     ExpectedElt += 1;
04744     if (ExpectedElt == NumElts * 2) {
04745       ExpectedElt = 0;
04746       ReverseVEXT = true;
04747     }
04748 
04749     if (M[i] < 0) continue; // ignore UNDEF indices
04750     if (ExpectedElt != static_cast<unsigned>(M[i]))
04751       return false;
04752   }
04753 
04754   // Adjust the index value if the source operands will be swapped.
04755   if (ReverseVEXT)
04756     Imm -= NumElts;
04757 
04758   return true;
04759 }
04760 
04761 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
04762 /// instruction with the specified blocksize.  (The order of the elements
04763 /// within each block of the vector is reversed.)
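      /// For example, for v8i8 with BlockSize == 32 the expected mask is
      /// <3, 2, 1, 0, 7, 6, 5, 4>: bytes are reversed within each 32-bit block.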
04764 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
04765   assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
04766          "Only possible block sizes for VREV are: 16, 32, 64");
04767 
04768   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04769   if (EltSz == 64)
04770     return false;
04771 
04772   unsigned NumElts = VT.getVectorNumElements();
04773   unsigned BlockElts = M[0] + 1;
04774   // If the first shuffle index is UNDEF, be optimistic.
04775   if (M[0] < 0)
04776     BlockElts = BlockSize / EltSz;
04777 
04778   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
04779     return false;
04780 
04781   for (unsigned i = 0; i < NumElts; ++i) {
04782     if (M[i] < 0) continue; // ignore UNDEF indices
04783     if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
04784       return false;
04785   }
04786 
04787   return true;
04788 }
04789 
04790 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
04791   // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
04792   // range, then 0 is placed into the resulting vector. So pretty much any mask
04793   // of 8 elements can work here.
04794   return VT == MVT::v8i8 && M.size() == 8;
04795 }
04796 
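/// isVTRNMask - Check if a shuffle mask matches a VTRN (transpose); e.g., on
/// <4 x i16> the mask <0, 4, 2, 6> selects the first VTRN result
/// (WhichResult == 0) and <1, 5, 3, 7> selects the second (WhichResult == 1).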
04797 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
04798   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04799   if (EltSz == 64)
04800     return false;
04801 
04802   unsigned NumElts = VT.getVectorNumElements();
04803   WhichResult = (M[0] == 0 ? 0 : 1);
04804   for (unsigned i = 0; i < NumElts; i += 2) {
04805     if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
04806         (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult))
04807       return false;
04808   }
04809   return true;
04810 }
04811 
04812 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
04813 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
04814 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
04815 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
04816   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04817   if (EltSz == 64)
04818     return false;
04819 
04820   unsigned NumElts = VT.getVectorNumElements();
04821   WhichResult = (M[0] == 0 ? 0 : 1);
04822   for (unsigned i = 0; i < NumElts; i += 2) {
04823     if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
04824         (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult))
04825       return false;
04826   }
04827   return true;
04828 }
04829 
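/// isVUZPMask - Check if a shuffle mask matches a VUZP (unzip); e.g., on
/// <8 x i8> the mask <0, 2, 4, 6, 8, 10, 12, 14> selects the even elements of
/// the two sources (WhichResult == 0) and <1, 3, 5, 7, 9, 11, 13, 15> the odd
/// ones (WhichResult == 1).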
04830 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
04831   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04832   if (EltSz == 64)
04833     return false;
04834 
04835   unsigned NumElts = VT.getVectorNumElements();
04836   WhichResult = (M[0] == 0 ? 0 : 1);
04837   for (unsigned i = 0; i != NumElts; ++i) {
04838     if (M[i] < 0) continue; // ignore UNDEF indices
04839     if ((unsigned) M[i] != 2 * i + WhichResult)
04840       return false;
04841   }
04842 
04843   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
04844   if (VT.is64BitVector() && EltSz == 32)
04845     return false;
04846 
04847   return true;
04848 }
04849 
04850 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
04851 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
04852 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
04853 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
04854   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04855   if (EltSz == 64)
04856     return false;
04857 
04858   unsigned Half = VT.getVectorNumElements() / 2;
04859   WhichResult = (M[0] == 0 ? 0 : 1);
04860   for (unsigned j = 0; j != 2; ++j) {
04861     unsigned Idx = WhichResult;
04862     for (unsigned i = 0; i != Half; ++i) {
04863       int MIdx = M[i + j * Half];
04864       if (MIdx >= 0 && (unsigned) MIdx != Idx)
04865         return false;
04866       Idx += 2;
04867     }
04868   }
04869 
04870   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
04871   if (VT.is64BitVector() && EltSz == 32)
04872     return false;
04873 
04874   return true;
04875 }
04876 
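/// isVZIPMask - Check if a shuffle mask matches a VZIP (interleave); e.g., on
/// <8 x i8> the mask <0, 8, 1, 9, 2, 10, 3, 11> interleaves the low halves of
/// the two sources (WhichResult == 0) and <4, 12, 5, 13, 6, 14, 7, 15> the
/// high halves (WhichResult == 1).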
04877 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
04878   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04879   if (EltSz == 64)
04880     return false;
04881 
04882   unsigned NumElts = VT.getVectorNumElements();
04883   WhichResult = (M[0] == 0 ? 0 : 1);
04884   unsigned Idx = WhichResult * NumElts / 2;
04885   for (unsigned i = 0; i != NumElts; i += 2) {
04886     if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
04887         (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts))
04888       return false;
04889     Idx += 1;
04890   }
04891 
04892   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
04893   if (VT.is64BitVector() && EltSz == 32)
04894     return false;
04895 
04896   return true;
04897 }
04898 
04899 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
04900 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
04901 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
04902 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
04903   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04904   if (EltSz == 64)
04905     return false;
04906 
04907   unsigned NumElts = VT.getVectorNumElements();
04908   WhichResult = (M[0] == 0 ? 0 : 1);
04909   unsigned Idx = WhichResult * NumElts / 2;
04910   for (unsigned i = 0; i != NumElts; i += 2) {
04911     if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
04912         (M[i+1] >= 0 && (unsigned) M[i+1] != Idx))
04913       return false;
04914     Idx += 1;
04915   }
04916 
04917   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
04918   if (VT.is64BitVector() && EltSz == 32)
04919     return false;
04920 
04921   return true;
04922 }
04923 
04924 /// \return true if this is a reverse operation on a vector.
04925 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
04926   unsigned NumElts = VT.getVectorNumElements();
04927   // Make sure the mask has the right size.
04928   if (NumElts != M.size())
04929     return false;
04930 
04931   // Look for <15, ..., 3, -1, 1, 0>.
04932   for (unsigned i = 0; i != NumElts; ++i)
04933     if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
04934       return false;
04935 
04936   return true;
04937 }
04938 
04939 // If N is an integer constant that can be moved into a register in one
04940 // instruction, return an SDValue of such a constant (will become a MOV
04941 // instruction).  Otherwise return null.
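// For example, 255 (0x000000FF) is valid for both Thumb1 and ARM mode, and
// 0x0000FF00 is valid in ARM mode as a rotated 8-bit (so_imm) constant but
// not in Thumb1.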
04942 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
04943                                      const ARMSubtarget *ST, SDLoc dl) {
04944   uint64_t Val;
04945   if (!isa<ConstantSDNode>(N))
04946     return SDValue();
04947   Val = cast<ConstantSDNode>(N)->getZExtValue();
04948 
04949   if (ST->isThumb1Only()) {
04950     if (Val <= 255 || ~Val <= 255)
04951       return DAG.getConstant(Val, MVT::i32);
04952   } else {
04953     if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
04954       return DAG.getConstant(Val, MVT::i32);
04955   }
04956   return SDValue();
04957 }
04958 
04959 // If this is a case we can't handle, return null and let the default
04960 // expansion code take care of it.
04961 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
04962                                              const ARMSubtarget *ST) const {
04963   BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
04964   SDLoc dl(Op);
04965   EVT VT = Op.getValueType();
04966 
04967   APInt SplatBits, SplatUndef;
04968   unsigned SplatBitSize;
04969   bool HasAnyUndefs;
04970   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
04971     if (SplatBitSize <= 64) {
04972       // Check if an immediate VMOV works.
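      // For example, a v4i32 splat of 0x00FF0000 is directly encodable as a
      // VMOV.I32 immediate, and a splat of 0xFFFFFF00 is handled below as a
      // VMVN.I32 of 0x000000FF.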
04973       EVT VmovVT;
04974       SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
04975                                       SplatUndef.getZExtValue(), SplatBitSize,
04976                                       DAG, VmovVT, VT.is128BitVector(),
04977                                       VMOVModImm);
04978       if (Val.getNode()) {
04979         SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
04980         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
04981       }
04982 
04983       // Try an immediate VMVN.
04984       uint64_t NegatedImm = (~SplatBits).getZExtValue();
04985       Val = isNEONModifiedImm(NegatedImm,
04986                                       SplatUndef.getZExtValue(), SplatBitSize,
04987                                       DAG, VmovVT, VT.is128BitVector(),
04988                                       VMVNModImm);
04989       if (Val.getNode()) {
04990         SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
04991         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
04992       }
04993 
04994       // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
04995       if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
04996         int ImmVal = ARM_AM::getFP32Imm(SplatBits);
04997         if (ImmVal != -1) {
04998           SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
04999           return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
05000         }
05001       }
05002     }
05003   }
05004 
05005   // Scan through the operands to see if only one value is used.
05006   //
05007   // As an optimisation, even if more than one value is used it may be more
05008   // profitable to splat with one value and then change some lanes.
05009   //
05010   // Heuristically we decide to do this if the vector has a "dominant" value,
05011   // defined as splatted to more than half of the lanes.
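  //
  // For example, in <a, a, a, b> the value 'a' covers three of the four
  // lanes, so it is treated as dominant; for non-constant values it is
  // splatted with a VDUP and the remaining lane is then patched with an
  // INSERT_VECTOR_ELT.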
05012   unsigned NumElts = VT.getVectorNumElements();
05013   bool isOnlyLowElement = true;
05014   bool usesOnlyOneValue = true;
05015   bool hasDominantValue = false;
05016   bool isConstant = true;
05017 
05018   // Map of the number of times a particular SDValue appears in the
05019   // element list.
05020   DenseMap<SDValue, unsigned> ValueCounts;
05021   SDValue Value;
05022   for (unsigned i = 0; i < NumElts; ++i) {
05023     SDValue V = Op.getOperand(i);
05024     if (V.getOpcode() == ISD::UNDEF)
05025       continue;
05026     if (i > 0)
05027       isOnlyLowElement = false;
05028     if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
05029       isConstant = false;
05030 
05031     ValueCounts.insert(std::make_pair(V, 0));
05032     unsigned &Count = ValueCounts[V];
05033 
05034     // Is this value dominant? (takes up more than half of the lanes)
05035     if (++Count > (NumElts / 2)) {
05036       hasDominantValue = true;
05037       Value = V;
05038     }
05039   }
05040   if (ValueCounts.size() != 1)
05041     usesOnlyOneValue = false;
05042   if (!Value.getNode() && ValueCounts.size() > 0)
05043     Value = ValueCounts.begin()->first;
05044 
05045   if (ValueCounts.size() == 0)
05046     return DAG.getUNDEF(VT);
05047 
05048   // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
05049   // Keep going if we are hitting this case.
05050   if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
05051     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
05052 
05053   unsigned EltSize = VT.getVectorElementType().getSizeInBits();
05054 
05055   // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
05056   // i32 and try again.
05057   if (hasDominantValue && EltSize <= 32) {
05058     if (!isConstant) {
05059       SDValue N;
05060 
05061       // If we are VDUPing a value that comes directly from a vector, that will
05062       // cause an unnecessary move to and from a GPR, where instead we could
05063       // just use VDUPLANE. We can only do this if the lane being extracted
05064       // is at a constant index, as the VDUP from lane instructions only have
05065       // constant-index forms.
05066       if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
05067           isa<ConstantSDNode>(Value->getOperand(1))) {
05068         // We need to create a new undef vector to use for the VDUPLANE if the
05069         // size of the vector from which we get the value is different than the
05070         // size of the vector that we need to create. We will insert the element
05071         // such that the register coalescer will remove unnecessary copies.
05072         if (VT != Value->getOperand(0).getValueType()) {
05073           ConstantSDNode *constIndex;
05074           constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1));
05075           assert(constIndex && "The index is not a constant!");
05076           unsigned index = constIndex->getAPIntValue().getLimitedValue() %
05077                              VT.getVectorNumElements();
05078           N =  DAG.getNode(ARMISD::VDUPLANE, dl, VT,
05079                  DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
05080                         Value, DAG.getConstant(index, MVT::i32)),
05081                            DAG.getConstant(index, MVT::i32));
05082         } else
05083           N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
05084                         Value->getOperand(0), Value->getOperand(1));
05085       } else
05086         N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
05087 
05088       if (!usesOnlyOneValue) {
05089         // The dominant value was splatted as 'N', but we now have to insert
05090         // all differing elements.
05091         for (unsigned I = 0; I < NumElts; ++I) {
05092           if (Op.getOperand(I) == Value)
05093             continue;
05094           SmallVector<SDValue, 3> Ops;
05095           Ops.push_back(N);
05096           Ops.push_back(Op.getOperand(I));
05097           Ops.push_back(DAG.getConstant(I, MVT::i32));
05098           N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
05099         }
05100       }
05101       return N;
05102     }
05103     if (VT.getVectorElementType().isFloatingPoint()) {
05104       SmallVector<SDValue, 8> Ops;
05105       for (unsigned i = 0; i < NumElts; ++i)
05106         Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
05107                                   Op.getOperand(i)));
05108       EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
05109       SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
05110       Val = LowerBUILD_VECTOR(Val, DAG, ST);
05111       if (Val.getNode())
05112         return DAG.getNode(ISD::BITCAST, dl, VT, Val);
05113     }
05114     if (usesOnlyOneValue) {
05115       SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
05116       if (isConstant && Val.getNode())
05117         return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
05118     }
05119   }
05120 
05121   // If all elements are constants and the case above didn't get hit, fall back
05122   // to the default expansion, which will generate a load from the constant
05123   // pool.
05124   if (isConstant)
05125     return SDValue();
05126 
05127   // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
05128   if (NumElts >= 4) {
05129     SDValue shuffle = ReconstructShuffle(Op, DAG);
05130     if (shuffle != SDValue())
05131       return shuffle;
05132   }
05133 
05134   // Vectors with 32- or 64-bit elements can be built by directly assigning
05135   // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
05136   // will be legalized.
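  // For example, a v2i64 BUILD_VECTOR becomes an ARMISD::BUILD_VECTOR of
  // v2f64 with its operands bitcast to f64, and the result is bitcast back.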
05137   if (EltSize >= 32) {
05138     // Do the expansion with floating-point types, since that is what the VFP
05139     // registers are defined to use, and since i64 is not legal.
05140     EVT EltVT = EVT::getFloatingPointVT(EltSize);
05141     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
05142     SmallVector<SDValue, 8> Ops;
05143     for (unsigned i = 0; i < NumElts; ++i)
05144       Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
05145     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
05146     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
05147   }
05148 
05149   // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
05150   // know the default expansion would otherwise fall back on something even
05151   // worse. For a vector with one or two non-undef values, the default is
05152   // scalar_to_vector for the elements followed by a shuffle (provided the
05153   // shuffle is valid for the target); for everything else it is
05154   // materialization element by element on the stack followed by a load.
05155   if (!isConstant && !usesOnlyOneValue) {
05156     SDValue Vec = DAG.getUNDEF(VT);
05157     for (unsigned i = 0 ; i < NumElts; ++i) {
05158       SDValue V = Op.getOperand(i);
05159       if (V.getOpcode() == ISD::UNDEF)
05160         continue;
05161       SDValue LaneIdx = DAG.getConstant(i, MVT::i32);
05162       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
05163     }
05164     return Vec;
05165   }
05166 
05167   return SDValue();
05168 }
05169 
05170 // Gather data to see if the operation can be modelled as a
05171 // shuffle in combination with VEXTs.
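// For example, a v4i16 vector built from elements 2..5 of a single v8i16
// source can be rewritten as a VEXT of the low and high halves of that source
// with immediate 2.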
05172 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
05173                                               SelectionDAG &DAG) const {
05174   SDLoc dl(Op);
05175   EVT VT = Op.getValueType();
05176   unsigned NumElts = VT.getVectorNumElements();
05177 
05178   SmallVector<SDValue, 2> SourceVecs;
05179   SmallVector<unsigned, 2> MinElts;
05180   SmallVector<unsigned, 2> MaxElts;
05181 
05182   for (unsigned i = 0; i < NumElts; ++i) {
05183     SDValue V = Op.getOperand(i);
05184     if (V.getOpcode() == ISD::UNDEF)
05185       continue;
05186     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
05187       // A shuffle can only come from building a vector from various
05188       // elements of other vectors.
05189       return SDValue();
05190     } else if (V.getOperand(0).getValueType().getVectorElementType() !=
05191                VT.getVectorElementType()) {
05192       // This code doesn't know how to handle shuffles where the vector
05193       // element types do not match (this happens because type legalization
05194       // promotes the return type of EXTRACT_VECTOR_ELT).
05195       // FIXME: It might be appropriate to extend this code to handle
05196       // mismatched types.
05197       return SDValue();
05198     }
05199 
05200     // Record this extraction against the appropriate vector if possible...
05201     SDValue SourceVec = V.getOperand(0);
05202     // If the element number isn't a constant, we can't effectively
05203     // analyze what's going on.
05204     if (!isa<ConstantSDNode>(V.getOperand(1)))
05205       return SDValue();
05206     unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
05207     bool FoundSource = false;
05208     for (unsigned j = 0; j < SourceVecs.size(); ++j) {
05209       if (SourceVecs[j] == SourceVec) {
05210         if (MinElts[j] > EltNo)
05211           MinElts[j] = EltNo;
05212         if (MaxElts[j] < EltNo)
05213           MaxElts[j] = EltNo;
05214         FoundSource = true;
05215         break;
05216       }
05217     }
05218 
05219     // Or record a new source if not...
05220     if (!FoundSource) {
05221       SourceVecs.push_back(SourceVec);
05222       MinElts.push_back(EltNo);
05223       MaxElts.push_back(EltNo);
05224     }
05225   }
05226 
05227   // Currently only do something sane when at most two source vectors
05228   // are involved.
05229   if (SourceVecs.size() > 2)
05230     return SDValue();
05231 
05232   SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) };
05233   int VEXTOffsets[2] = {0, 0};
05234 
05235   // This loop extracts the usage patterns of the source vectors
05236   // and prepares appropriate SDValues for a shuffle if possible.
05237   for (unsigned i = 0; i < SourceVecs.size(); ++i) {
05238     if (SourceVecs[i].getValueType() == VT) {
05239       // No VEXT necessary
05240       ShuffleSrcs[i] = SourceVecs[i];
05241       VEXTOffsets[i] = 0;
05242       continue;
05243     } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) {
05244       // It probably isn't worth padding out a smaller vector just to
05245       // break it down again in a shuffle.
05246       return SDValue();
05247     }
05248 
05249     // Since only 64-bit and 128-bit vectors are legal on ARM and
05250     // we've eliminated the other cases...
05251     assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts &&
05252            "unexpected vector sizes in ReconstructShuffle");
05253 
05254     if (MaxElts[i] - MinElts[i] >= NumElts) {
05255       // Span too large for a VEXT to cope
05256       return SDValue();
05257     }
05258 
05259     if (MinElts[i] >= NumElts) {
05260       // The extraction can just take the second half
05261       VEXTOffsets[i] = NumElts;
05262       ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05263                                    SourceVecs[i],
05264                                    DAG.getIntPtrConstant(NumElts));
05265     } else if (MaxElts[i] < NumElts) {
05266       // The extraction can just take the first half
05267       VEXTOffsets[i] = 0;
05268       ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05269                                    SourceVecs[i],
05270                                    DAG.getIntPtrConstant(0));
05271     } else {
05272       // An actual VEXT is needed
05273       VEXTOffsets[i] = MinElts[i];
05274       SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05275                                      SourceVecs[i],
05276                                      DAG.getIntPtrConstant(0));
05277       SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05278                                      SourceVecs[i],
05279                                      DAG.getIntPtrConstant(NumElts));
05280       ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2,
05281                                    DAG.getConstant(VEXTOffsets[i], MVT::i32));
05282     }
05283   }
05284 
05285   SmallVector<int, 8> Mask;
05286 
05287   for (unsigned i = 0; i < NumElts; ++i) {
05288     SDValue Entry = Op.getOperand(i);
05289     if (Entry.getOpcode() == ISD::UNDEF) {
05290       Mask.push_back(-1);
05291       continue;
05292     }
05293 
05294     SDValue ExtractVec = Entry.getOperand(0);
05295     int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i)
05296                                           .getOperand(1))->getSExtValue();
05297     if (ExtractVec == SourceVecs[0]) {
05298       Mask.push_back(ExtractElt - VEXTOffsets[0]);
05299     } else {
05300       Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]);
05301     }
05302   }
05303 
05304   // Final check before we try to produce nonsense...
05305   if (isShuffleMaskLegal(Mask, VT))
05306     return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1],
05307                                 &Mask[0]);
05308 
05309   return SDValue();
05310 }
05311 
05312 /// isShuffleMaskLegal - Targets can use this to indicate that they only
05313 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
05314 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
05315 /// are assumed to be legal.
05316 bool
05317 ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
05318                                       EVT VT) const {
05319   if (VT.getVectorNumElements() == 4 &&
05320       (VT.is128BitVector() || VT.is64BitVector())) {
05321     unsigned PFIndexes[4];
05322     for (unsigned i = 0; i != 4; ++i) {
05323       if (M[i] < 0)
05324         PFIndexes[i] = 8;
05325       else
05326         PFIndexes[i] = M[i];
05327     }
05328 
05329     // Compute the index in the perfect shuffle table.
05330     unsigned PFTableIndex =
05331       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
05332     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
05333     unsigned Cost = (PFEntry >> 30);
05334 
05335     if (Cost <= 4)
05336       return true;
05337   }
05338 
05339   bool ReverseVEXT;
05340   unsigned Imm, WhichResult;
05341 
05342   unsigned EltSize = VT.getVectorElementType().getSizeInBits();
05343   return (EltSize >= 32 ||
05344           ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
05345           isVREVMask(M, VT, 64) ||
05346           isVREVMask(M, VT, 32) ||
05347           isVREVMask(M, VT, 16) ||
05348           isVEXTMask(M, VT, ReverseVEXT, Imm) ||
05349           isVTBLMask(M, VT) ||
05350           isVTRNMask(M, VT, WhichResult) ||
05351