LLVM API Documentation

NVPTXISelLowering.cpp
Go to the documentation of this file.
00001 //
00002 //                     The LLVM Compiler Infrastructure
00003 //
00004 // This file is distributed under the University of Illinois Open Source
00005 // License. See LICENSE.TXT for details.
00006 //
00007 //===----------------------------------------------------------------------===//
00008 //
00009 // This file defines the interfaces that NVPTX uses to lower LLVM code into a
00010 // selection DAG.
00011 //
00012 //===----------------------------------------------------------------------===//
00013 
00014 #include "NVPTXISelLowering.h"
00015 #include "NVPTX.h"
00016 #include "NVPTXTargetMachine.h"
00017 #include "NVPTXTargetObjectFile.h"
00018 #include "NVPTXUtilities.h"
00019 #include "llvm/CodeGen/Analysis.h"
00020 #include "llvm/CodeGen/MachineFrameInfo.h"
00021 #include "llvm/CodeGen/MachineFunction.h"
00022 #include "llvm/CodeGen/MachineInstrBuilder.h"
00023 #include "llvm/CodeGen/MachineRegisterInfo.h"
00024 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
00025 #include "llvm/IR/CallSite.h"
00026 #include "llvm/IR/DerivedTypes.h"
00027 #include "llvm/IR/Function.h"
00028 #include "llvm/IR/GlobalValue.h"
00029 #include "llvm/IR/IntrinsicInst.h"
00030 #include "llvm/IR/Intrinsics.h"
00031 #include "llvm/IR/Module.h"
00032 #include "llvm/MC/MCSectionELF.h"
00033 #include "llvm/Support/CommandLine.h"
00034 #include "llvm/Support/Debug.h"
00035 #include "llvm/Support/ErrorHandling.h"
00036 #include "llvm/Support/raw_ostream.h"
00037 #include <sstream>
00038 
00039 #undef DEBUG_TYPE
00040 #define DEBUG_TYPE "nvptx-lower"
00041 
00042 using namespace llvm;
00043 
00044 static unsigned int uniqueCallSite = 0;
00045 
00046 static cl::opt<bool> sched4reg(
00047     "nvptx-sched4reg",
00048     cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
00049 
00050 static bool IsPTXVectorType(MVT VT) {
00051   switch (VT.SimpleTy) {
00052   default:
00053     return false;
00054   case MVT::v2i1:
00055   case MVT::v4i1:
00056   case MVT::v2i8:
00057   case MVT::v4i8:
00058   case MVT::v2i16:
00059   case MVT::v4i16:
00060   case MVT::v2i32:
00061   case MVT::v4i32:
00062   case MVT::v2i64:
00063   case MVT::v2f32:
00064   case MVT::v4f32:
00065   case MVT::v2f64:
00066     return true;
00067   }
00068 }
00069 
00070 /// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
00071 /// EVTs that compose it.  Unlike ComputeValueVTs, this will break apart vectors
00072 /// into their primitive components.
00073 /// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
00074 /// same number of types as the Ins/Outs arrays in LowerFormalArguments,
00075 /// LowerCall, and LowerReturn.
00076 static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
00077                                SmallVectorImpl<EVT> &ValueVTs,
00078                                SmallVectorImpl<uint64_t> *Offsets = 0,
00079                                uint64_t StartingOffset = 0) {
00080   SmallVector<EVT, 16> TempVTs;
00081   SmallVector<uint64_t, 16> TempOffsets;
00082 
00083   ComputeValueVTs(TLI, Ty, TempVTs, &TempOffsets, StartingOffset);
00084   for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
00085     EVT VT = TempVTs[i];
00086     uint64_t Off = TempOffsets[i];
00087     if (VT.isVector())
00088       for (unsigned j = 0, je = VT.getVectorNumElements(); j != je; ++j) {
00089         ValueVTs.push_back(VT.getVectorElementType());
00090         if (Offsets)
00091           Offsets->push_back(Off+j*VT.getVectorElementType().getStoreSize());
00092       }
00093     else {
00094       ValueVTs.push_back(VT);
00095       if (Offsets)
00096         Offsets->push_back(Off);
00097     }
00098   }
00099 }
00100 
// NVPTXTargetLowering constructor: registers the legal register classes and
// declares, per operation and type, whether the target handles it natively
// (Legal), needs target code (Custom), or should be expanded by the
// legalizer (Expand).  Order matters only in that computeRegisterProperties()
// must run after all of the actions have been set.
NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
    : TargetLowering(TM, new NVPTXTargetObjectFile()), nvTM(&TM),
      nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {

  // Always lower memset, memcpy, and memmove intrinsics to load/store
  // instructions rather than generating calls to memset, memcpy, or memmove:
  // the unlimited thresholds below mean the inline expansion is always taken.
  MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
  MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
  MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;

  // Booleans are represented as 0 / -1 (all-ones).
  setBooleanContents(ZeroOrNegativeOneBooleanContent);

  // Jump is Expensive. Don't create extra control flow for 'and', 'or'
  // condition branches.
  setJumpIsExpensive(true);

  // By default, use the Source scheduling; -nvptx-sched4reg switches to the
  // register-pressure-aware scheduler.
  if (sched4reg)
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Source);

  addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
  addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
  addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
  addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
  addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
  addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);

  // Operations not directly supported by NVPTX.
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  // Some SIGN_EXTEND_INREG can be done using cvt instruction.
  // For others we will expand to a SHL/SRA pair.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // i64/i32 rotates are legal only when the subtarget provides the
  // corresponding rotate support; otherwise fall back to expansion.
  if (nvptxSubtarget.hasROT64()) {
    setOperationAction(ISD::ROTL, MVT::i64, Legal);
    setOperationAction(ISD::ROTR, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::ROTL, MVT::i64, Expand);
    setOperationAction(ISD::ROTR, MVT::i64, Expand);
  }
  if (nvptxSubtarget.hasROT32()) {
    setOperationAction(ISD::ROTL, MVT::i32, Legal);
    setOperationAction(ISD::ROTR, MVT::i32, Legal);
  } else {
    setOperationAction(ISD::ROTL, MVT::i32, Expand);
    setOperationAction(ISD::ROTR, MVT::i32, Expand);
  }

  // Sub-32-bit rotates and all byte swaps are always expanded.
  setOperationAction(ISD::ROTL, MVT::i16, Expand);
  setOperationAction(ISD::ROTR, MVT::i16, Expand);
  setOperationAction(ISD::ROTL, MVT::i8, Expand);
  setOperationAction(ISD::ROTR, MVT::i8, Expand);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  // Indirect branch is not supported.
  // This also disables Jump Table creation.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  // We want to legalize constant related memmove and memcopy
  // intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  // Turn FP extload into load/fextend
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PTX does not support load / store predicate registers
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  // Extending loads of i1 are promoted to a wider type; truncating stores
  // down to i1 are expanded.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i32, MVT::i1, Expand);
  setTruncStoreAction(MVT::i16, MVT::i1, Expand);
  setTruncStoreAction(MVT::i8, MVT::i1, Expand);

  // This is legal in NVPTX
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);

  // TRAP can be lowered to PTX trap
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  setOperationAction(ISD::ADDC, MVT::i64, Expand);
  setOperationAction(ISD::ADDE, MVT::i64, Expand);

  // Register custom handling for vector loads/stores
  for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE;
       ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (IsPTXVectorType(VT)) {
      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
    }
  }

  // Custom handling for i8 intrinsics
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);

  // Count-leading-zeros and popcount are legal; count-trailing-zeros is
  // expanded (presumably in terms of CTLZ -- confirm against the expansion).
  setOperationAction(ISD::CTLZ, MVT::i16, Legal);
  setOperationAction(ISD::CTLZ, MVT::i32, Legal);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Legal);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Legal);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Legal);
  setOperationAction(ISD::CTTZ, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTPOP, MVT::i16, Legal);
  setOperationAction(ISD::CTPOP, MVT::i32, Legal);
  setOperationAction(ISD::CTPOP, MVT::i64, Legal);

  // Now deduce the information based on the above mentioned
  // actions
  computeRegisterProperties();
}
00244 
00245 const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
00246   switch (Opcode) {
00247   default:
00248     return 0;
00249   case NVPTXISD::CALL:
00250     return "NVPTXISD::CALL";
00251   case NVPTXISD::RET_FLAG:
00252     return "NVPTXISD::RET_FLAG";
00253   case NVPTXISD::Wrapper:
00254     return "NVPTXISD::Wrapper";
00255   case NVPTXISD::DeclareParam:
00256     return "NVPTXISD::DeclareParam";
00257   case NVPTXISD::DeclareScalarParam:
00258     return "NVPTXISD::DeclareScalarParam";
00259   case NVPTXISD::DeclareRet:
00260     return "NVPTXISD::DeclareRet";
00261   case NVPTXISD::DeclareRetParam:
00262     return "NVPTXISD::DeclareRetParam";
00263   case NVPTXISD::PrintCall:
00264     return "NVPTXISD::PrintCall";
00265   case NVPTXISD::LoadParam:
00266     return "NVPTXISD::LoadParam";
00267   case NVPTXISD::LoadParamV2:
00268     return "NVPTXISD::LoadParamV2";
00269   case NVPTXISD::LoadParamV4:
00270     return "NVPTXISD::LoadParamV4";
00271   case NVPTXISD::StoreParam:
00272     return "NVPTXISD::StoreParam";
00273   case NVPTXISD::StoreParamV2:
00274     return "NVPTXISD::StoreParamV2";
00275   case NVPTXISD::StoreParamV4:
00276     return "NVPTXISD::StoreParamV4";
00277   case NVPTXISD::StoreParamS32:
00278     return "NVPTXISD::StoreParamS32";
00279   case NVPTXISD::StoreParamU32:
00280     return "NVPTXISD::StoreParamU32";
00281   case NVPTXISD::CallArgBegin:
00282     return "NVPTXISD::CallArgBegin";
00283   case NVPTXISD::CallArg:
00284     return "NVPTXISD::CallArg";
00285   case NVPTXISD::LastCallArg:
00286     return "NVPTXISD::LastCallArg";
00287   case NVPTXISD::CallArgEnd:
00288     return "NVPTXISD::CallArgEnd";
00289   case NVPTXISD::CallVoid:
00290     return "NVPTXISD::CallVoid";
00291   case NVPTXISD::CallVal:
00292     return "NVPTXISD::CallVal";
00293   case NVPTXISD::CallSymbol:
00294     return "NVPTXISD::CallSymbol";
00295   case NVPTXISD::Prototype:
00296     return "NVPTXISD::Prototype";
00297   case NVPTXISD::MoveParam:
00298     return "NVPTXISD::MoveParam";
00299   case NVPTXISD::StoreRetval:
00300     return "NVPTXISD::StoreRetval";
00301   case NVPTXISD::StoreRetvalV2:
00302     return "NVPTXISD::StoreRetvalV2";
00303   case NVPTXISD::StoreRetvalV4:
00304     return "NVPTXISD::StoreRetvalV4";
00305   case NVPTXISD::PseudoUseParam:
00306     return "NVPTXISD::PseudoUseParam";
00307   case NVPTXISD::RETURN:
00308     return "NVPTXISD::RETURN";
00309   case NVPTXISD::CallSeqBegin:
00310     return "NVPTXISD::CallSeqBegin";
00311   case NVPTXISD::CallSeqEnd:
00312     return "NVPTXISD::CallSeqEnd";
00313   case NVPTXISD::CallPrototype:
00314     return "NVPTXISD::CallPrototype";
00315   case NVPTXISD::LoadV2:
00316     return "NVPTXISD::LoadV2";
00317   case NVPTXISD::LoadV4:
00318     return "NVPTXISD::LoadV4";
00319   case NVPTXISD::LDGV2:
00320     return "NVPTXISD::LDGV2";
00321   case NVPTXISD::LDGV4:
00322     return "NVPTXISD::LDGV4";
00323   case NVPTXISD::LDUV2:
00324     return "NVPTXISD::LDUV2";
00325   case NVPTXISD::LDUV4:
00326     return "NVPTXISD::LDUV4";
00327   case NVPTXISD::StoreV2:
00328     return "NVPTXISD::StoreV2";
00329   case NVPTXISD::StoreV4:
00330     return "NVPTXISD::StoreV4";
00331   case NVPTXISD::Tex1DFloatI32:        return "NVPTXISD::Tex1DFloatI32";
00332   case NVPTXISD::Tex1DFloatFloat:      return "NVPTXISD::Tex1DFloatFloat";
00333   case NVPTXISD::Tex1DFloatFloatLevel:
00334     return "NVPTXISD::Tex1DFloatFloatLevel";
00335   case NVPTXISD::Tex1DFloatFloatGrad:
00336     return "NVPTXISD::Tex1DFloatFloatGrad";
00337   case NVPTXISD::Tex1DI32I32:          return "NVPTXISD::Tex1DI32I32";
00338   case NVPTXISD::Tex1DI32Float:        return "NVPTXISD::Tex1DI32Float";
00339   case NVPTXISD::Tex1DI32FloatLevel:
00340     return "NVPTXISD::Tex1DI32FloatLevel";
00341   case NVPTXISD::Tex1DI32FloatGrad:
00342     return "NVPTXISD::Tex1DI32FloatGrad";
00343   case NVPTXISD::Tex1DArrayFloatI32:   return "NVPTXISD::Tex2DArrayFloatI32";
00344   case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
00345   case NVPTXISD::Tex1DArrayFloatFloatLevel:
00346     return "NVPTXISD::Tex2DArrayFloatFloatLevel";
00347   case NVPTXISD::Tex1DArrayFloatFloatGrad:
00348     return "NVPTXISD::Tex2DArrayFloatFloatGrad";
00349   case NVPTXISD::Tex1DArrayI32I32:     return "NVPTXISD::Tex2DArrayI32I32";
00350   case NVPTXISD::Tex1DArrayI32Float:   return "NVPTXISD::Tex2DArrayI32Float";
00351   case NVPTXISD::Tex1DArrayI32FloatLevel:
00352     return "NVPTXISD::Tex2DArrayI32FloatLevel";
00353   case NVPTXISD::Tex1DArrayI32FloatGrad:
00354     return "NVPTXISD::Tex2DArrayI32FloatGrad";
00355   case NVPTXISD::Tex2DFloatI32:        return "NVPTXISD::Tex2DFloatI32";
00356   case NVPTXISD::Tex2DFloatFloat:      return "NVPTXISD::Tex2DFloatFloat";
00357   case NVPTXISD::Tex2DFloatFloatLevel:
00358     return "NVPTXISD::Tex2DFloatFloatLevel";
00359   case NVPTXISD::Tex2DFloatFloatGrad:
00360     return "NVPTXISD::Tex2DFloatFloatGrad";
00361   case NVPTXISD::Tex2DI32I32:          return "NVPTXISD::Tex2DI32I32";
00362   case NVPTXISD::Tex2DI32Float:        return "NVPTXISD::Tex2DI32Float";
00363   case NVPTXISD::Tex2DI32FloatLevel:
00364     return "NVPTXISD::Tex2DI32FloatLevel";
00365   case NVPTXISD::Tex2DI32FloatGrad:
00366     return "NVPTXISD::Tex2DI32FloatGrad";
00367   case NVPTXISD::Tex2DArrayFloatI32:   return "NVPTXISD::Tex2DArrayFloatI32";
00368   case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
00369   case NVPTXISD::Tex2DArrayFloatFloatLevel:
00370     return "NVPTXISD::Tex2DArrayFloatFloatLevel";
00371   case NVPTXISD::Tex2DArrayFloatFloatGrad:
00372     return "NVPTXISD::Tex2DArrayFloatFloatGrad";
00373   case NVPTXISD::Tex2DArrayI32I32:     return "NVPTXISD::Tex2DArrayI32I32";
00374   case NVPTXISD::Tex2DArrayI32Float:   return "NVPTXISD::Tex2DArrayI32Float";
00375   case NVPTXISD::Tex2DArrayI32FloatLevel:
00376     return "NVPTXISD::Tex2DArrayI32FloatLevel";
00377   case NVPTXISD::Tex2DArrayI32FloatGrad:
00378     return "NVPTXISD::Tex2DArrayI32FloatGrad";
00379   case NVPTXISD::Tex3DFloatI32:        return "NVPTXISD::Tex3DFloatI32";
00380   case NVPTXISD::Tex3DFloatFloat:      return "NVPTXISD::Tex3DFloatFloat";
00381   case NVPTXISD::Tex3DFloatFloatLevel:
00382     return "NVPTXISD::Tex3DFloatFloatLevel";
00383   case NVPTXISD::Tex3DFloatFloatGrad:
00384     return "NVPTXISD::Tex3DFloatFloatGrad";
00385   case NVPTXISD::Tex3DI32I32:          return "NVPTXISD::Tex3DI32I32";
00386   case NVPTXISD::Tex3DI32Float:        return "NVPTXISD::Tex3DI32Float";
00387   case NVPTXISD::Tex3DI32FloatLevel:
00388     return "NVPTXISD::Tex3DI32FloatLevel";
00389   case NVPTXISD::Tex3DI32FloatGrad:
00390     return "NVPTXISD::Tex3DI32FloatGrad";
00391 
00392   case NVPTXISD::Suld1DI8Trap:          return "NVPTXISD::Suld1DI8Trap";
00393   case NVPTXISD::Suld1DI16Trap:         return "NVPTXISD::Suld1DI16Trap";
00394   case NVPTXISD::Suld1DI32Trap:         return "NVPTXISD::Suld1DI32Trap";
00395   case NVPTXISD::Suld1DV2I8Trap:        return "NVPTXISD::Suld1DV2I8Trap";
00396   case NVPTXISD::Suld1DV2I16Trap:       return "NVPTXISD::Suld1DV2I16Trap";
00397   case NVPTXISD::Suld1DV2I32Trap:       return "NVPTXISD::Suld1DV2I32Trap";
00398   case NVPTXISD::Suld1DV4I8Trap:        return "NVPTXISD::Suld1DV4I8Trap";
00399   case NVPTXISD::Suld1DV4I16Trap:       return "NVPTXISD::Suld1DV4I16Trap";
00400   case NVPTXISD::Suld1DV4I32Trap:       return "NVPTXISD::Suld1DV4I32Trap";
00401 
00402   case NVPTXISD::Suld1DArrayI8Trap:     return "NVPTXISD::Suld1DArrayI8Trap";
00403   case NVPTXISD::Suld1DArrayI16Trap:    return "NVPTXISD::Suld1DArrayI16Trap";
00404   case NVPTXISD::Suld1DArrayI32Trap:    return "NVPTXISD::Suld1DArrayI32Trap";
00405   case NVPTXISD::Suld1DArrayV2I8Trap:   return "NVPTXISD::Suld1DArrayV2I8Trap";
00406   case NVPTXISD::Suld1DArrayV2I16Trap:  return "NVPTXISD::Suld1DArrayV2I16Trap";
00407   case NVPTXISD::Suld1DArrayV2I32Trap:  return "NVPTXISD::Suld1DArrayV2I32Trap";
00408   case NVPTXISD::Suld1DArrayV4I8Trap:   return "NVPTXISD::Suld1DArrayV4I8Trap";
00409   case NVPTXISD::Suld1DArrayV4I16Trap:  return "NVPTXISD::Suld1DArrayV4I16Trap";
00410   case NVPTXISD::Suld1DArrayV4I32Trap:  return "NVPTXISD::Suld1DArrayV4I32Trap";
00411 
00412   case NVPTXISD::Suld2DI8Trap:          return "NVPTXISD::Suld2DI8Trap";
00413   case NVPTXISD::Suld2DI16Trap:         return "NVPTXISD::Suld2DI16Trap";
00414   case NVPTXISD::Suld2DI32Trap:         return "NVPTXISD::Suld2DI32Trap";
00415   case NVPTXISD::Suld2DV2I8Trap:        return "NVPTXISD::Suld2DV2I8Trap";
00416   case NVPTXISD::Suld2DV2I16Trap:       return "NVPTXISD::Suld2DV2I16Trap";
00417   case NVPTXISD::Suld2DV2I32Trap:       return "NVPTXISD::Suld2DV2I32Trap";
00418   case NVPTXISD::Suld2DV4I8Trap:        return "NVPTXISD::Suld2DV4I8Trap";
00419   case NVPTXISD::Suld2DV4I16Trap:       return "NVPTXISD::Suld2DV4I16Trap";
00420   case NVPTXISD::Suld2DV4I32Trap:       return "NVPTXISD::Suld2DV4I32Trap";
00421 
00422   case NVPTXISD::Suld2DArrayI8Trap:     return "NVPTXISD::Suld2DArrayI8Trap";
00423   case NVPTXISD::Suld2DArrayI16Trap:    return "NVPTXISD::Suld2DArrayI16Trap";
00424   case NVPTXISD::Suld2DArrayI32Trap:    return "NVPTXISD::Suld2DArrayI32Trap";
00425   case NVPTXISD::Suld2DArrayV2I8Trap:   return "NVPTXISD::Suld2DArrayV2I8Trap";
00426   case NVPTXISD::Suld2DArrayV2I16Trap:  return "NVPTXISD::Suld2DArrayV2I16Trap";
00427   case NVPTXISD::Suld2DArrayV2I32Trap:  return "NVPTXISD::Suld2DArrayV2I32Trap";
00428   case NVPTXISD::Suld2DArrayV4I8Trap:   return "NVPTXISD::Suld2DArrayV4I8Trap";
00429   case NVPTXISD::Suld2DArrayV4I16Trap:  return "NVPTXISD::Suld2DArrayV4I16Trap";
00430   case NVPTXISD::Suld2DArrayV4I32Trap:  return "NVPTXISD::Suld2DArrayV4I32Trap";
00431 
00432   case NVPTXISD::Suld3DI8Trap:          return "NVPTXISD::Suld3DI8Trap";
00433   case NVPTXISD::Suld3DI16Trap:         return "NVPTXISD::Suld3DI16Trap";
00434   case NVPTXISD::Suld3DI32Trap:         return "NVPTXISD::Suld3DI32Trap";
00435   case NVPTXISD::Suld3DV2I8Trap:        return "NVPTXISD::Suld3DV2I8Trap";
00436   case NVPTXISD::Suld3DV2I16Trap:       return "NVPTXISD::Suld3DV2I16Trap";
00437   case NVPTXISD::Suld3DV2I32Trap:       return "NVPTXISD::Suld3DV2I32Trap";
00438   case NVPTXISD::Suld3DV4I8Trap:        return "NVPTXISD::Suld3DV4I8Trap";
00439   case NVPTXISD::Suld3DV4I16Trap:       return "NVPTXISD::Suld3DV4I16Trap";
00440   case NVPTXISD::Suld3DV4I32Trap:       return "NVPTXISD::Suld3DV4I32Trap";
00441   }
00442 }
00443 
00444 bool NVPTXTargetLowering::shouldSplitVectorType(EVT VT) const {
00445   return VT.getScalarType() == MVT::i1;
00446 }
00447 
00448 SDValue
00449 NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
00450   SDLoc dl(Op);
00451   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
00452   Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
00453   return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
00454 }
00455 
00456 std::string
00457 NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
00458                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
00459                                   unsigned retAlignment,
00460                                   const ImmutableCallSite *CS) const {
00461 
00462   bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
00463   assert(isABI && "Non-ABI compilation is not supported");
00464   if (!isABI)
00465     return "";
00466 
00467   std::stringstream O;
00468   O << "prototype_" << uniqueCallSite << " : .callprototype ";
00469 
00470   if (retTy->getTypeID() == Type::VoidTyID) {
00471     O << "()";
00472   } else {
00473     O << "(";
00474     if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
00475       unsigned size = 0;
00476       if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
00477         size = ITy->getBitWidth();
00478         if (size < 32)
00479           size = 32;
00480       } else {
00481         assert(retTy->isFloatingPointTy() &&
00482                "Floating point type expected here");
00483         size = retTy->getPrimitiveSizeInBits();
00484       }
00485 
00486       O << ".param .b" << size << " _";
00487     } else if (isa<PointerType>(retTy)) {
00488       O << ".param .b" << getPointerTy().getSizeInBits() << " _";
00489     } else {
00490       if ((retTy->getTypeID() == Type::StructTyID) || isa<VectorType>(retTy)) {
00491         SmallVector<EVT, 16> vtparts;
00492         ComputeValueVTs(*this, retTy, vtparts);
00493         unsigned totalsz = 0;
00494         for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
00495           unsigned elems = 1;
00496           EVT elemtype = vtparts[i];
00497           if (vtparts[i].isVector()) {
00498             elems = vtparts[i].getVectorNumElements();
00499             elemtype = vtparts[i].getVectorElementType();
00500           }
00501           // TODO: no need to loop
00502           for (unsigned j = 0, je = elems; j != je; ++j) {
00503             unsigned sz = elemtype.getSizeInBits();
00504             if (elemtype.isInteger() && (sz < 8))
00505               sz = 8;
00506             totalsz += sz / 8;
00507           }
00508         }
00509         O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]";
00510       } else {
00511         assert(false && "Unknown return type");
00512       }
00513     }
00514     O << ") ";
00515   }
00516   O << "_ (";
00517 
00518   bool first = true;
00519   MVT thePointerTy = getPointerTy();
00520 
00521   unsigned OIdx = 0;
00522   for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
00523     Type *Ty = Args[i].Ty;
00524     if (!first) {
00525       O << ", ";
00526     }
00527     first = false;
00528 
00529     if (Outs[OIdx].Flags.isByVal() == false) {
00530       if (Ty->isAggregateType() || Ty->isVectorTy()) {
00531         unsigned align = 0;
00532         const CallInst *CallI = cast<CallInst>(CS->getInstruction());
00533         const DataLayout *TD = getDataLayout();
00534         // +1 because index 0 is reserved for return type alignment
00535         if (!llvm::getAlign(*CallI, i + 1, align))
00536           align = TD->getABITypeAlignment(Ty);
00537         unsigned sz = TD->getTypeAllocSize(Ty);
00538         O << ".param .align " << align << " .b8 ";
00539         O << "_";
00540         O << "[" << sz << "]";
00541         // update the index for Outs
00542         SmallVector<EVT, 16> vtparts;
00543         ComputeValueVTs(*this, Ty, vtparts);
00544         if (unsigned len = vtparts.size())
00545           OIdx += len - 1;
00546         continue;
00547       }
00548        // i8 types in IR will be i16 types in SDAG
00549       assert((getValueType(Ty) == Outs[OIdx].VT ||
00550              (getValueType(Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
00551              "type mismatch between callee prototype and arguments");
00552       // scalar type
00553       unsigned sz = 0;
00554       if (isa<IntegerType>(Ty)) {
00555         sz = cast<IntegerType>(Ty)->getBitWidth();
00556         if (sz < 32)
00557           sz = 32;
00558       } else if (isa<PointerType>(Ty))
00559         sz = thePointerTy.getSizeInBits();
00560       else
00561         sz = Ty->getPrimitiveSizeInBits();
00562       O << ".param .b" << sz << " ";
00563       O << "_";
00564       continue;
00565     }
00566     const PointerType *PTy = dyn_cast<PointerType>(Ty);
00567     assert(PTy && "Param with byval attribute should be a pointer type");
00568     Type *ETy = PTy->getElementType();
00569 
00570     unsigned align = Outs[OIdx].Flags.getByValAlign();
00571     unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
00572     O << ".param .align " << align << " .b8 ";
00573     O << "_";
00574     O << "[" << sz << "]";
00575   }
00576   O << ");";
00577   return O.str();
00578 }
00579 
00580 unsigned
00581 NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
00582                                           const ImmutableCallSite *CS,
00583                                           Type *Ty,
00584                                           unsigned Idx) const {
00585   const DataLayout *TD = getDataLayout();
00586   unsigned Align = 0;
00587   const Value *DirectCallee = CS->getCalledFunction();
00588 
00589   if (!DirectCallee) {
00590     // We don't have a direct function symbol, but that may be because of
00591     // constant cast instructions in the call.
00592     const Instruction *CalleeI = CS->getInstruction();
00593     assert(CalleeI && "Call target is not a function or derived value?");
00594 
00595     // With bitcast'd call targets, the instruction will be the call
00596     if (isa<CallInst>(CalleeI)) {
00597       // Check if we have call alignment metadata
00598       if (llvm::getAlign(*cast<CallInst>(CalleeI), Idx, Align))
00599         return Align;
00600 
00601       const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
00602       // Ignore any bitcast instructions
00603       while(isa<ConstantExpr>(CalleeV)) {
00604         const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
00605         if (!CE->isCast())
00606           break;
00607         // Look through the bitcast
00608         CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
00609       }
00610 
00611       // We have now looked past all of the bitcasts.  Do we finally have a
00612       // Function?
00613       if (isa<Function>(CalleeV))
00614         DirectCallee = CalleeV;
00615     }
00616   }
00617 
00618   // Check for function alignment information if we found that the
00619   // ultimate target is a Function
00620   if (DirectCallee)
00621     if (llvm::getAlign(*cast<Function>(DirectCallee), Idx, Align))
00622       return Align;
00623 
00624   // Call is indirect or alignment information is not available, fall back to
00625   // the ABI type alignment
00626   return TD->getABITypeAlignment(Ty);
00627 }
00628 
00629 SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
00630                                        SmallVectorImpl<SDValue> &InVals) const {
00631   SelectionDAG &DAG = CLI.DAG;
00632   SDLoc dl = CLI.DL;
00633   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
00634   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
00635   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
00636   SDValue Chain = CLI.Chain;
00637   SDValue Callee = CLI.Callee;
00638   bool &isTailCall = CLI.IsTailCall;
00639   ArgListTy &Args = CLI.Args;
00640   Type *retTy = CLI.RetTy;
00641   ImmutableCallSite *CS = CLI.CS;
00642 
00643   bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
00644   assert(isABI && "Non-ABI compilation is not supported");
00645   if (!isABI)
00646     return Chain;
00647   const DataLayout *TD = getDataLayout();
00648   MachineFunction &MF = DAG.getMachineFunction();
00649   const Function *F = MF.getFunction();
00650 
00651   SDValue tempChain = Chain;
00652   Chain =
00653       DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
00654                            dl);
00655   SDValue InFlag = Chain.getValue(1);
00656 
00657   unsigned paramCount = 0;
00658   // Args.size() and Outs.size() need not match.
00659   // Outs.size() will be larger
00660   //   * if there is an aggregate argument with multiple fields (each field
00661   //     showing up separately in Outs)
00662   //   * if there is a vector argument with more than typical vector-length
00663   //     elements (generally if more than 4) where each vector element is
00664   //     individually present in Outs.
00665   // So a different index should be used for indexing into Outs/OutVals.
00666   // See similar issue in LowerFormalArguments.
00667   unsigned OIdx = 0;
00668   // Declare the .params or .reg need to pass values
00669   // to the function
00670   for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
00671     EVT VT = Outs[OIdx].VT;
00672     Type *Ty = Args[i].Ty;
00673 
00674     if (Outs[OIdx].Flags.isByVal() == false) {
00675       if (Ty->isAggregateType()) {
00676         // aggregate
00677         SmallVector<EVT, 16> vtparts;
00678         ComputeValueVTs(*this, Ty, vtparts);
00679 
00680         unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
00681         // declare .param .align <align> .b8 .param<n>[<size>];
00682         unsigned sz = TD->getTypeAllocSize(Ty);
00683         SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
00684         SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
00685                                       DAG.getConstant(paramCount, MVT::i32),
00686                                       DAG.getConstant(sz, MVT::i32), InFlag };
00687         Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
00688                             DeclareParamOps, 5);
00689         InFlag = Chain.getValue(1);
00690         unsigned curOffset = 0;
00691         for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
00692           unsigned elems = 1;
00693           EVT elemtype = vtparts[j];
00694           if (vtparts[j].isVector()) {
00695             elems = vtparts[j].getVectorNumElements();
00696             elemtype = vtparts[j].getVectorElementType();
00697           }
00698           for (unsigned k = 0, ke = elems; k != ke; ++k) {
00699             unsigned sz = elemtype.getSizeInBits();
00700             if (elemtype.isInteger() && (sz < 8))
00701               sz = 8;
00702             SDValue StVal = OutVals[OIdx];
00703             if (elemtype.getSizeInBits() < 16) {
00704               StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
00705             }
00706             SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
00707             SDValue CopyParamOps[] = { Chain,
00708                                        DAG.getConstant(paramCount, MVT::i32),
00709                                        DAG.getConstant(curOffset, MVT::i32),
00710                                        StVal, InFlag };
00711             Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
00712                                             CopyParamVTs, &CopyParamOps[0], 5,
00713                                             elemtype, MachinePointerInfo());
00714             InFlag = Chain.getValue(1);
00715             curOffset += sz / 8;
00716             ++OIdx;
00717           }
00718         }
00719         if (vtparts.size() > 0)
00720           --OIdx;
00721         ++paramCount;
00722         continue;
00723       }
00724       if (Ty->isVectorTy()) {
00725         EVT ObjectVT = getValueType(Ty);
00726         unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
00727         // declare .param .align <align> .b8 .param<n>[<size>];
00728         unsigned sz = TD->getTypeAllocSize(Ty);
00729         SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
00730         SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
00731                                       DAG.getConstant(paramCount, MVT::i32),
00732                                       DAG.getConstant(sz, MVT::i32), InFlag };
00733         Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
00734                             DeclareParamOps, 5);
00735         InFlag = Chain.getValue(1);
00736         unsigned NumElts = ObjectVT.getVectorNumElements();
00737         EVT EltVT = ObjectVT.getVectorElementType();
00738         EVT MemVT = EltVT;
00739         bool NeedExtend = false;
00740         if (EltVT.getSizeInBits() < 16) {
00741           NeedExtend = true;
00742           EltVT = MVT::i16;
00743         }
00744 
00745         // V1 store
00746         if (NumElts == 1) {
00747           SDValue Elt = OutVals[OIdx++];
00748           if (NeedExtend)
00749             Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt);
00750 
00751           SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
00752           SDValue CopyParamOps[] = { Chain,
00753                                      DAG.getConstant(paramCount, MVT::i32),
00754                                      DAG.getConstant(0, MVT::i32), Elt,
00755                                      InFlag };
00756           Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
00757                                           CopyParamVTs, &CopyParamOps[0], 5,
00758                                           MemVT, MachinePointerInfo());
00759           InFlag = Chain.getValue(1);
00760         } else if (NumElts == 2) {
00761           SDValue Elt0 = OutVals[OIdx++];
00762           SDValue Elt1 = OutVals[OIdx++];
00763           if (NeedExtend) {
00764             Elt0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt0);
00765             Elt1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt1);
00766           }
00767 
00768           SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
00769           SDValue CopyParamOps[] = { Chain,
00770                                      DAG.getConstant(paramCount, MVT::i32),
00771                                      DAG.getConstant(0, MVT::i32), Elt0, Elt1,
00772                                      InFlag };
00773           Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl,
00774                                           CopyParamVTs, &CopyParamOps[0], 6,
00775                                           MemVT, MachinePointerInfo());
00776           InFlag = Chain.getValue(1);
00777         } else {
00778           unsigned curOffset = 0;
00779           // V4 stores
00780           // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
00781           // the
00782           // vector will be expanded to a power of 2 elements, so we know we can
00783           // always round up to the next multiple of 4 when creating the vector
00784           // stores.
00785           // e.g.  4 elem => 1 st.v4
00786           //       6 elem => 2 st.v4
00787           //       8 elem => 2 st.v4
00788           //      11 elem => 3 st.v4
00789           unsigned VecSize = 4;
00790           if (EltVT.getSizeInBits() == 64)
00791             VecSize = 2;
00792 
00793           // This is potentially only part of a vector, so assume all elements
00794           // are packed together.
00795           unsigned PerStoreOffset = MemVT.getStoreSizeInBits() / 8 * VecSize;
00796 
00797           for (unsigned i = 0; i < NumElts; i += VecSize) {
00798             // Get values
00799             SDValue StoreVal;
00800             SmallVector<SDValue, 8> Ops;
00801             Ops.push_back(Chain);
00802             Ops.push_back(DAG.getConstant(paramCount, MVT::i32));
00803             Ops.push_back(DAG.getConstant(curOffset, MVT::i32));
00804 
00805             unsigned Opc = NVPTXISD::StoreParamV2;
00806 
00807             StoreVal = OutVals[OIdx++];
00808             if (NeedExtend)
00809               StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
00810             Ops.push_back(StoreVal);
00811 
00812             if (i + 1 < NumElts) {
00813               StoreVal = OutVals[OIdx++];
00814               if (NeedExtend)
00815                 StoreVal =
00816                     DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
00817             } else {
00818               StoreVal = DAG.getUNDEF(EltVT);
00819             }
00820             Ops.push_back(StoreVal);
00821 
00822             if (VecSize == 4) {
00823               Opc = NVPTXISD::StoreParamV4;
00824               if (i + 2 < NumElts) {
00825                 StoreVal = OutVals[OIdx++];
00826                 if (NeedExtend)
00827                   StoreVal =
00828                       DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
00829               } else {
00830                 StoreVal = DAG.getUNDEF(EltVT);
00831               }
00832               Ops.push_back(StoreVal);
00833 
00834               if (i + 3 < NumElts) {
00835                 StoreVal = OutVals[OIdx++];
00836                 if (NeedExtend)
00837                   StoreVal =
00838                       DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
00839               } else {
00840                 StoreVal = DAG.getUNDEF(EltVT);
00841               }
00842               Ops.push_back(StoreVal);
00843             }
00844 
00845             Ops.push_back(InFlag);
00846 
00847             SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
00848             Chain = DAG.getMemIntrinsicNode(Opc, dl, CopyParamVTs, &Ops[0],
00849                                             Ops.size(), MemVT,
00850                                             MachinePointerInfo());
00851             InFlag = Chain.getValue(1);
00852             curOffset += PerStoreOffset;
00853           }
00854         }
00855         ++paramCount;
00856         --OIdx;
00857         continue;
00858       }
00859       // Plain scalar
00860       // for ABI,    declare .param .b<size> .param<n>;
00861       unsigned sz = VT.getSizeInBits();
00862       bool needExtend = false;
00863       if (VT.isInteger()) {
00864         if (sz < 16)
00865           needExtend = true;
00866         if (sz < 32)
00867           sz = 32;
00868       }
00869       SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
00870       SDValue DeclareParamOps[] = { Chain,
00871                                     DAG.getConstant(paramCount, MVT::i32),
00872                                     DAG.getConstant(sz, MVT::i32),
00873                                     DAG.getConstant(0, MVT::i32), InFlag };
00874       Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
00875                           DeclareParamOps, 5);
00876       InFlag = Chain.getValue(1);
00877       SDValue OutV = OutVals[OIdx];
00878       if (needExtend) {
00879         // zext/sext i1 to i16
00880         unsigned opc = ISD::ZERO_EXTEND;
00881         if (Outs[OIdx].Flags.isSExt())
00882           opc = ISD::SIGN_EXTEND;
00883         OutV = DAG.getNode(opc, dl, MVT::i16, OutV);
00884       }
00885       SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
00886       SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
00887                                  DAG.getConstant(0, MVT::i32), OutV, InFlag };
00888 
00889       unsigned opcode = NVPTXISD::StoreParam;
00890       if (Outs[OIdx].Flags.isZExt())
00891         opcode = NVPTXISD::StoreParamU32;
00892       else if (Outs[OIdx].Flags.isSExt())
00893         opcode = NVPTXISD::StoreParamS32;
00894       Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps, 5,
00895                                       VT, MachinePointerInfo());
00896 
00897       InFlag = Chain.getValue(1);
00898       ++paramCount;
00899       continue;
00900     }
00901     // struct or vector
00902     SmallVector<EVT, 16> vtparts;
00903     const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
00904     assert(PTy && "Type of a byval parameter should be pointer");
00905     ComputeValueVTs(*this, PTy->getElementType(), vtparts);
00906 
00907     // declare .param .align <align> .b8 .param<n>[<size>];
00908     unsigned sz = Outs[OIdx].Flags.getByValSize();
00909     SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
00910     // The ByValAlign in the Outs[OIdx].Flags is alway set at this point,
00911     // so we don't need to worry about natural alignment or not.
00912     // See TargetLowering::LowerCallTo().
00913     SDValue DeclareParamOps[] = {
00914       Chain, DAG.getConstant(Outs[OIdx].Flags.getByValAlign(), MVT::i32),
00915       DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32),
00916       InFlag
00917     };
00918     Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
00919                         DeclareParamOps, 5);
00920     InFlag = Chain.getValue(1);
00921     unsigned curOffset = 0;
00922     for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
00923       unsigned elems = 1;
00924       EVT elemtype = vtparts[j];
00925       if (vtparts[j].isVector()) {
00926         elems = vtparts[j].getVectorNumElements();
00927         elemtype = vtparts[j].getVectorElementType();
00928       }
00929       for (unsigned k = 0, ke = elems; k != ke; ++k) {
00930         unsigned sz = elemtype.getSizeInBits();
00931         if (elemtype.isInteger() && (sz < 8))
00932           sz = 8;
00933         SDValue srcAddr =
00934             DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx],
00935                         DAG.getConstant(curOffset, getPointerTy()));
00936         SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
00937                                      MachinePointerInfo(), false, false, false,
00938                                      0);
00939         if (elemtype.getSizeInBits() < 16) {
00940           theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
00941         }
00942         SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
00943         SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
00944                                    DAG.getConstant(curOffset, MVT::i32), theVal,
00945                                    InFlag };
00946         Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
00947                                         CopyParamOps, 5, elemtype,
00948                                         MachinePointerInfo());
00949 
00950         InFlag = Chain.getValue(1);
00951         curOffset += sz / 8;
00952       }
00953     }
00954     ++paramCount;
00955   }
00956 
00957   GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
00958   unsigned retAlignment = 0;
00959 
00960   // Handle Result
00961   if (Ins.size() > 0) {
00962     SmallVector<EVT, 16> resvtparts;
00963     ComputeValueVTs(*this, retTy, resvtparts);
00964 
00965     // Declare
00966     //  .param .align 16 .b8 retval0[<size-in-bytes>], or
00967     //  .param .b<size-in-bits> retval0
00968     unsigned resultsz = TD->getTypeAllocSizeInBits(retTy);
00969     if (retTy->isSingleValueType()) {
00970       // Scalar needs to be at least 32bit wide
00971       if (resultsz < 32)
00972         resultsz = 32;
00973       SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
00974       SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
00975                                   DAG.getConstant(resultsz, MVT::i32),
00976                                   DAG.getConstant(0, MVT::i32), InFlag };
00977       Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
00978                           DeclareRetOps, 5);
00979       InFlag = Chain.getValue(1);
00980     } else {
00981       retAlignment = getArgumentAlignment(Callee, CS, retTy, 0);
00982       SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
00983       SDValue DeclareRetOps[] = { Chain,
00984                                   DAG.getConstant(retAlignment, MVT::i32),
00985                                   DAG.getConstant(resultsz / 8, MVT::i32),
00986                                   DAG.getConstant(0, MVT::i32), InFlag };
00987       Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
00988                           DeclareRetOps, 5);
00989       InFlag = Chain.getValue(1);
00990     }
00991   }
00992 
00993   if (!Func) {
00994     // This is indirect function call case : PTX requires a prototype of the
00995     // form
00996     // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
00997     // to be emitted, and the label has to used as the last arg of call
00998     // instruction.
00999     // The prototype is embedded in a string and put as the operand for a
01000     // CallPrototype SDNode which will print out to the value of the string.
01001     SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
01002     std::string Proto = getPrototype(retTy, Args, Outs, retAlignment, CS);
01003     const char *ProtoStr =
01004       nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
01005     SDValue ProtoOps[] = {
01006       Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
01007     };
01008     Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, &ProtoOps[0], 3);
01009     InFlag = Chain.getValue(1);
01010   }
01011   // Op to just print "call"
01012   SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
01013   SDValue PrintCallOps[] = {
01014     Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, MVT::i32), InFlag
01015   };
01016   Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
01017                       dl, PrintCallVTs, PrintCallOps, 3);
01018   InFlag = Chain.getValue(1);
01019 
01020   // Ops to print out the function name
01021   SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
01022   SDValue CallVoidOps[] = { Chain, Callee, InFlag };
01023   Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps, 3);
01024   InFlag = Chain.getValue(1);
01025 
01026   // Ops to print out the param list
01027   SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
01028   SDValue CallArgBeginOps[] = { Chain, InFlag };
01029   Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
01030                       CallArgBeginOps, 2);
01031   InFlag = Chain.getValue(1);
01032 
01033   for (unsigned i = 0, e = paramCount; i != e; ++i) {
01034     unsigned opcode;
01035     if (i == (e - 1))
01036       opcode = NVPTXISD::LastCallArg;
01037     else
01038       opcode = NVPTXISD::CallArg;
01039     SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
01040     SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
01041                              DAG.getConstant(i, MVT::i32), InFlag };
01042     Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps, 4);
01043     InFlag = Chain.getValue(1);
01044   }
01045   SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
01046   SDValue CallArgEndOps[] = { Chain, DAG.getConstant(Func ? 1 : 0, MVT::i32),
01047                               InFlag };
01048   Chain =
01049       DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps, 3);
01050   InFlag = Chain.getValue(1);
01051 
01052   if (!Func) {
01053     SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
01054     SDValue PrototypeOps[] = { Chain, DAG.getConstant(uniqueCallSite, MVT::i32),
01055                                InFlag };
01056     Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps, 3);
01057     InFlag = Chain.getValue(1);
01058   }
01059 
01060   // Generate loads from param memory/moves from registers for result
01061   if (Ins.size() > 0) {
01062     unsigned resoffset = 0;
01063     if (retTy && retTy->isVectorTy()) {
01064       EVT ObjectVT = getValueType(retTy);
01065       unsigned NumElts = ObjectVT.getVectorNumElements();
01066       EVT EltVT = ObjectVT.getVectorElementType();
01067       assert(nvTM->getTargetLowering()->getNumRegisters(F->getContext(),
01068                                                         ObjectVT) == NumElts &&
01069              "Vector was not scalarized");
01070       unsigned sz = EltVT.getSizeInBits();
01071       bool needTruncate = sz < 16 ? true : false;
01072 
01073       if (NumElts == 1) {
01074         // Just a simple load
01075         std::vector<EVT> LoadRetVTs;
01076         if (needTruncate) {
01077           // If loading i1 result, generate
01078           //   load i16
01079           //   trunc i16 to i1
01080           LoadRetVTs.push_back(MVT::i16);
01081         } else
01082           LoadRetVTs.push_back(EltVT);
01083         LoadRetVTs.push_back(MVT::Other);
01084         LoadRetVTs.push_back(MVT::Glue);
01085         std::vector<SDValue> LoadRetOps;
01086         LoadRetOps.push_back(Chain);
01087         LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
01088         LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
01089         LoadRetOps.push_back(InFlag);
01090         SDValue retval = DAG.getMemIntrinsicNode(
01091             NVPTXISD::LoadParam, dl,
01092             DAG.getVTList(LoadRetVTs), &LoadRetOps[0],
01093             LoadRetOps.size(), EltVT, MachinePointerInfo());
01094         Chain = retval.getValue(1);
01095         InFlag = retval.getValue(2);
01096         SDValue Ret0 = retval;
01097         if (needTruncate)
01098           Ret0 = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Ret0);
01099         InVals.push_back(Ret0);
01100       } else if (NumElts == 2) {
01101         // LoadV2
01102         std::vector<EVT> LoadRetVTs;
01103         if (needTruncate) {
01104           // If loading i1 result, generate
01105           //   load i16
01106           //   trunc i16 to i1
01107           LoadRetVTs.push_back(MVT::i16);
01108           LoadRetVTs.push_back(MVT::i16);
01109         } else {
01110           LoadRetVTs.push_back(EltVT);
01111           LoadRetVTs.push_back(EltVT);
01112         }
01113         LoadRetVTs.push_back(MVT::Other);
01114         LoadRetVTs.push_back(MVT::Glue);
01115         std::vector<SDValue> LoadRetOps;
01116         LoadRetOps.push_back(Chain);
01117         LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
01118         LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
01119         LoadRetOps.push_back(InFlag);
01120         SDValue retval = DAG.getMemIntrinsicNode(
01121             NVPTXISD::LoadParamV2, dl,
01122             DAG.getVTList(LoadRetVTs), &LoadRetOps[0],
01123             LoadRetOps.size(), EltVT, MachinePointerInfo());
01124         Chain = retval.getValue(2);
01125         InFlag = retval.getValue(3);
01126         SDValue Ret0 = retval.getValue(0);
01127         SDValue Ret1 = retval.getValue(1);
01128         if (needTruncate) {
01129           Ret0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret0);
01130           InVals.push_back(Ret0);
01131           Ret1 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret1);
01132           InVals.push_back(Ret1);
01133         } else {
01134           InVals.push_back(Ret0);
01135           InVals.push_back(Ret1);
01136         }
01137       } else {
01138         // Split into N LoadV4
01139         unsigned Ofst = 0;
01140         unsigned VecSize = 4;
01141         unsigned Opc = NVPTXISD::LoadParamV4;
01142         if (EltVT.getSizeInBits() == 64) {
01143           VecSize = 2;
01144           Opc = NVPTXISD::LoadParamV2;
01145         }
01146         EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
01147         for (unsigned i = 0; i < NumElts; i += VecSize) {
01148           SmallVector<EVT, 8> LoadRetVTs;
01149           if (needTruncate) {
01150             // If loading i1 result, generate
01151             //   load i16
01152             //   trunc i16 to i1
01153             for (unsigned j = 0; j < VecSize; ++j)
01154               LoadRetVTs.push_back(MVT::i16);
01155           } else {
01156             for (unsigned j = 0; j < VecSize; ++j)
01157               LoadRetVTs.push_back(EltVT);
01158           }
01159           LoadRetVTs.push_back(MVT::Other);
01160           LoadRetVTs.push_back(MVT::Glue);
01161           SmallVector<SDValue, 4> LoadRetOps;
01162           LoadRetOps.push_back(Chain);
01163           LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
01164           LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32));
01165           LoadRetOps.push_back(InFlag);
01166           SDValue retval = DAG.getMemIntrinsicNode(
01167               Opc, dl, DAG.getVTList(LoadRetVTs),
01168               &LoadRetOps[0], LoadRetOps.size(), EltVT, MachinePointerInfo());
01169           if (VecSize == 2) {
01170             Chain = retval.getValue(2);
01171             InFlag = retval.getValue(3);
01172           } else {
01173             Chain = retval.getValue(4);
01174             InFlag = retval.getValue(5);
01175           }
01176 
01177           for (unsigned j = 0; j < VecSize; ++j) {
01178             if (i + j >= NumElts)
01179               break;
01180             SDValue Elt = retval.getValue(j);
01181             if (needTruncate)
01182               Elt = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
01183             InVals.push_back(Elt);
01184           }
01185           Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
01186         }
01187       }
01188     } else {
01189       SmallVector<EVT, 16> VTs;
01190       ComputePTXValueVTs(*this, retTy, VTs);
01191       assert(VTs.size() == Ins.size() && "Bad value decomposition");
01192       for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
01193         unsigned sz = VTs[i].getSizeInBits();
01194         bool needTruncate = sz < 8 ? true : false;
01195         if (VTs[i].isInteger() && (sz < 8))
01196           sz = 8;
01197 
01198         SmallVector<EVT, 4> LoadRetVTs;
01199         EVT TheLoadType = VTs[i];
01200         if (retTy->isIntegerTy() &&
01201             TD->getTypeAllocSizeInBits(retTy) < 32) {
01202           // This is for integer types only, and specifically not for
01203           // aggregates.
01204           LoadRetVTs.push_back(MVT::i32);
01205           TheLoadType = MVT::i32;
01206         } else if (sz < 16) {
01207           // If loading i1/i8 result, generate
01208           //   load i8 (-> i16)
01209           //   trunc i16 to i1/i8
01210           LoadRetVTs.push_back(MVT::i16);
01211         } else
01212           LoadRetVTs.push_back(Ins[i].VT);
01213         LoadRetVTs.push_back(MVT::Other);
01214         LoadRetVTs.push_back(MVT::Glue);
01215 
01216         SmallVector<SDValue, 4> LoadRetOps;
01217         LoadRetOps.push_back(Chain);
01218         LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
01219         LoadRetOps.push_back(DAG.getConstant(resoffset, MVT::i32));
01220         LoadRetOps.push_back(InFlag);
01221         SDValue retval = DAG.getMemIntrinsicNode(
01222             NVPTXISD::LoadParam, dl,
01223             DAG.getVTList(LoadRetVTs), &LoadRetOps[0],
01224             LoadRetOps.size(), TheLoadType, MachinePointerInfo());
01225         Chain = retval.getValue(1);
01226         InFlag = retval.getValue(2);
01227         SDValue Ret0 = retval.getValue(0);
01228         if (needTruncate)
01229           Ret0 = DAG.getNode(ISD::TRUNCATE, dl, Ins[i].VT, Ret0);
01230         InVals.push_back(Ret0);
01231         resoffset += sz / 8;
01232       }
01233     }
01234   }
01235 
01236   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
01237                              DAG.getIntPtrConstant(uniqueCallSite + 1, true),
01238                              InFlag, dl);
01239   uniqueCallSite++;
01240 
01241   // set isTailCall to false for now, until we figure out how to express
01242   // tail call optimization in PTX
01243   isTailCall = false;
01244   return Chain;
01245 }
01246 
01247 // By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
01248 // (see LegalizeDAG.cpp). This is slow and uses local memory.
01249 // We use extract/insert/build vector just as what LegalizeOp() does in llvm 2.5
01250 SDValue
01251 NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
01252   SDNode *Node = Op.getNode();
01253   SDLoc dl(Node);
01254   SmallVector<SDValue, 8> Ops;
01255   unsigned NumOperands = Node->getNumOperands();
01256   for (unsigned i = 0; i < NumOperands; ++i) {
01257     SDValue SubOp = Node->getOperand(i);
01258     EVT VVT = SubOp.getNode()->getValueType(0);
01259     EVT EltVT = VVT.getVectorElementType();
01260     unsigned NumSubElem = VVT.getVectorNumElements();
01261     for (unsigned j = 0; j < NumSubElem; ++j) {
01262       Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
01263                                 DAG.getIntPtrConstant(j)));
01264     }
01265   }
01266   return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), &Ops[0],
01267                      Ops.size());
01268 }
01269 
01270 SDValue
01271 NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
01272   switch (Op.getOpcode()) {
01273   case ISD::RETURNADDR:
01274     return SDValue();
01275   case ISD::FRAMEADDR:
01276     return SDValue();
01277   case ISD::GlobalAddress:
01278     return LowerGlobalAddress(Op, DAG);
01279   case ISD::INTRINSIC_W_CHAIN:
01280     return Op;
01281   case ISD::BUILD_VECTOR:
01282   case ISD::EXTRACT_SUBVECTOR:
01283     return Op;
01284   case ISD::CONCAT_VECTORS:
01285     return LowerCONCAT_VECTORS(Op, DAG);
01286   case ISD::STORE:
01287     return LowerSTORE(Op, DAG);
01288   case ISD::LOAD:
01289     return LowerLOAD(Op, DAG);
01290   default:
01291     llvm_unreachable("Custom lowering not defined for operation");
01292   }
01293 }
01294 
01295 SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
01296   if (Op.getValueType() == MVT::i1)
01297     return LowerLOADi1(Op, DAG);
01298   else
01299     return SDValue();
01300 }
01301 
01302 // v = ld i1* addr
01303 //   =>
01304 // v1 = ld i8* addr (-> i16)
01305 // v = trunc i16 to i1
01306 SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
01307   SDNode *Node = Op.getNode();
01308   LoadSDNode *LD = cast<LoadSDNode>(Node);
01309   SDLoc dl(Node);
01310   assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
01311   assert(Node->getValueType(0) == MVT::i1 &&
01312          "Custom lowering for i1 load only");
01313   SDValue newLD =
01314       DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
01315                   LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(),
01316                   LD->isInvariant(), LD->getAlignment());
01317   SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
01318   // The legalizer (the caller) is expecting two values from the legalized
01319   // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
01320   // in LegalizeDAG.cpp which also uses MergeValues.
01321   SDValue Ops[] = { result, LD->getChain() };
01322   return DAG.getMergeValues(Ops, 2, dl);
01323 }
01324 
01325 SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
01326   EVT ValVT = Op.getOperand(1).getValueType();
01327   if (ValVT == MVT::i1)
01328     return LowerSTOREi1(Op, DAG);
01329   else if (ValVT.isVector())
01330     return LowerSTOREVector(Op, DAG);
01331   else
01332     return SDValue();
01333 }
01334 
01335 SDValue
01336 NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
01337   SDNode *N = Op.getNode();
01338   SDValue Val = N->getOperand(1);
01339   SDLoc DL(N);
01340   EVT ValVT = Val.getValueType();
01341 
01342   if (ValVT.isVector()) {
01343     // We only handle "native" vector sizes for now, e.g. <4 x double> is not
01344     // legal.  We can (and should) split that into 2 stores of <2 x double> here
01345     // but I'm leaving that as a TODO for now.
01346     if (!ValVT.isSimple())
01347       return SDValue();
01348     switch (ValVT.getSimpleVT().SimpleTy) {
01349     default:
01350       return SDValue();
01351     case MVT::v2i8:
01352     case MVT::v2i16:
01353     case MVT::v2i32:
01354     case MVT::v2i64:
01355     case MVT::v2f32:
01356     case MVT::v2f64:
01357     case MVT::v4i8:
01358     case MVT::v4i16:
01359     case MVT::v4i32:
01360     case MVT::v4f32:
01361       // This is a "native" vector type
01362       break;
01363     }
01364 
01365     unsigned Opcode = 0;
01366     EVT EltVT = ValVT.getVectorElementType();
01367     unsigned NumElts = ValVT.getVectorNumElements();
01368 
01369     // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
01370     // Therefore, we must ensure the type is legal.  For i1 and i8, we set the
01371     // stored type to i16 and propagate the "real" type as the memory type.
01372     bool NeedExt = false;
01373     if (EltVT.getSizeInBits() < 16)
01374       NeedExt = true;
01375 
01376     switch (NumElts) {
01377     default:
01378       return SDValue();
01379     case 2:
01380       Opcode = NVPTXISD::StoreV2;
01381       break;
01382     case 4: {
01383       Opcode = NVPTXISD::StoreV4;
01384       break;
01385     }
01386     }
01387 
01388     SmallVector<SDValue, 8> Ops;
01389 
01390     // First is the chain
01391     Ops.push_back(N->getOperand(0));
01392 
01393     // Then the split values
01394     for (unsigned i = 0; i < NumElts; ++i) {
01395       SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
01396                                    DAG.getIntPtrConstant(i));
01397       if (NeedExt)
01398         ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
01399       Ops.push_back(ExtVal);
01400     }
01401 
01402     // Then any remaining arguments
01403     for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) {
01404       Ops.push_back(N->getOperand(i));
01405     }
01406 
01407     MemSDNode *MemSD = cast<MemSDNode>(N);
01408 
01409     SDValue NewSt = DAG.getMemIntrinsicNode(
01410         Opcode, DL, DAG.getVTList(MVT::Other), &Ops[0], Ops.size(),
01411         MemSD->getMemoryVT(), MemSD->getMemOperand());
01412 
01413     //return DCI.CombineTo(N, NewSt, true);
01414     return NewSt;
01415   }
01416 
01417   return SDValue();
01418 }
01419 
01420 // st i1 v, addr
01421 //    =>
01422 // v1 = zxt v to i16
01423 // st.u8 i16, addr
01424 SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
01425   SDNode *Node = Op.getNode();
01426   SDLoc dl(Node);
01427   StoreSDNode *ST = cast<StoreSDNode>(Node);
01428   SDValue Tmp1 = ST->getChain();
01429   SDValue Tmp2 = ST->getBasePtr();
01430   SDValue Tmp3 = ST->getValue();
01431   assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
01432   unsigned Alignment = ST->getAlignment();
01433   bool isVolatile = ST->isVolatile();
01434   bool isNonTemporal = ST->isNonTemporal();
01435   Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
01436   SDValue Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
01437                                      ST->getPointerInfo(), MVT::i8, isNonTemporal,
01438                                      isVolatile, Alignment);
01439   return Result;
01440 }
01441 
01442 SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname,
01443                                         int idx, EVT v) const {
01444   std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
01445   std::stringstream suffix;
01446   suffix << idx;
01447   *name += suffix.str();
01448   return DAG.getTargetExternalSymbol(name->c_str(), v);
01449 }
01450 
01451 SDValue
01452 NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
01453   std::string ParamSym;
01454   raw_string_ostream ParamStr(ParamSym);
01455 
01456   ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
01457   ParamStr.flush();
01458 
01459   std::string *SavedStr =
01460     nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
01461   return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
01462 }
01463 
// Return an external symbol named ".HLPPARAM<idx>" for the idx'th parameter
// helper (a thin wrapper around getExtSymb).
SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
  return getExtSymb(DAG, ".HLPPARAM", idx);
}
01467 
01468 // Check to see if the kernel argument is image*_t or sampler_t
01469 
01470 bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
01471   static const char *const specialTypes[] = { "struct._image2d_t",
01472                                               "struct._image3d_t",
01473                                               "struct._sampler_t" };
01474 
01475   const Type *Ty = arg->getType();
01476   const PointerType *PTy = dyn_cast<PointerType>(Ty);
01477 
01478   if (!PTy)
01479     return false;
01480 
01481   if (!context)
01482     return false;
01483 
01484   const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
01485   const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";
01486 
01487   for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)
01488     if (TypeName == specialTypes[i])
01489       return true;
01490 
01491   return false;
01492 }
01493 
// LowerFormalArguments - lower the incoming (formal) arguments of the current
// function.  Each IR argument becomes one or more SDValues in InVals:
//   * image/sampler kernel args become an i32 constant holding the position,
//   * dead args become UNDEF nodes (one per part for aggregates/vectors),
//   * non-byval args are loaded from their ".param" symbol, split into parts
//     via ComputePTXValueVTs (aggregates) or per-element vector loads,
//   * byval args are wrapped in an NVPTXISD::MoveParam node (and converted
//     to the generic address space for non-kernel functions).
SDValue NVPTXTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const DataLayout *TD = getDataLayout();

  const Function *F = MF.getFunction();
  const AttributeSet &PAL = F->getAttributes();
  const TargetLowering *TLI = nvTM->getTargetLowering();

  SDValue Root = DAG.getRoot();
  std::vector<SDValue> OutChains;

  bool isKernel = llvm::isKernelFunction(*F);
  // Only the SM >= 2.0 ABI path is supported; the assert fires in debug
  // builds and the early return keeps release builds from going further.
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
  assert(isABI && "Non-ABI compilation is not supported");
  if (!isABI)
    return Chain;

  std::vector<Type *> argTypes;
  std::vector<const Argument *> theArgs;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I) {
    theArgs.push_back(I);
    argTypes.push_back(I->getType());
  }
  // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
  // Ins.size() will be larger
  //   * if there is an aggregate argument with multiple fields (each field
  //     showing up separately in Ins)
  //   * if there is a vector argument with more than typical vector-length
  //     elements (generally if more than 4) where each vector element is
  //     individually present in Ins.
  // So a different index should be used for indexing into Ins.
  // See similar issue in LowerCall.
  unsigned InsIdx = 0;

  int idx = 0;
  for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
    Type *Ty = argTypes[i];

    // If the kernel argument is image*_t or sampler_t, convert it to
    // a i32 constant holding the parameter position. This can later
    // matched in the AsmPrinter to output the correct mangled name.
    if (isImageOrSamplerVal(
            theArgs[i],
            (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
                                     : 0))) {
      assert(isKernel && "Only kernels can have image/sampler params");
      InVals.push_back(DAG.getConstant(i + 1, MVT::i32));
      continue;
    }

    if (theArgs[i]->use_empty()) {
      // argument is dead
      if (Ty->isAggregateType()) {
        SmallVector<EVT, 16> vtparts;

        // Emit one UNDEF per part of the aggregate.
        ComputePTXValueVTs(*this, Ty, vtparts);
        assert(vtparts.size() > 0 && "empty aggregate type not expected");
        for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
             ++parti) {
          EVT partVT = vtparts[parti];
          InVals.push_back(DAG.getNode(ISD::UNDEF, dl, partVT));
          ++InsIdx;
        }
        // The outer loop will do ++InsIdx too; undo the one extra increment
        // performed by the inner loop.  (Same pattern below.)
        if (vtparts.size() > 0)
          --InsIdx;
        continue;
      }
      if (Ty->isVectorTy()) {
        EVT ObjectVT = getValueType(Ty);
        unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
        for (unsigned parti = 0; parti < NumRegs; ++parti) {
          InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
          ++InsIdx;
        }
        if (NumRegs > 0)
          --InsIdx;
        continue;
      }
      // Dead scalar: a single UNDEF of the Ins type.
      InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
      continue;
    }

    // In the following cases, assign a node order of "idx+1"
    // to newly created nodes. The SDNodes for params have to
    // appear in the same order as their order of appearance
    // in the original function. "idx+1" holds that order.
    if (PAL.hasAttribute(i + 1, Attribute::ByVal) == false) {
      if (Ty->isAggregateType()) {
        SmallVector<EVT, 16> vtparts;
        SmallVector<uint64_t, 16> offsets;

        // NOTE: Here, we lose the ability to issue vector loads for vectors
        // that are a part of a struct.  This should be investigated in the
        // future.
        ComputePTXValueVTs(*this, Ty, vtparts, &offsets, 0);
        assert(vtparts.size() > 0 && "empty aggregate type not expected");
        bool aggregateIsPacked = false;
        if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
          aggregateIsPacked = STy->isPacked();

        // Load each part of the aggregate from its offset off the param
        // symbol, in the param address space.
        SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
        for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
             ++parti) {
          EVT partVT = vtparts[parti];
          Value *srcValue = Constant::getNullValue(
              PointerType::get(partVT.getTypeForEVT(F->getContext()),
                               llvm::ADDRESS_SPACE_PARAM));
          SDValue srcAddr =
              DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
                          DAG.getConstant(offsets[parti], getPointerTy()));
          unsigned partAlign =
              aggregateIsPacked ? 1
                                : TD->getABITypeAlignment(
                                      partVT.getTypeForEVT(F->getContext()));
          SDValue p;
          // If the incoming register type is wider than the in-memory part
          // type, use an extending load (sign/zero per the Ins flags).
          if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) {
            ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
                                     ISD::SEXTLOAD : ISD::ZEXTLOAD;
            p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, srcAddr,
                               MachinePointerInfo(srcValue), partVT, false,
                               false, partAlign);
          } else {
            p = DAG.getLoad(partVT, dl, Root, srcAddr,
                            MachinePointerInfo(srcValue), false, false, false,
                            partAlign);
          }
          if (p.getNode())
            p.getNode()->setIROrder(idx + 1);
          InVals.push_back(p);
          ++InsIdx;
        }
        if (vtparts.size() > 0)
          --InsIdx;
        continue;
      }
      if (Ty->isVectorTy()) {
        EVT ObjectVT = getValueType(Ty);
        SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
        unsigned NumElts = ObjectVT.getVectorNumElements();
        assert(TLI->getNumRegisters(F->getContext(), ObjectVT) == NumElts &&
               "Vector was not scalarized");
        unsigned Ofst = 0;
        EVT EltVT = ObjectVT.getVectorElementType();

        // V1 load
        // f32 = load ...
        if (NumElts == 1) {
          // We only have one element, so just directly load it
          Value *SrcValue = Constant::getNullValue(PointerType::get(
              EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
          SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
                                        DAG.getConstant(Ofst, getPointerTy()));
          SDValue P = DAG.getLoad(
              EltVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
              false, true,
              TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
          if (P.getNode())
            P.getNode()->setIROrder(idx + 1);

          if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
            P = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, P);
          InVals.push_back(P);
          Ofst += TD->getTypeAllocSize(EltVT.getTypeForEVT(F->getContext()));
          ++InsIdx;
        } else if (NumElts == 2) {
          // V2 load
          // f32,f32 = load ...
          EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, 2);
          Value *SrcValue = Constant::getNullValue(PointerType::get(
              VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
          SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
                                        DAG.getConstant(Ofst, getPointerTy()));
          SDValue P = DAG.getLoad(
              VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
              false, true,
              TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
          if (P.getNode())
            P.getNode()->setIROrder(idx + 1);

          // Split the vector load back into the two scalar values expected
          // in InVals.
          SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
                                     DAG.getIntPtrConstant(0));
          SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
                                     DAG.getIntPtrConstant(1));

          if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) {
            Elt0 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt0);
            Elt1 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt1);
          }

          InVals.push_back(Elt0);
          InVals.push_back(Elt1);
          Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
          InsIdx += 2;
        } else {
          // V4 loads
          // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
          // the
          // vector will be expanded to a power of 2 elements, so we know we can
          // always round up to the next multiple of 4 when creating the vector
          // loads.
          // e.g.  4 elem => 1 ld.v4
          //       6 elem => 2 ld.v4
          //       8 elem => 2 ld.v4
          //      11 elem => 3 ld.v4
          unsigned VecSize = 4;
          if (EltVT.getSizeInBits() == 64) {
            VecSize = 2;
          }
          EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
          for (unsigned i = 0; i < NumElts; i += VecSize) {
            Value *SrcValue = Constant::getNullValue(
                PointerType::get(VecVT.getTypeForEVT(F->getContext()),
                                 llvm::ADDRESS_SPACE_PARAM));
            SDValue SrcAddr =
                DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
                            DAG.getConstant(Ofst, getPointerTy()));
            SDValue P = DAG.getLoad(
                VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
                false, true,
                TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
            if (P.getNode())
              P.getNode()->setIROrder(idx + 1);

            // Extract only the elements that actually exist; the last chunk
            // may cover fewer than VecSize real elements.
            for (unsigned j = 0; j < VecSize; ++j) {
              if (i + j >= NumElts)
                break;
              SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
                                        DAG.getIntPtrConstant(j));
              if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
                Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt);
              InVals.push_back(Elt);
            }
            Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
          }
          InsIdx += NumElts;
        }

        if (NumElts > 0)
          --InsIdx;
        continue;
      }
      // A plain scalar.
      EVT ObjectVT = getValueType(Ty);
      // If ABI, load from the param symbol
      SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
      Value *srcValue = Constant::getNullValue(PointerType::get(
          ObjectVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
      SDValue p;
       if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) {
        ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
                                       ISD::SEXTLOAD : ISD::ZEXTLOAD;
        p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, Arg,
                           MachinePointerInfo(srcValue), ObjectVT, false, false,
        TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
      } else {
        p = DAG.getLoad(Ins[InsIdx].VT, dl, Root, Arg,
                        MachinePointerInfo(srcValue), false, false, false,
        TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
      }
      if (p.getNode())
        p.getNode()->setIROrder(idx + 1);
      InVals.push_back(p);
      continue;
    }

    // Param has ByVal attribute
    // Return MoveParam(param symbol).
    // Ideally, the param symbol can be returned directly,
    // but when SDNode builder decides to use it in a CopyToReg(),
    // machine instruction fails because TargetExternalSymbol
    // (not lowered) is target dependent, and CopyToReg assumes
    // the source is lowered.
    EVT ObjectVT = getValueType(Ty);
    assert(ObjectVT == Ins[InsIdx].VT &&
           "Ins type did not match function type");
    SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
    SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
    if (p.getNode())
      p.getNode()->setIROrder(idx + 1);
    if (isKernel)
      InVals.push_back(p);
    else {
      // For device functions, convert the param pointer to the generic
      // address space via the nvvm_ptr_local_to_gen intrinsic.
      SDValue p2 = DAG.getNode(
          ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
          DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p);
      InVals.push_back(p2);
    }
  }

  // Clang will check explicit VarArg and issue error if any. However, Clang
  // will let code with
  // implicit var arg like f() pass. See bug 617733.
  // We treat this case as if the arg list is empty.
  // if (F.isVarArg()) {
  // assert(0 && "VarArg not supported yet!");
  //}

  if (!OutChains.empty())
    DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &OutChains[0],
                            OutChains.size()));

  return Chain;
}
01801 
01802 
// LowerReturn - lower the function's return value into one or more
// NVPTXISD::StoreRetval{,V2,V4} nodes that write the scalarized components
// into the implicit return-value slot, then emit NVPTXISD::RET_FLAG.
// Sub-16-bit elements are widened to i16 (or i32 for small integer returns)
// before storing, since the stored type must be register-legal.
SDValue
NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc dl, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const Function *F = MF.getFunction();
  Type *RetTy = F->getReturnType();
  const DataLayout *TD = getDataLayout();

  // Only the SM >= 2.0 ABI path is supported.
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
  assert(isABI && "Non-ABI compilation is not supported");
  if (!isABI)
    return Chain;

  if (VectorType *VTy = dyn_cast<VectorType>(RetTy)) {
    // If we have a vector type, the OutVals array will be the scalarized
    // components and we have combine them into 1 or more vector stores.
    unsigned NumElts = VTy->getNumElements();
    assert(NumElts == Outs.size() && "Bad scalarization of return value");

    // const_cast can be removed in later LLVM versions
    EVT EltVT = getValueType(RetTy).getVectorElementType();
    bool NeedExtend = false;
    if (EltVT.getSizeInBits() < 16)
      NeedExtend = true;

    // V1 store
    if (NumElts == 1) {
      SDValue StoreVal = OutVals[0];
      // We only have one element, so just directly store it
      if (NeedExtend)
        StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
      SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal };
      Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
                                      DAG.getVTList(MVT::Other), &Ops[0], 3,
                                      EltVT, MachinePointerInfo());

    } else if (NumElts == 2) {
      // V2 store
      SDValue StoreVal0 = OutVals[0];
      SDValue StoreVal1 = OutVals[1];

      if (NeedExtend) {
        StoreVal0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal0);
        StoreVal1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal1);
      }

      SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal0,
                        StoreVal1 };
      Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetvalV2, dl,
                                      DAG.getVTList(MVT::Other), &Ops[0], 4,
                                      EltVT, MachinePointerInfo());
    } else {
      // V4 stores
      // We have at least 4 elements (<3 x Ty> expands to 4 elements) and the
      // vector will be expanded to a power of 2 elements, so we know we can
      // always round up to the next multiple of 4 when creating the vector
      // stores.
      // e.g.  4 elem => 1 st.v4
      //       6 elem => 2 st.v4
      //       8 elem => 2 st.v4
      //      11 elem => 3 st.v4

      unsigned VecSize = 4;
      if (OutVals[0].getValueType().getSizeInBits() == 64)
        VecSize = 2;

      unsigned Offset = 0;

      EVT VecVT =
          EVT::getVectorVT(F->getContext(), OutVals[0].getValueType(), VecSize);
      unsigned PerStoreOffset =
          TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));

      for (unsigned i = 0; i < NumElts; i += VecSize) {
        // Get values
        SDValue StoreVal;
        SmallVector<SDValue, 8> Ops;
        Ops.push_back(Chain);
        Ops.push_back(DAG.getConstant(Offset, MVT::i32));
        unsigned Opc = NVPTXISD::StoreRetvalV2;
        EVT ExtendedVT = (NeedExtend) ? MVT::i16 : OutVals[0].getValueType();

        StoreVal = OutVals[i];
        if (NeedExtend)
          StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
        Ops.push_back(StoreVal);

        // Elements past NumElts are padded with UNDEF so each st.v2/st.v4
        // always gets a full complement of operands.
        if (i + 1 < NumElts) {
          StoreVal = OutVals[i + 1];
          if (NeedExtend)
            StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
        } else {
          StoreVal = DAG.getUNDEF(ExtendedVT);
        }
        Ops.push_back(StoreVal);

        if (VecSize == 4) {
          Opc = NVPTXISD::StoreRetvalV4;
          if (i + 2 < NumElts) {
            StoreVal = OutVals[i + 2];
            if (NeedExtend)
              StoreVal =
                  DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
          } else {
            StoreVal = DAG.getUNDEF(ExtendedVT);
          }
          Ops.push_back(StoreVal);

          if (i + 3 < NumElts) {
            StoreVal = OutVals[i + 3];
            if (NeedExtend)
              StoreVal =
                  DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
          } else {
            StoreVal = DAG.getUNDEF(ExtendedVT);
          }
          Ops.push_back(StoreVal);
        }

        // Chain = DAG.getNode(Opc, dl, MVT::Other, &Ops[0], Ops.size());
        Chain =
            DAG.getMemIntrinsicNode(Opc, dl, DAG.getVTList(MVT::Other), &Ops[0],
                                    Ops.size(), EltVT, MachinePointerInfo());
        Offset += PerStoreOffset;
      }
    }
  } else {
    // Non-vector return: store each decomposed part at its running byte
    // offset (SizeSoFar) in the return-value slot.
    SmallVector<EVT, 16> ValVTs;
    // const_cast is necessary since we are still using an LLVM version from
    // before the type system re-write.
    ComputePTXValueVTs(*this, RetTy, ValVTs);
    assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition");

    unsigned SizeSoFar = 0;
    for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
      SDValue theVal = OutVals[i];
      EVT TheValType = theVal.getValueType();
      unsigned numElems = 1;
      if (TheValType.isVector())
        numElems = TheValType.getVectorNumElements();
      for (unsigned j = 0, je = numElems; j != je; ++j) {
        SDValue TmpVal = theVal;
        if (TheValType.isVector())
          TmpVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                               TheValType.getVectorElementType(), TmpVal,
                               DAG.getIntPtrConstant(j));
        EVT TheStoreType = ValVTs[i];
        if (RetTy->isIntegerTy() &&
            TD->getTypeAllocSizeInBits(RetTy) < 32) {
          // The following zero-extension is for integer types only, and
          // specifically not for aggregates.
          TmpVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, TmpVal);
          TheStoreType = MVT::i32;
        }
        else if (TmpVal.getValueType().getSizeInBits() < 16)
          TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal);

        SDValue Ops[] = { Chain, DAG.getConstant(SizeSoFar, MVT::i32), TmpVal };
        Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
                                        DAG.getVTList(MVT::Other), &Ops[0],
                                        3, TheStoreType,
                                        MachinePointerInfo());
        if(TheValType.isVector())
          SizeSoFar +=
            TheStoreType.getVectorElementType().getStoreSizeInBits() / 8;
        else
          SizeSoFar += TheStoreType.getStoreSizeInBits()/8;
      }
    }
  }

  return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
}
01979 
01980 
01981 void NVPTXTargetLowering::LowerAsmOperandForConstraint(
01982     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
01983     SelectionDAG &DAG) const {
01984   if (Constraint.length() > 1)
01985     return;
01986   else
01987     TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
01988 }
01989 
01990 // NVPTX suuport vector of legal types of any length in Intrinsics because the
01991 // NVPTX specific type legalizer
01992 // will legalize them to the PTX supported length.
01993 bool NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
01994   if (isTypeLegal(VT))
01995     return true;
01996   if (VT.isVector()) {
01997     MVT eVT = VT.getVectorElementType();
01998     if (isTypeLegal(eVT))
01999       return true;
02000   }
02001   return false;
02002 }
02003 
// getOpcForTextureInstr - map an NVVM texture-read intrinsic ID to the
// corresponding NVPTXISD texture opcode.  Covers 1D, 1D-array, 2D, 2D-array
// and 3D reads with i32/float coordinates, plus the level/grad variants.
// Returns 0 when the intrinsic is not a texture read handled here.
static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
  switch (Intrinsic) {
  default:
    return 0;

  case Intrinsic::nvvm_tex_1d_v4f32_i32:
    return NVPTXISD::Tex1DFloatI32;
  case Intrinsic::nvvm_tex_1d_v4f32_f32:
    return NVPTXISD::Tex1DFloatFloat;
  case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
    return NVPTXISD::Tex1DFloatFloatLevel;
  case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
    return NVPTXISD::Tex1DFloatFloatGrad;
  case Intrinsic::nvvm_tex_1d_v4i32_i32:
    return NVPTXISD::Tex1DI32I32;
  case Intrinsic::nvvm_tex_1d_v4i32_f32:
    return NVPTXISD::Tex1DI32Float;
  case Intrinsic::nvvm_tex_1d_level_v4i32_f32:
    return NVPTXISD::Tex1DI32FloatLevel;
  case Intrinsic::nvvm_tex_1d_grad_v4i32_f32:
    return NVPTXISD::Tex1DI32FloatGrad;

  case Intrinsic::nvvm_tex_1d_array_v4f32_i32:
    return NVPTXISD::Tex1DArrayFloatI32;
  case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
    return NVPTXISD::Tex1DArrayFloatFloat;
  case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
    return NVPTXISD::Tex1DArrayFloatFloatLevel;
  case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
    return NVPTXISD::Tex1DArrayFloatFloatGrad;
  case Intrinsic::nvvm_tex_1d_array_v4i32_i32:
    return NVPTXISD::Tex1DArrayI32I32;
  case Intrinsic::nvvm_tex_1d_array_v4i32_f32:
    return NVPTXISD::Tex1DArrayI32Float;
  case Intrinsic::nvvm_tex_1d_array_level_v4i32_f32:
    return NVPTXISD::Tex1DArrayI32FloatLevel;
  case Intrinsic::nvvm_tex_1d_array_grad_v4i32_f32:
    return NVPTXISD::Tex1DArrayI32FloatGrad;

  case Intrinsic::nvvm_tex_2d_v4f32_i32:
    return NVPTXISD::Tex2DFloatI32;
  case Intrinsic::nvvm_tex_2d_v4f32_f32:
    return NVPTXISD::Tex2DFloatFloat;
  case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
    return NVPTXISD::Tex2DFloatFloatLevel;
  case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
    return NVPTXISD::Tex2DFloatFloatGrad;
  case Intrinsic::nvvm_tex_2d_v4i32_i32:
    return NVPTXISD::Tex2DI32I32;
  case Intrinsic::nvvm_tex_2d_v4i32_f32:
    return NVPTXISD::Tex2DI32Float;
  case Intrinsic::nvvm_tex_2d_level_v4i32_f32:
    return NVPTXISD::Tex2DI32FloatLevel;
  case Intrinsic::nvvm_tex_2d_grad_v4i32_f32:
    return NVPTXISD::Tex2DI32FloatGrad;

  case Intrinsic::nvvm_tex_2d_array_v4f32_i32:
    return NVPTXISD::Tex2DArrayFloatI32;
  case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
    return NVPTXISD::Tex2DArrayFloatFloat;
  case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
    return NVPTXISD::Tex2DArrayFloatFloatLevel;
  case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
    return NVPTXISD::Tex2DArrayFloatFloatGrad;
  case Intrinsic::nvvm_tex_2d_array_v4i32_i32:
    return NVPTXISD::Tex2DArrayI32I32;
  case Intrinsic::nvvm_tex_2d_array_v4i32_f32:
    return NVPTXISD::Tex2DArrayI32Float;
  case Intrinsic::nvvm_tex_2d_array_level_v4i32_f32:
    return NVPTXISD::Tex2DArrayI32FloatLevel;
  case Intrinsic::nvvm_tex_2d_array_grad_v4i32_f32:
    return NVPTXISD::Tex2DArrayI32FloatGrad;

  case Intrinsic::nvvm_tex_3d_v4f32_i32:
    return NVPTXISD::Tex3DFloatI32;
  case Intrinsic::nvvm_tex_3d_v4f32_f32:
    return NVPTXISD::Tex3DFloatFloat;
  case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
    return NVPTXISD::Tex3DFloatFloatLevel;
  case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
    return NVPTXISD::Tex3DFloatFloatGrad;
  case Intrinsic::nvvm_tex_3d_v4i32_i32:
    return NVPTXISD::Tex3DI32I32;
  case Intrinsic::nvvm_tex_3d_v4i32_f32:
    return NVPTXISD::Tex3DI32Float;
  case Intrinsic::nvvm_tex_3d_level_v4i32_f32:
    return NVPTXISD::Tex3DI32FloatLevel;
  case Intrinsic::nvvm_tex_3d_grad_v4i32_f32:
    return NVPTXISD::Tex3DI32FloatGrad;
  }
}
02095 
// Map an NVVM surface-load (suld) intrinsic ID to the matching NVPTXISD
// target opcode.  Returns 0 when the intrinsic is not a surface load
// handled here.  Only the "_trap" out-of-bounds variants appear in this
// table; each geometry (1d, 1d array, 2d, 2d array, 3d) covers scalar,
// v2 and v4 loads of i8/i16/i32.
static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
  switch (Intrinsic) {
  default:
    return 0;
  // 1D surface loads.
  case Intrinsic::nvvm_suld_1d_i8_trap:
    return NVPTXISD::Suld1DI8Trap;
  case Intrinsic::nvvm_suld_1d_i16_trap:
    return NVPTXISD::Suld1DI16Trap;
  case Intrinsic::nvvm_suld_1d_i32_trap:
    return NVPTXISD::Suld1DI32Trap;
  case Intrinsic::nvvm_suld_1d_v2i8_trap:
    return NVPTXISD::Suld1DV2I8Trap;
  case Intrinsic::nvvm_suld_1d_v2i16_trap:
    return NVPTXISD::Suld1DV2I16Trap;
  case Intrinsic::nvvm_suld_1d_v2i32_trap:
    return NVPTXISD::Suld1DV2I32Trap;
  case Intrinsic::nvvm_suld_1d_v4i8_trap:
    return NVPTXISD::Suld1DV4I8Trap;
  case Intrinsic::nvvm_suld_1d_v4i16_trap:
    return NVPTXISD::Suld1DV4I16Trap;
  case Intrinsic::nvvm_suld_1d_v4i32_trap:
    return NVPTXISD::Suld1DV4I32Trap;
  // 1D array surface loads.
  case Intrinsic::nvvm_suld_1d_array_i8_trap:
    return NVPTXISD::Suld1DArrayI8Trap;
  case Intrinsic::nvvm_suld_1d_array_i16_trap:
    return NVPTXISD::Suld1DArrayI16Trap;
  case Intrinsic::nvvm_suld_1d_array_i32_trap:
    return NVPTXISD::Suld1DArrayI32Trap;
  case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
    return NVPTXISD::Suld1DArrayV2I8Trap;
  case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
    return NVPTXISD::Suld1DArrayV2I16Trap;
  case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
    return NVPTXISD::Suld1DArrayV2I32Trap;
  case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
    return NVPTXISD::Suld1DArrayV4I8Trap;
  case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
    return NVPTXISD::Suld1DArrayV4I16Trap;
  case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
    return NVPTXISD::Suld1DArrayV4I32Trap;
  // 2D surface loads.
  case Intrinsic::nvvm_suld_2d_i8_trap:
    return NVPTXISD::Suld2DI8Trap;
  case Intrinsic::nvvm_suld_2d_i16_trap:
    return NVPTXISD::Suld2DI16Trap;
  case Intrinsic::nvvm_suld_2d_i32_trap:
    return NVPTXISD::Suld2DI32Trap;
  case Intrinsic::nvvm_suld_2d_v2i8_trap:
    return NVPTXISD::Suld2DV2I8Trap;
  case Intrinsic::nvvm_suld_2d_v2i16_trap:
    return NVPTXISD::Suld2DV2I16Trap;
  case Intrinsic::nvvm_suld_2d_v2i32_trap:
    return NVPTXISD::Suld2DV2I32Trap;
  case Intrinsic::nvvm_suld_2d_v4i8_trap:
    return NVPTXISD::Suld2DV4I8Trap;
  case Intrinsic::nvvm_suld_2d_v4i16_trap:
    return NVPTXISD::Suld2DV4I16Trap;
  case Intrinsic::nvvm_suld_2d_v4i32_trap:
    return NVPTXISD::Suld2DV4I32Trap;
  // 2D array surface loads.
  case Intrinsic::nvvm_suld_2d_array_i8_trap:
    return NVPTXISD::Suld2DArrayI8Trap;
  case Intrinsic::nvvm_suld_2d_array_i16_trap:
    return NVPTXISD::Suld2DArrayI16Trap;
  case Intrinsic::nvvm_suld_2d_array_i32_trap:
    return NVPTXISD::Suld2DArrayI32Trap;
  case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
    return NVPTXISD::Suld2DArrayV2I8Trap;
  case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
    return NVPTXISD::Suld2DArrayV2I16Trap;
  case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
    return NVPTXISD::Suld2DArrayV2I32Trap;
  case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
    return NVPTXISD::Suld2DArrayV4I8Trap;
  case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
    return NVPTXISD::Suld2DArrayV4I16Trap;
  case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
    return NVPTXISD::Suld2DArrayV4I32Trap;
  // 3D surface loads.
  case Intrinsic::nvvm_suld_3d_i8_trap:
    return NVPTXISD::Suld3DI8Trap;
  case Intrinsic::nvvm_suld_3d_i16_trap:
    return NVPTXISD::Suld3DI16Trap;
  case Intrinsic::nvvm_suld_3d_i32_trap:
    return NVPTXISD::Suld3DI32Trap;
  case Intrinsic::nvvm_suld_3d_v2i8_trap:
    return NVPTXISD::Suld3DV2I8Trap;
  case Intrinsic::nvvm_suld_3d_v2i16_trap:
    return NVPTXISD::Suld3DV2I16Trap;
  case Intrinsic::nvvm_suld_3d_v2i32_trap:
    return NVPTXISD::Suld3DV2I32Trap;
  case Intrinsic::nvvm_suld_3d_v4i8_trap:
    return NVPTXISD::Suld3DV4I8Trap;
  case Intrinsic::nvvm_suld_3d_v4i16_trap:
    return NVPTXISD::Suld3DV4I16Trap;
  case Intrinsic::nvvm_suld_3d_v4i32_trap:
    return NVPTXISD::Suld3DV4I32Trap;
  }
}
02192 
// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
// TgtMemIntrinsic because we need information that is only available in the
// "Value" type of the destination pointer -- in particular, its address
// space.
02198 bool NVPTXTargetLowering::getTgtMemIntrinsic(
02199     IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
02200   switch (Intrinsic) {
02201   default:
02202     return false;
02203 
02204   case Intrinsic::nvvm_atomic_load_add_f32:
02205     Info.opc = ISD::INTRINSIC_W_CHAIN;
02206     Info.memVT = MVT::f32;
02207     Info.ptrVal = I.getArgOperand(0);
02208     Info.offset = 0;
02209     Info.vol = 0;
02210     Info.readMem = true;
02211     Info.writeMem = true;
02212     Info.align = 0;
02213     return true;
02214 
02215   case Intrinsic::nvvm_atomic_load_inc_32:
02216   case Intrinsic::nvvm_atomic_load_dec_32:
02217     Info.opc = ISD::INTRINSIC_W_CHAIN;
02218     Info.memVT = MVT::i32;
02219     Info.ptrVal = I.getArgOperand(0);
02220     Info.offset = 0;
02221     Info.vol = 0;
02222     Info.readMem = true;
02223     Info.writeMem = true;
02224     Info.align = 0;
02225     return true;
02226 
02227   case Intrinsic::nvvm_ldu_global_i:
02228   case Intrinsic::nvvm_ldu_global_f:
02229   case Intrinsic::nvvm_ldu_global_p:
02230 
02231     Info.opc = ISD::INTRINSIC_W_CHAIN;
02232     if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
02233       Info.memVT = getValueType(I.getType());
02234     else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
02235       Info.memVT = getValueType(I.getType());
02236     else
02237       Info.memVT = MVT::f32;
02238     Info.ptrVal = I.getArgOperand(0);
02239     Info.offset = 0;
02240     Info.vol = 0;
02241     Info.readMem = true;
02242     Info.writeMem = false;
02243     Info.align = 0;
02244     return true;
02245 
02246   case Intrinsic::nvvm_tex_1d_v4f32_i32:
02247   case Intrinsic::nvvm_tex_1d_v4f32_f32:
02248   case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
02249   case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
02250   case Intrinsic::nvvm_tex_1d_array_v4f32_i32:
02251   case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
02252   case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
02253   case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
02254   case Intrinsic::nvvm_tex_2d_v4f32_i32:
02255   case Intrinsic::nvvm_tex_2d_v4f32_f32:
02256   case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
02257   case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
02258   case Intrinsic::nvvm_tex_2d_array_v4f32_i32:
02259   case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
02260   case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
02261   case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
02262   case Intrinsic::nvvm_tex_3d_v4f32_i32:
02263   case Intrinsic::nvvm_tex_3d_v4f32_f32:
02264   case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
02265   case Intrinsic::nvvm_tex_3d_grad_v4f32_f32: {
02266     Info.opc = getOpcForTextureInstr(Intrinsic);
02267     Info.memVT = MVT::f32;
02268     Info.ptrVal = NULL;
02269     Info.offset = 0;
02270     Info.vol = 0;
02271     Info.readMem = true;
02272     Info.writeMem = false;
02273     Info.align = 16;
02274     return true;
02275   }
02276   case Intrinsic::nvvm_tex_1d_v4i32_i32:
02277   case Intrinsic::nvvm_tex_1d_v4i32_f32:
02278   case Intrinsic::nvvm_tex_1d_level_v4i32_f32:
02279   case Intrinsic::nvvm_tex_1d_grad_v4i32_f32:
02280   case Intrinsic::nvvm_tex_1d_array_v4i32_i32:
02281   case Intrinsic::nvvm_tex_1d_array_v4i32_f32:
02282   case Intrinsic::nvvm_tex_1d_array_level_v4i32_f32:
02283   case Intrinsic::nvvm_tex_1d_array_grad_v4i32_f32:
02284   case Intrinsic::nvvm_tex_2d_v4i32_i32:
02285   case Intrinsic::nvvm_tex_2d_v4i32_f32:
02286   case Intrinsic::nvvm_tex_2d_level_v4i32_f32:
02287   case Intrinsic::nvvm_tex_2d_grad_v4i32_f32:
02288   case Intrinsic::nvvm_tex_2d_array_v4i32_i32:
02289   case Intrinsic::nvvm_tex_2d_array_v4i32_f32:
02290   case Intrinsic::nvvm_tex_2d_array_level_v4i32_f32:
02291   case Intrinsic::nvvm_tex_2d_array_grad_v4i32_f32:
02292   case Intrinsic::nvvm_tex_3d_v4i32_i32:
02293   case Intrinsic::nvvm_tex_3d_v4i32_f32:
02294   case Intrinsic::nvvm_tex_3d_level_v4i32_f32:
02295   case Intrinsic::nvvm_tex_3d_grad_v4i32_f32: {
02296     Info.opc = getOpcForTextureInstr(Intrinsic);
02297     Info.memVT = MVT::i32;
02298     Info.ptrVal = NULL;
02299     Info.offset = 0;
02300     Info.vol = 0;
02301     Info.readMem = true;
02302     Info.writeMem = false;
02303     Info.align = 16;
02304     return true;
02305   }
02306   case Intrinsic::nvvm_suld_1d_i8_trap:
02307   case Intrinsic::nvvm_suld_1d_v2i8_trap:
02308   case Intrinsic::nvvm_suld_1d_v4i8_trap:
02309   case Intrinsic::nvvm_suld_1d_array_i8_trap:
02310   case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
02311   case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
02312   case Intrinsic::nvvm_suld_2d_i8_trap:
02313   case Intrinsic::nvvm_suld_2d_v2i8_trap:
02314   case Intrinsic::nvvm_suld_2d_v4i8_trap:
02315   case Intrinsic::nvvm_suld_2d_array_i8_trap:
02316   case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
02317   case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
02318   case Intrinsic::nvvm_suld_3d_i8_trap:
02319   case Intrinsic::nvvm_suld_3d_v2i8_trap:
02320   case Intrinsic::nvvm_suld_3d_v4i8_trap: {
02321     Info.opc = getOpcForSurfaceInstr(Intrinsic);
02322     Info.memVT = MVT::i8;
02323     Info.ptrVal = NULL;
02324     Info.offset = 0;
02325     Info.vol = 0;
02326     Info.readMem = true;
02327     Info.writeMem = false;
02328     Info.align = 16;
02329     return true;
02330   }
02331   case Intrinsic::nvvm_suld_1d_i16_trap:
02332   case Intrinsic::nvvm_suld_1d_v2i16_trap:
02333   case Intrinsic::nvvm_suld_1d_v4i16_trap:
02334   case Intrinsic::nvvm_suld_1d_array_i16_trap:
02335   case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
02336   case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
02337   case Intrinsic::nvvm_suld_2d_i16_trap:
02338   case Intrinsic::nvvm_suld_2d_v2i16_trap:
02339   case Intrinsic::nvvm_suld_2d_v4i16_trap:
02340   case Intrinsic::nvvm_suld_2d_array_i16_trap:
02341   case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
02342   case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
02343   case Intrinsic::nvvm_suld_3d_i16_trap:
02344   case Intrinsic::nvvm_suld_3d_v2i16_trap:
02345   case Intrinsic::nvvm_suld_3d_v4i16_trap: {
02346     Info.opc = getOpcForSurfaceInstr(Intrinsic);
02347     Info.memVT = MVT::i16;
02348     Info.ptrVal = NULL;
02349     Info.offset = 0;
02350     Info.vol = 0;
02351     Info.readMem = true;
02352     Info.writeMem = false;
02353     Info.align = 16;
02354     return true;
02355   }
02356   case Intrinsic::nvvm_suld_1d_i32_trap:
02357   case Intrinsic::nvvm_suld_1d_v2i32_trap:
02358   case Intrinsic::nvvm_suld_1d_v4i32_trap:
02359   case Intrinsic::nvvm_suld_1d_array_i32_trap:
02360   case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
02361   case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
02362   case Intrinsic::nvvm_suld_2d_i32_trap:
02363   case Intrinsic::nvvm_suld_2d_v2i32_trap:
02364   case Intrinsic::nvvm_suld_2d_v4i32_trap:
02365   case Intrinsic::nvvm_suld_2d_array_i32_trap:
02366   case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
02367   case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
02368   case Intrinsic::nvvm_suld_3d_i32_trap:
02369   case Intrinsic::nvvm_suld_3d_v2i32_trap:
02370   case Intrinsic::nvvm_suld_3d_v4i32_trap: {
02371     Info.opc = getOpcForSurfaceInstr(Intrinsic);
02372     Info.memVT = MVT::i32;
02373     Info.ptrVal = NULL;
02374     Info.offset = 0;
02375     Info.vol = 0;
02376     Info.readMem = true;
02377     Info.writeMem = false;
02378     Info.align = 16;
02379     return true;
02380   }
02381 
02382   }
02383   return false;
02384 }
02385 
02386 /// isLegalAddressingMode - Return true if the addressing mode represented
02387 /// by AM is legal for this target, for a load/store of the specified type.
02388 /// Used to guide target specific optimizations, like loop strength reduction
02389 /// (LoopStrengthReduce.cpp) and memory optimization for address mode
02390 /// (CodeGenPrepare.cpp)
02391 bool NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
02392                                                 Type *Ty) const {
02393 
02394   // AddrMode - This represents an addressing mode of:
02395   //    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
02396   //
02397   // The legal address modes are
02398   // - [avar]
02399   // - [areg]
02400   // - [areg+immoff]
02401   // - [immAddr]
02402 
02403   if (AM.BaseGV) {
02404     if (AM.BaseOffs || AM.HasBaseReg || AM.Scale)
02405       return false;
02406     return true;
02407   }
02408 
02409   switch (AM.Scale) {
02410   case 0: // "r", "r+i" or "i" is allowed
02411     break;
02412   case 1:
02413     if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
02414       return false;
02415     // Otherwise we have r+i.
02416     break;
02417   default:
02418     // No scale > 1 is allowed
02419     return false;
02420   }
02421   return true;
02422 }
02423 
02424 //===----------------------------------------------------------------------===//
02425 //                         NVPTX Inline Assembly Support
02426 //===----------------------------------------------------------------------===//
02427 
02428 /// getConstraintType - Given a constraint letter, return the type of
02429 /// constraint it is for this target.
02430 NVPTXTargetLowering::ConstraintType
02431 NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
02432   if (Constraint.size() == 1) {
02433     switch (Constraint[0]) {
02434     default:
02435       break;
02436     case 'r':
02437     case 'h':
02438     case 'c':
02439     case 'l':
02440     case 'f':
02441     case 'd':
02442     case '0':
02443     case 'N':
02444       return C_RegisterClass;
02445     }
02446   }
02447   return TargetLowering::getConstraintType(Constraint);
02448 }
02449 
02450 std::pair<unsigned, const TargetRegisterClass *>
02451 NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
02452                                                   MVT VT) const {
02453   if (Constraint.size() == 1) {
02454     switch (Constraint[0]) {
02455     case 'c':
02456       return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
02457     case 'h':
02458       return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
02459     case 'r':
02460       return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
02461     case 'l':
02462     case 'N':
02463       return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
02464     case 'f':
02465       return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
02466     case 'd':
02467       return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
02468     }
02469   }
02470   return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
02471 }
02472 
/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
  return 4; // log2 alignment of 4, i.e. 16-byte aligned, for every function.
}
02477 
/// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
/// Replaces a vector LOAD node with an NVPTXISD::LoadV2/LoadV4 memory
/// intrinsic that yields one scalar result per element, then rebuilds the
/// original vector value with BUILD_VECTOR.  Pushes the vector and the new
/// load chain onto Results; bails out (pushes nothing) for unsupported
/// vector types or element counts.
static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
                              SmallVectorImpl<SDValue> &Results) {
  EVT ResVT = N->getValueType(0);
  SDLoc DL(N);

  assert(ResVT.isVector() && "Vector load must have vector type");

  // We only handle "native" vector sizes for now, e.g. <4 x double> is not
  // legal.  We can (and should) split that into 2 loads of <2 x double> here
  // but I'm leaving that as a TODO for now.
  assert(ResVT.isSimple() && "Can only handle simple types");
  switch (ResVT.getSimpleVT().SimpleTy) {
  default:
    return;
  case MVT::v2i8:
  case MVT::v2i16:
  case MVT::v2i32:
  case MVT::v2i64:
  case MVT::v2f32:
  case MVT::v2f64:
  case MVT::v4i8:
  case MVT::v4i16:
  case MVT::v4i32:
  case MVT::v4f32:
    // This is a "native" vector type
    break;
  }

  EVT EltVT = ResVT.getVectorElementType();
  unsigned NumElts = ResVT.getVectorNumElements();

  // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
  // Therefore, we must ensure the type is legal.  For i1 and i8, we set the
  // loaded type to i16 and propagate the "real" type as the memory type.
  bool NeedTrunc = false;
  if (EltVT.getSizeInBits() < 16) {
    EltVT = MVT::i16;
    NeedTrunc = true;
  }

  // Pick the target opcode and result-type list for the element count;
  // the last VT (MVT::Other) is the output chain.
  unsigned Opcode = 0;
  SDVTList LdResVTs;

  switch (NumElts) {
  default:
    return;
  case 2:
    Opcode = NVPTXISD::LoadV2;
    LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
    break;
  case 4: {
    Opcode = NVPTXISD::LoadV4;
    EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
    LdResVTs = DAG.getVTList(ListVTs);
    break;
  }
  }

  SmallVector<SDValue, 8> OtherOps;

  // Copy regular operands
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    OtherOps.push_back(N->getOperand(i));

  LoadSDNode *LD = cast<LoadSDNode>(N);

  // The select routine does not have access to the LoadSDNode instance, so
  // pass along the extension information
  OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType()));

  SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, &OtherOps[0],
                                          OtherOps.size(), LD->getMemoryVT(),
                                          LD->getMemOperand());

  // Collect the scalar results, truncating widened i8/i1 elements back to
  // the original element type.
  SmallVector<SDValue, 4> ScalarRes;

  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue Res = NewLD.getValue(i);
    if (NeedTrunc)
      Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
    ScalarRes.push_back(Res);
  }

  // Result NumElts of the new node is the chain.
  SDValue LoadChain = NewLD.getValue(NumElts);

  SDValue BuildVec =
      DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts);

  Results.push_back(BuildVec);
  Results.push_back(LoadChain);
}
02570 
/// Custom-legalize INTRINSIC_W_CHAIN nodes for the LDG/LDU intrinsics.
/// Vector results are lowered to the multi-output LDGV2/LDGV4/LDUV2/LDUV4
/// target nodes and reassembled with BUILD_VECTOR; scalar i8 results are
/// widened to i16 (keeping an i8 memory VT) and truncated back.  Pushes the
/// replacement value and chain onto Results, or nothing if unhandled.
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &Results) {
  SDValue Chain = N->getOperand(0);
  SDValue Intrin = N->getOperand(1);
  SDLoc DL(N);

  // Get the intrinsic ID
  unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
  switch (IntrinNo) {
  default:
    return;
  case Intrinsic::nvvm_ldg_global_i:
  case Intrinsic::nvvm_ldg_global_f:
  case Intrinsic::nvvm_ldg_global_p:
  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p: {
    EVT ResVT = N->getValueType(0);

    if (ResVT.isVector()) {
      // Vector LDG/LDU

      unsigned NumElts = ResVT.getVectorNumElements();
      EVT EltVT = ResVT.getVectorElementType();

      // Since LDU/LDG are target nodes, we cannot rely on DAG type
      // legalization.
      // Therefore, we must ensure the type is legal.  For i1 and i8, we set the
      // loaded type to i16 and propagate the "real" type as the memory type.
      bool NeedTrunc = false;
      if (EltVT.getSizeInBits() < 16) {
        EltVT = MVT::i16;
        NeedTrunc = true;
      }

      // Select LDGVn vs LDUVn by intrinsic, and the arity by element count;
      // the trailing MVT::Other in the VT list is the output chain.
      unsigned Opcode = 0;
      SDVTList LdResVTs;

      switch (NumElts) {
      default:
        return;
      case 2:
        switch (IntrinNo) {
        default:
          return;
        case Intrinsic::nvvm_ldg_global_i:
        case Intrinsic::nvvm_ldg_global_f:
        case Intrinsic::nvvm_ldg_global_p:
          Opcode = NVPTXISD::LDGV2;
          break;
        case Intrinsic::nvvm_ldu_global_i:
        case Intrinsic::nvvm_ldu_global_f:
        case Intrinsic::nvvm_ldu_global_p:
          Opcode = NVPTXISD::LDUV2;
          break;
        }
        LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
        break;
      case 4: {
        switch (IntrinNo) {
        default:
          return;
        case Intrinsic::nvvm_ldg_global_i:
        case Intrinsic::nvvm_ldg_global_f:
        case Intrinsic::nvvm_ldg_global_p:
          Opcode = NVPTXISD::LDGV4;
          break;
        case Intrinsic::nvvm_ldu_global_i:
        case Intrinsic::nvvm_ldu_global_f:
        case Intrinsic::nvvm_ldu_global_p:
          Opcode = NVPTXISD::LDUV4;
          break;
        }
        EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
        LdResVTs = DAG.getVTList(ListVTs);
        break;
      }
      }

      SmallVector<SDValue, 8> OtherOps;

      // Copy regular operands

      OtherOps.push_back(Chain); // Chain
                                 // Skip operand 1 (intrinsic ID)
      // Others
      for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
        OtherOps.push_back(N->getOperand(i));

      MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);

      SDValue NewLD = DAG.getMemIntrinsicNode(
          Opcode, DL, LdResVTs, &OtherOps[0], OtherOps.size(),
          MemSD->getMemoryVT(), MemSD->getMemOperand());

      // Collect scalar results, truncating widened elements back to the
      // original element type where necessary.
      SmallVector<SDValue, 4> ScalarRes;

      for (unsigned i = 0; i < NumElts; ++i) {
        SDValue Res = NewLD.getValue(i);
        if (NeedTrunc)
          Res =
              DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
        ScalarRes.push_back(Res);
      }

      // Result NumElts of the new node is the chain.
      SDValue LoadChain = NewLD.getValue(NumElts);

      SDValue BuildVec =
          DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts);

      Results.push_back(BuildVec);
      Results.push_back(LoadChain);
    } else {
      // i8 LDG/LDU
      assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
             "Custom handling of non-i8 ldu/ldg?");

      // Just copy all operands as-is
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        Ops.push_back(N->getOperand(i));

      // Force output to i16
      SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);

      MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);

      // We make sure the memory type is i8, which will be used during isel
      // to select the proper instruction.
      SDValue NewLD =
          DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, &Ops[0],
                                  Ops.size(), MVT::i8, MemSD->getMemOperand());

      // Truncate the widened i16 value back to the i8 the caller expects.
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                                    NewLD.getValue(0)));
      Results.push_back(NewLD.getValue(1));
    }
  }
  }
}
02711 
02712 void NVPTXTargetLowering::ReplaceNodeResults(
02713     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
02714   switch (N->getOpcode()) {
02715   default:
02716     report_fatal_error("Unhandled custom legalization");
02717   case ISD::LOAD:
02718     ReplaceLoadVector(N, DAG, Results);
02719     return;
02720   case ISD::INTRINSIC_W_CHAIN:
02721     ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
02722     return;
02723   }
02724 }
02725 
// Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
// (An out-of-line virtual method anchors the vtable to a single
// translation unit, avoiding weak-vtable duplication.)
void NVPTXSection::anchor() {}
02728 
// Destructor: frees every section object held by this target object file.
// NOTE(review): this assumes each of these section members is owned by
// NVPTXTargetObjectFile (allocated with `new` elsewhere, presumably in its
// Initialize method) rather than by the MCContext -- confirm against the
// class definition before changing ownership.
NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
  delete TextSection;
  delete DataSection;
  delete BSSSection;
  delete ReadOnlySection;

  delete StaticCtorSection;
  delete StaticDtorSection;
  delete LSDASection;
  delete EHFrameSection;
  delete DwarfAbbrevSection;
  delete DwarfInfoSection;
  delete DwarfLineSection;
  delete DwarfFrameSection;
  delete DwarfPubTypesSection;
  delete DwarfDebugInlineSection;
  delete DwarfStrSection;
  delete DwarfLocSection;
  delete DwarfARangesSection;
  delete DwarfRangesSection;
  delete DwarfMacroInfoSection;
}