LCOV - code coverage report

Current view: top level - lib/Target/AArch64 - AArch64ISelLowering.h (source / functions)
Test: llvm-toolchain.info
Date: 2017-09-14 15:23:50

              Hit    Total    Coverage
Lines:        28     31       90.3 %
Functions:    11     14       78.6 %

          Line data    Source code
       1             : //==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : //
      10             : // This file defines the interfaces that AArch64 uses to lower LLVM code into a
      11             : // selection DAG.
      12             : //
      13             : //===----------------------------------------------------------------------===//
      14             : 
      15             : #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
      16             : #define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
      17             : 
      18             : #include "AArch64.h"
      19             : #include "llvm/CodeGen/CallingConvLower.h"
      20             : #include "llvm/CodeGen/SelectionDAG.h"
      21             : #include "llvm/IR/CallingConv.h"
      22             : #include "llvm/IR/Instruction.h"
      23             : #include "llvm/Target/TargetLowering.h"
      24             : 
      25             : namespace llvm {
      26             : 
      27             : namespace AArch64ISD {
      28             : 
      29             : enum NodeType : unsigned {
      30             :   FIRST_NUMBER = ISD::BUILTIN_OP_END,
      31             :   WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
      32             :   CALL,         // Function call.
      33             : 
      34             :   // Produces the full sequence of instructions for getting the thread pointer
      35             :   // offset of a variable into X0, using the TLSDesc model.
      36             :   TLSDESC_CALLSEQ,
      37             :   ADRP,     // Page address of a TargetGlobalAddress operand.
      38             :   ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
      39             :   LOADgot,  // Load from automatically generated descriptor (e.g. Global
      40             :             // Offset Table, TLS record).
      41             :   RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
      42             :   BRCOND,   // Conditional branch instruction; "b.cond".
      43             :   CSEL,
      44             :   FCSEL, // Conditional move instruction.
      45             :   CSINV, // Conditional select invert.
      46             :   CSNEG, // Conditional select negate.
      47             :   CSINC, // Conditional select increment.
      48             : 
      49             :   // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
      50             :   // ELF.
      51             :   THREAD_POINTER,
      52             :   ADC,
      53             :   SBC, // adc, sbc instructions
      54             : 
      55             :   // Arithmetic instructions which write flags.
      56             :   ADDS,
      57             :   SUBS,
      58             :   ADCS,
      59             :   SBCS,
      60             :   ANDS,
      61             : 
      62             :   // Conditional compares. Operands: left, right, falsecc, cc, flags
      63             :   CCMP,
      64             :   CCMN,
      65             :   FCCMP,
      66             : 
      67             :   // Floating point comparison
      68             :   FCMP,
      69             : 
      70             :   // Scalar extract
      71             :   EXTR,
      72             : 
      73             :   // Scalar-to-vector duplication
      74             :   DUP,
      75             :   DUPLANE8,
      76             :   DUPLANE16,
      77             :   DUPLANE32,
      78             :   DUPLANE64,
      79             : 
      80             :   // Vector immediate moves
      81             :   MOVI,
      82             :   MOVIshift,
      83             :   MOVIedit,
      84             :   MOVImsl,
      85             :   FMOV,
      86             :   MVNIshift,
      87             :   MVNImsl,
      88             : 
      89             :   // Vector immediate ops
      90             :   BICi,
      91             :   ORRi,
      92             : 
      93             :   // Vector bit select: similar to ISD::VSELECT, but the mask is applied
      94             :   // per bit, so the mask bits within an element need not all be identical.
      95             :   BSL,
      96             : 
      97             :   // Vector arithmetic negation
      98             :   NEG,
      99             : 
     100             :   // Vector shuffles
     101             :   ZIP1,
     102             :   ZIP2,
     103             :   UZP1,
     104             :   UZP2,
     105             :   TRN1,
     106             :   TRN2,
     107             :   REV16,
     108             :   REV32,
     109             :   REV64,
     110             :   EXT,
     111             : 
     112             :   // Vector shift by scalar
     113             :   VSHL,
     114             :   VLSHR,
     115             :   VASHR,
     116             : 
     117             :   // Vector saturating and rounding shifts by immediate
     118             :   SQSHL_I,
     119             :   UQSHL_I,
     120             :   SQSHLU_I,
     121             :   SRSHR_I,
     122             :   URSHR_I,
     123             : 
     124             :   // Vector comparisons
     125             :   CMEQ,
     126             :   CMGE,
     127             :   CMGT,
     128             :   CMHI,
     129             :   CMHS,
     130             :   FCMEQ,
     131             :   FCMGE,
     132             :   FCMGT,
     133             : 
     134             :   // Vector zero comparisons
     135             :   CMEQz,
     136             :   CMGEz,
     137             :   CMGTz,
     138             :   CMLEz,
     139             :   CMLTz,
     140             :   FCMEQz,
     141             :   FCMGEz,
     142             :   FCMGTz,
     143             :   FCMLEz,
     144             :   FCMLTz,
     145             : 
     146             :   // Vector across-lanes addition
     147             :   // Only the lower result lane is defined.
     148             :   SADDV,
     149             :   UADDV,
     150             : 
     151             :   // Vector across-lanes min/max
     152             :   // Only the lower result lane is defined.
     153             :   SMINV,
     154             :   UMINV,
     155             :   SMAXV,
     156             :   UMAXV,
     157             : 
     158             :   // Vector bitwise negation
     159             :   NOT,
     160             : 
     161             :   // Vector bitwise selection
     162             :   BIT,
     163             : 
     164             :   // Compare-and-branch
     165             :   CBZ,
     166             :   CBNZ,
     167             :   TBZ,
     168             :   TBNZ,
     169             : 
     170             :   // Tail calls
     171             :   TC_RETURN,
     172             : 
     173             :   // Custom prefetch handling
     174             :   PREFETCH,
     175             : 
     176             :   // {s|u}int to FP within a FP register.
     177             :   SITOF,
     178             :   UITOF,
     179             : 
     180             :   /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
     181             :   /// world w.r.t. vectors, which causes additional REV instructions to be
     182             :   /// generated to compensate for the byte-swapping. But sometimes we do
     183             :   /// need to re-interpret the data in SIMD vector registers in big-endian
     184             :   /// mode without emitting such REV instructions.
     185             :   NVCAST,
     186             : 
     187             :   SMULL,
     188             :   UMULL,
     189             : 
     190             :   // Reciprocal estimates and steps.
     191             :   FRECPE, FRECPS,
     192             :   FRSQRTE, FRSQRTS,
     193             : 
     194             :   // NEON Load/Store with post-increment base updates
     195             :   LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
     196             :   LD3post,
     197             :   LD4post,
     198             :   ST2post,
     199             :   ST3post,
     200             :   ST4post,
     201             :   LD1x2post,
     202             :   LD1x3post,
     203             :   LD1x4post,
     204             :   ST1x2post,
     205             :   ST1x3post,
     206             :   ST1x4post,
     207             :   LD1DUPpost,
     208             :   LD2DUPpost,
     209             :   LD3DUPpost,
     210             :   LD4DUPpost,
     211             :   LD1LANEpost,
     212             :   LD2LANEpost,
     213             :   LD3LANEpost,
     214             :   LD4LANEpost,
     215             :   ST2LANEpost,
     216             :   ST3LANEpost,
     217             :   ST4LANEpost
     218             : };
     219             : 
     220             : } // end namespace AArch64ISD
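                     : 
                     : // Illustrative sketch (not part of the original header): lowering code
                     : // typically materialises these opcodes with SelectionDAG::getNode. For
                     : // example, a flag-setting add (ADDS) produces two results, the value and
                     : // the NZCV flags, with the flags modelled as an i32:
                     : //
                     : //   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); // value + flags
                     : //   SDValue Adds = DAG.getNode(AArch64ISD::ADDS, DL, VTs, LHS, RHS);
                     : //   SDValue Flags = Adds.getValue(1); // consumed by e.g. CSEL or CCMP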
     221             : 
     222             : namespace {
     223             : 
     224             : // Any instruction that defines a 32-bit result zeros out the high half of the
     225             : // register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
     226             : // be copying from a truncate. But any other 32-bit operation will zero-extend
     227             : // up to 64 bits.
     228             : // FIXME: X86 also checks for CMOV here. Do we need something similar?
     229             : static inline bool isDef32(const SDNode &N) {
     230         370 :   unsigned Opc = N.getOpcode();
     231         195 :   return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
     232             :          Opc != ISD::CopyFromReg;
     233             : }
     234             : 
     235             : } // end anonymous namespace
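                     : 
                     : // A minimal sketch of a caller (hypothetical, not in this header): because
                     : // isDef32 guarantees the producing instruction already zeroed bits 63:32,
                     : // a zero-extend of its i32 result can be selected as a plain subregister
                     : // insertion rather than an explicit extending instruction:
                     : //
                     : //   if (Op.getOpcode() == ISD::ZERO_EXTEND &&
                     : //       Op.getOperand(0).getValueType() == MVT::i32 &&
                     : //       isDef32(*Op.getOperand(0).getNode()))
                     : //     ; // a SUBREG_TO_REG copy suffices; no UBFM (uxtw) is needed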
     236             : 
     237             : class AArch64Subtarget;
     238             : class AArch64TargetMachine;
     239             : 
     240        2386 : class AArch64TargetLowering : public TargetLowering {
     241             : public:
     242             :   explicit AArch64TargetLowering(const TargetMachine &TM,
     243             :                                  const AArch64Subtarget &STI);
     244             : 
     245             :   /// Selects the correct CCAssignFn for a given CallingConvention value.
     246             :   CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
     247             : 
     248             :   /// Selects the correct CCAssignFn for a given CallingConvention value.
     249             :   CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;
     250             : 
     251             :   /// Determine which of the bits specified in Mask are known to be either zero
     252             :   /// or one and return them in the Known.Zero and Known.One bitsets.
     253             :   void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
     254             :                                      const APInt &DemandedElts,
     255             :                                      const SelectionDAG &DAG,
     256             :                                      unsigned Depth = 0) const override;
     257             : 
     258             :   bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
     259             :                                     TargetLoweringOpt &TLO) const override;
     260             : 
     261             :   MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;
     262             : 
     263             :   /// Returns true if the target allows unaligned memory accesses of the
     264             :   /// specified type.
     265             :   bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
     266             :                                       unsigned Align = 1,
     267             :                                       bool *Fast = nullptr) const override;
     268             : 
     269             :   /// Provide custom lowering hooks for some operations.
     270             :   SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
     271             : 
     272             :   const char *getTargetNodeName(unsigned Opcode) const override;
     273             : 
     274             :   SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
     275             : 
     276             :   /// Returns true if a cast between SrcAS and DestAS is a noop.
     277           0 :   bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
     278             :     // Addrspacecasts are always noops.
     279           0 :     return true;
     280             :   }
     281             : 
     282             :   /// This method returns a target specific FastISel object, or null if the
     283             :   /// target does not support "fast" ISel.
     284             :   FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
     285             :                            const TargetLibraryInfo *libInfo) const override;
     286             : 
     287             :   bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
     288             : 
     289             :   bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
     290             : 
     291             :   /// Return true if the given shuffle mask can be codegen'd directly; return
     292             :   /// false if it should be stack expanded.
     293             :   bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
     294             : 
     295             :   /// Return the ISD::SETCC ValueType.
     296             :   EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
     297             :                          EVT VT) const override;
     298             : 
     299             :   SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
     300             : 
     301             :   MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
     302             :                                   MachineBasicBlock *BB) const;
     303             : 
     304             :   MachineBasicBlock *
     305             :   EmitInstrWithCustomInserter(MachineInstr &MI,
     306             :                               MachineBasicBlock *MBB) const override;
     307             : 
     308             :   bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
     309             :                           unsigned Intrinsic) const override;
     310             : 
     311             :   bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
     312             :   bool isTruncateFree(EVT VT1, EVT VT2) const override;
     313             : 
     314             :   bool isProfitableToHoist(Instruction *I) const override;
     315             : 
     316             :   bool isZExtFree(Type *Ty1, Type *Ty2) const override;
     317             :   bool isZExtFree(EVT VT1, EVT VT2) const override;
     318             :   bool isZExtFree(SDValue Val, EVT VT2) const override;
     319             : 
     320             :   bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAlignment) const override;
     321             : 
     322       11109 :   unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
     323             : 
     324             :   bool lowerInterleavedLoad(LoadInst *LI,
     325             :                             ArrayRef<ShuffleVectorInst *> Shuffles,
     326             :                             ArrayRef<unsigned> Indices,
     327             :                             unsigned Factor) const override;
     328             :   bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
     329             :                              unsigned Factor) const override;
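                     : 
                     :   // Illustrative C source (hypothetical) for the interleave hooks above:
                     :   // with a maximum interleave factor of 4, a stride-4 pattern such as
                     :   //
                     :   //   for (int i = 0; i != n; ++i) {
                     :   //     r += rgba[4*i + 0];
                     :   //     g += rgba[4*i + 1];
                     :   //   }
                     :   //
                     :   // can be vectorized so that lowerInterleavedLoad replaces the wide load
                     :   // and its deinterleaving shuffles with one @llvm.aarch64.neon.ld4 call
                     :   // per vector-width chunk.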
     330             : 
     331             :   bool isLegalAddImmediate(int64_t) const override;
     332             :   bool isLegalICmpImmediate(int64_t) const override;
     333             : 
     334             :   EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
     335             :                           bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
     336             :                           MachineFunction &MF) const override;
     337             : 
     338             :   /// Return true if the addressing mode represented by AM is legal for this
     339             :   /// target, for a load/store of the specified type.
     340             :   bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
     341             :                              unsigned AS,
     342             :                              Instruction *I = nullptr) const override;
     343             : 
     344             :   /// \brief Return the cost of the scaling factor used in the addressing
     345             :   /// mode represented by AM for this target, for a load/store
     346             :   /// of the specified type.
     347             :   /// If the AM is supported, the return value must be >= 0.
     348             :   /// If the AM is not supported, it returns a negative value.
     349             :   int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
     350             :                            unsigned AS) const override;
     351             : 
     352             :   /// Return true if an FMA operation is faster than a pair of fmul and fadd
     353             :   /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
     354             :   /// returns true; otherwise fmuladd is expanded to fmul + fadd.
     355             :   bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
     356             : 
     357             :   const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
     358             : 
     359             :   /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
     360             :   bool isDesirableToCommuteWithShift(const SDNode *N) const override;
     361             : 
     362             :   /// \brief Returns true if it is beneficial to convert a load of a constant
     363             :   /// to just the constant itself.
     364             :   bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
     365             :                                          Type *Ty) const override;
     366             : 
     367             :   Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
     368             :                         AtomicOrdering Ord) const override;
     369             :   Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
     370             :                               Value *Addr, AtomicOrdering Ord) const override;
     371             : 
     372             :   void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;
     373             : 
     374             :   TargetLoweringBase::AtomicExpansionKind
     375             :   shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
     376             :   bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
     377             :   TargetLoweringBase::AtomicExpansionKind
     378             :   shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
     379             : 
     380             :   bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
     381             : 
     382             :   bool useLoadStackGuardNode() const override;
     383             :   TargetLoweringBase::LegalizeTypeAction
     384             :   getPreferredVectorAction(EVT VT) const override;
     385             : 
     386             :   /// If the target has a standard location for the stack protector cookie,
     387             :   /// returns the address of that location. Otherwise, returns nullptr.
     388             :   Value *getIRStackGuard(IRBuilder<> &IRB) const override;
     389             : 
     390             :   /// If the target has a standard location for the unsafe stack pointer,
     391             :   /// returns the address of that location. Otherwise, returns nullptr.
     392             :   Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;
     393             : 
     394             :   /// If a physical register, this returns the register that receives the
     395             :   /// exception address on entry to an EH pad.
     396             :   unsigned
     397          30 :   getExceptionPointerRegister(const Constant *PersonalityFn) const override {
     398             :     // FIXME: This is a guess. Has this been defined yet?
     399          30 :     return AArch64::X0;
     400             :   }
     401             : 
     402             :   /// If a physical register, this returns the register that receives the
     403             :   /// exception typeid on entry to a landing pad.
     404             :   unsigned
     405          15 :   getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
     406             :     // FIXME: This is a guess. Has this been defined yet?
     407          15 :     return AArch64::X1;
     408             :   }
     409             : 
     410             :   bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
     411             : 
     412         706 :   bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
     413             :                         const SelectionDAG &DAG) const override {
     414             :     // Do not merge to float value size (128 bits) if no implicit
     415             :     // float attribute is set.
     416             : 
     417         706 :     bool NoFloat = DAG.getMachineFunction().getFunction()->hasFnAttribute(
     418         706 :         Attribute::NoImplicitFloat);
     419             : 
     420         706 :     if (NoFloat)
     421           4 :       return (MemVT.getSizeInBits() <= 64);
     422             :     return true;
     423             :   }
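                     : 
                     :   // Illustrative IR (hypothetical): given a function carrying the
                     :   // noimplicitfloat attribute,
                     :   //
                     :   //   define void @copy4(i32* %d, i32* %s) noimplicitfloat { ... }
                     :   //
                     :   // adjacent 32-bit stores may still merge into 64-bit GPR stores, but not
                     :   // into one 128-bit store, since that would occupy an FP/SIMD q register.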
     424             : 
     425           3 :   bool isCheapToSpeculateCttz() const override {
     426           3 :     return true;
     427             :   }
     428             : 
     429          22 :   bool isCheapToSpeculateCtlz() const override {
     430          22 :     return true;
     431             :   }
     432             : 
     433             :   bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
     434             : 
     435          18 :   bool hasAndNotCompare(SDValue) const override {
     436             :     // 'bics'
     437          18 :     return true;
     438             :   }
     439             : 
     440        4852 :   bool hasBitPreservingFPLogic(EVT VT) const override {
     441             :     // FIXME: Is this always true? It should be true for vectors at least.
     442       14515 :     return VT == MVT::f32 || VT == MVT::f64;
     443             :   }
     444             : 
     445       12292 :   bool supportSplitCSR(MachineFunction *MF) const override {
     446       24596 :     return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
     447       12316 :            MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
     448             :   }
     449             :   void initializeSplitCSR(MachineBasicBlock *Entry) const override;
     450             :   void insertCopiesSplitCSR(
     451             :       MachineBasicBlock *Entry,
     452             :       const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
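                     : 
                     :   // Sketch (illustrative IR, not from this file): split CSR applies to
                     :   // TLS wrapper functions such as
                     :   //
                     :   //   define cxx_fast_tlscc i32* @_ZTW5tlvar() nounwind { ... }
                     :   //
                     :   // where saving callee-saved registers only on the slow path keeps the
                     :   // common fast path cheap.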
     453             : 
     454      257440 :   bool supportSwiftError() const override {
     455      257440 :     return true;
     456             :   }
     457             : 
     458             :   /// Returns the size of the platform's va_list object.
     459             :   unsigned getVaListSizeInBits(const DataLayout &DL) const override;
     460             : 
     461             :   /// Returns true if \p VecTy is a legal interleaved access type. This
     462             :   /// function checks the vector element type and the overall width of the
     463             :   /// vector.
     464             :   bool isLegalInterleavedAccessType(VectorType *VecTy,
     465             :                                     const DataLayout &DL) const;
     466             : 
     467             :   /// Returns the number of interleaved accesses that will be generated when
     468             :   /// lowering accesses of the given type.
     469             :   unsigned getNumInterleavedAccesses(VectorType *VecTy,
     470             :                                      const DataLayout &DL) const;
     471             : 
     472             :   MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;
     473             : 
     474             :   bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
     475             :                                                  CallingConv::ID CallConv,
     476             :                                                  bool isVarArg) const override;
     477             : private:
     478             :   bool isExtFreeImpl(const Instruction *Ext) const override;
     479             : 
     480             :   /// Keep a pointer to the AArch64Subtarget around so that we can
     481             :   /// make the right decision when generating code for different targets.
     482             :   const AArch64Subtarget *Subtarget;
     483             : 
     484             :   void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
     485             :   void addDRTypeForNEON(MVT VT);
     486             :   void addQRTypeForNEON(MVT VT);
     487             : 
     488             :   SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
     489             :                                bool isVarArg,
     490             :                                const SmallVectorImpl<ISD::InputArg> &Ins,
     491             :                                const SDLoc &DL, SelectionDAG &DAG,
     492             :                                SmallVectorImpl<SDValue> &InVals) const override;
     493             : 
     494             :   SDValue LowerCall(CallLoweringInfo & /*CLI*/,
     495             :                     SmallVectorImpl<SDValue> &InVals) const override;
     496             : 
     497             :   SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
     498             :                           CallingConv::ID CallConv, bool isVarArg,
     499             :                           const SmallVectorImpl<ISD::InputArg> &Ins,
     500             :                           const SDLoc &DL, SelectionDAG &DAG,
     501             :                           SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
     502             :                           SDValue ThisVal) const;
     503             : 
     504             :   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
     505             : 
     506             :   bool isEligibleForTailCallOptimization(
     507             :       SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
     508             :       const SmallVectorImpl<ISD::OutputArg> &Outs,
     509             :       const SmallVectorImpl<SDValue> &OutVals,
     510             :       const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
     511             : 
     512             :   /// Finds the incoming stack arguments which overlap the given fixed stack
     513             :   /// object and incorporates their load into the current chain. This prevents
     514             :   /// an upcoming store from clobbering the stack argument before it's used.
     515             :   SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
     516             :                               MachineFrameInfo &MFI, int ClobberedFI) const;
     517             : 
     518             :   bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;
     519             : 
     520             :   void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
     521             :                            SDValue &Chain) const;
     522             : 
     523             :   bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
     524             :                       bool isVarArg,
     525             :                       const SmallVectorImpl<ISD::OutputArg> &Outs,
     526             :                       LLVMContext &Context) const override;
     527             : 
     528             :   SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
     529             :                       const SmallVectorImpl<ISD::OutputArg> &Outs,
     530             :                       const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
     531             :                       SelectionDAG &DAG) const override;
     532             : 
     533             :   SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
     534             :                         unsigned Flag) const;
     535             :   SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
     536             :                         unsigned Flag) const;
     537             :   SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
     538             :                         unsigned Flag) const;
     539             :   SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
     540             :                         unsigned Flag) const;
     541             :   template <class NodeTy> SDValue getGOT(NodeTy *N, SelectionDAG &DAG) const;
     542             :   template <class NodeTy>
     543             :   SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG) const;
     544             :   template <class NodeTy> SDValue getAddr(NodeTy *N, SelectionDAG &DAG) const;
     545             :   SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
     546             :   SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
     547             :   SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
     548             :   SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
     549             :   SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
     550             :                                  SelectionDAG &DAG) const;
     551             :   SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
     552             :   SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
     553             :   SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
     554             :   SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
     555             :   SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
     556             :                          SDValue TVal, SDValue FVal, const SDLoc &dl,
     557             :                          SelectionDAG &DAG) const;
     558             :   SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
     559             :   SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
     560             :   SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
     561             :   SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
     562             :   SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
     563             :   SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
     564             :   SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
     565             :   SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
     566             :   SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
     567             :   SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
     568             :   SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
     569             :   SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
     570             :   SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
     571             :   SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
     572             :   SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
     573             :   SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
     574             :   SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
     575             :   SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
     576             :   SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
     577             :   SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
     578             :   SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
     579             :   SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
     580             :   SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
     581             :                         RTLIB::Libcall Call) const;
     582             :   SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
     583             :   SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
     584             :   SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
     585             :   SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
     586             :   SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
     587             :   SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
     588             :   SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
     589             :   SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
     590             :   SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
     591             :   SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
     592             : 
     593             :   SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
     594             :                         std::vector<SDNode *> *Created) const override;
     595             :   SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
     596             :                           int &ExtraSteps, bool &UseOneConst,
     597             :                           bool Reciprocal) const override;
     598             :   SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
     599             :                            int &ExtraSteps) const override;
     600             :   unsigned combineRepeatedFPDivisors() const override;
     601             : 
     602             :   ConstraintType getConstraintType(StringRef Constraint) const override;
     603             :   unsigned getRegisterByName(const char* RegName, EVT VT,
     604             :                              SelectionDAG &DAG) const override;
     605             : 
     606             :   /// Examine constraint string and operand type and determine a weight value.
     607             :   /// The operand object must already have been set up with the operand type.
     608             :   ConstraintWeight
     609             :   getSingleConstraintMatchWeight(AsmOperandInfo &info,
     610             :                                  const char *constraint) const override;
     611             : 
     612             :   std::pair<unsigned, const TargetRegisterClass *>
     613             :   getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
     614             :                                StringRef Constraint, MVT VT) const override;
     615             : 
     616             :   const char *LowerXConstraint(EVT ConstraintVT) const override;
     617             : 
     618             :   void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
     619             :                                     std::vector<SDValue> &Ops,
     620             :                                     SelectionDAG &DAG) const override;
     621             : 
     622           2 :   unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
     623           2 :     if (ConstraintCode == "Q")
     624             :       return InlineAsm::Constraint_Q;
     625             :     // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
     626             :     //        followed by llvm_unreachable so we'll leave them unimplemented in
     627             :     //        the backend for now.
     628           0 :     return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
     629             :   }
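                     : 
                     :   // Hypothetical C caller (illustrative): the "Q" constraint describes a
                     :   // memory operand addressed by a single base register with no offset,
                     :   // which is what an exclusive load requires:
                     :   //
                     :   //   long v, *ptr = /* ... */;
                     :   //   asm volatile("ldxr %0, %1" : "=r"(v) : "Q"(*ptr));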
     630             : 
     631             :   bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
     632             :   bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
     633             :   bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
     634             :                               ISD::MemIndexedMode &AM, bool &IsInc,
     635             :                               SelectionDAG &DAG) const;
     636             :   bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
     637             :                                  ISD::MemIndexedMode &AM,
     638             :                                  SelectionDAG &DAG) const override;
     639             :   bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
     640             :                                   SDValue &Offset, ISD::MemIndexedMode &AM,
     641             :                                   SelectionDAG &DAG) const override;
     642             : 
     643             :   void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
     644             :                           SelectionDAG &DAG) const override;
     645             : 
     646             :   bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
     647             : };
     648             : 
     649             : namespace AArch64 {
     650             : FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
     651             :                          const TargetLibraryInfo *libInfo);
     652             : } // end namespace AArch64
     653             : 
     654             : } // end namespace llvm
     655             : 
     656             : #endif

Generated by: LCOV version 1.13