//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left, right, falsecc, cc, flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector saturating and rounding shifts by immediate
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors, as it causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  SMULL,
  UMULL,

  // Reciprocal estimates and steps.
  FRECPE, FRECPS,
  FRSQRTE, FRSQRTS,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD
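
// A sketch of how these opcodes are consumed (illustrative, assuming the
// usual SelectionDAG lowering flow; the exact operand and result lists for
// each opcode are fixed by AArch64ISelLowering.cpp, not documented here).
// A flag-setting subtract used for a compare, for example, is typically
// built with a second NZCV result:
//
//   SDVTList VTs = DAG.getVTList(VT, MVT::i32);  // {value, NZCV flags}
//   SDValue Cmp = DAG.getNode(AArch64ISD::SUBS, DL, VTs, LHS, RHS);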

namespace {

// Any instruction that defines a 32-bit result zeros out the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits.
// FIXME: X86 also checks for CMOV here. Do we need something similar?
static inline bool isDef32(const SDNode &N) {
  unsigned Opc = N.getOpcode();
  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
         Opc != ISD::CopyFromReg;
}

} // end anonymous namespace
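
// Sketch of a typical use of isDef32 (a hypothetical combine, for
// illustration only): a zero-extension of a 32-bit value can be treated as
// free when its source instruction already zeroes bits 63:32.
//
//   SDValue Src = N->getOperand(0);  // N is a hypothetical (zext i64 Src)
//   if (Src.getValueType() == MVT::i32 && isDef32(*Src.getNode())) {
//     // The producing instruction zeroed the upper half, so the extension
//     // can be modelled as a plain subregister insertion.
//   }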

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge stores into a 128-bit (float/vector) value if the
    // NoImplicitFloat attribute is set.

    bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
        Attribute::NoImplicitFloat);

    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    return VT.getSizeInBits() >= 64; // vector 'bic'
  }
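
  // What the two hooks above enable, as a sketch (assumed combine shape, not
  // a guarantee of where the fold fires): generic DAG combines consult
  // hasAndNot()/hasAndNotCompare() before forming (and X, (xor Y, -1)),
  // which then selects to a single AArch64 instruction:
  //
  //   bic  w0, w0, w1    // w0 = w0 & ~w1
  //   bics wzr, w0, w1   // flag-setting variant, feeds b.cc or csel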

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;
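
  // Example of the kind of access these two functions classify (illustrative
  // IR, interleave factor 2):
  //
  //   %wide = load <8 x i32>, <8 x i32>* %ptr
  //   %even = shufflevector <8 x i32> %wide, <8 x i32> undef,
  //                         <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  //   %odd  = shufflevector <8 x i32> %wide, <8 x i32> undef,
  //                         <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  //
  // When the type is legal, lowerInterleavedLoad() can emit a single NEON
  // "ld2 { v0.4s, v1.4s }, [x0]" instead of a wide load plus shuffles.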

  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;
private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
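
  // The "Q" constraint denotes a memory operand addressed by a single base
  // register with no offset, as required by the exclusive load/store
  // instructions. A minimal sketch of its use from C source:
  //
  //   int v;
  //   asm volatile("ldxr %w0, %1" : "=r"(v) : "Q"(*ptr));  // ptr is int *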

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif
