LCOV - code coverage report

Current view:  top level - lib/Target/AArch64 - AArch64InstructionSelector.cpp
Test:          llvm-toolchain.info
Date:          2017-09-14 15:23:50

               Hit    Total    Coverage
  Lines:       501      670      74.8 %
  Functions:    18       19      94.7 %

Legend:  Lines:  hit | not hit

          Line data    Source code
       1             : //===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : /// \file
      10             : /// This file implements the targeting of the InstructionSelector class for
      11             : /// AArch64.
      12             : /// \todo This should be generated by TableGen.
      13             : //===----------------------------------------------------------------------===//
      14             : 
      15             : #include "AArch64InstrInfo.h"
      16             : #include "AArch64MachineFunctionInfo.h"
      17             : #include "AArch64RegisterBankInfo.h"
      18             : #include "AArch64RegisterInfo.h"
      19             : #include "AArch64Subtarget.h"
      20             : #include "AArch64TargetMachine.h"
      21             : #include "MCTargetDesc/AArch64AddressingModes.h"
      22             : #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
      23             : #include "llvm/CodeGen/GlobalISel/Utils.h"
      24             : #include "llvm/CodeGen/MachineBasicBlock.h"
      25             : #include "llvm/CodeGen/MachineFunction.h"
      26             : #include "llvm/CodeGen/MachineInstr.h"
      27             : #include "llvm/CodeGen/MachineInstrBuilder.h"
      28             : #include "llvm/CodeGen/MachineOperand.h"
      29             : #include "llvm/CodeGen/MachineRegisterInfo.h"
      30             : #include "llvm/IR/Type.h"
      31             : #include "llvm/Support/Debug.h"
      32             : #include "llvm/Support/raw_ostream.h"
      33             : 
      34             : #define DEBUG_TYPE "aarch64-isel"
      35             : 
      36             : #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
      37             : 
      38             : using namespace llvm;
      39             : 
      40             : namespace {
      41             : 
      42             : #define GET_GLOBALISEL_PREDICATE_BITSET
      43             : #include "AArch64GenGlobalISel.inc"
      44             : #undef GET_GLOBALISEL_PREDICATE_BITSET
      45             : 
      46        3579 : class AArch64InstructionSelector : public InstructionSelector {
      47             : public:
      48             :   AArch64InstructionSelector(const AArch64TargetMachine &TM,
      49             :                              const AArch64Subtarget &STI,
      50             :                              const AArch64RegisterBankInfo &RBI);
      51             : 
      52             :   bool select(MachineInstr &I) const override;
      53             : 
      54             : private:
      55             :   /// tblgen-erated 'select' implementation, used as the initial selector for
      56             :   /// the patterns that don't require complex C++.
      57             :   bool selectImpl(MachineInstr &I) const;
      58             : 
      59             :   bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
      60             :                           MachineRegisterInfo &MRI) const;
      61             :   bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
      62             :                            MachineRegisterInfo &MRI) const;
      63             : 
      64             :   bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
      65             :                            MachineRegisterInfo &MRI) const;
      66             : 
      67             :   ComplexRendererFn selectArithImmed(MachineOperand &Root) const;
      68             : 
      69             :   const AArch64TargetMachine &TM;
      70             :   const AArch64Subtarget &STI;
      71             :   const AArch64InstrInfo &TII;
      72             :   const AArch64RegisterInfo &TRI;
      73             :   const AArch64RegisterBankInfo &RBI;
      74             : 
      75             : #define GET_GLOBALISEL_PREDICATES_DECL
      76             : #include "AArch64GenGlobalISel.inc"
      77             : #undef GET_GLOBALISEL_PREDICATES_DECL
      78             : 
      79             : // We declare the temporaries used by selectImpl() in the class to minimize the
      80             : // cost of constructing placeholder values.
      81             : #define GET_GLOBALISEL_TEMPORARIES_DECL
      82             : #include "AArch64GenGlobalISel.inc"
      83             : #undef GET_GLOBALISEL_TEMPORARIES_DECL
      84             : };
      85             : 
      86             : } // end anonymous namespace
      87             : 
      88             : #define GET_GLOBALISEL_IMPL
      89             : #include "AArch64GenGlobalISel.inc"
      90             : #undef GET_GLOBALISEL_IMPL
      91             : 
      92        1214 : AArch64InstructionSelector::AArch64InstructionSelector(
      93             :     const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
      94        1214 :     const AArch64RegisterBankInfo &RBI)
      95        1214 :     : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      96        1214 :       TRI(*STI.getRegisterInfo()), RBI(RBI),
      97             : #define GET_GLOBALISEL_PREDICATES_INIT
      98             : #include "AArch64GenGlobalISel.inc"
      99             : #undef GET_GLOBALISEL_PREDICATES_INIT
     100             : #define GET_GLOBALISEL_TEMPORARIES_INIT
     101             : #include "AArch64GenGlobalISel.inc"
     102             : #undef GET_GLOBALISEL_TEMPORARIES_INIT
     103             : {
     104        1214 : }
     105             : 
     106             : // FIXME: This should be target-independent, inferred from the types declared
     107             : // for each class in the bank.
     108             : static const TargetRegisterClass *
     109          52 : getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
     110             :                          const RegisterBankInfo &RBI) {
     111          52 :   if (RB.getID() == AArch64::GPRRegBankID) {
     112          49 :     if (Ty.getSizeInBits() <= 32)
     113             :       return &AArch64::GPR32RegClass;
     114           7 :     if (Ty.getSizeInBits() == 64)
     115             :       return &AArch64::GPR64RegClass;
     116             :     return nullptr;
     117             :   }
     118             : 
     119           3 :   if (RB.getID() == AArch64::FPRRegBankID) {
     120           3 :     if (Ty.getSizeInBits() == 32)
     121             :       return &AArch64::FPR32RegClass;
     122           0 :     if (Ty.getSizeInBits() == 64)
     123             :       return &AArch64::FPR64RegClass;
     124           0 :     if (Ty.getSizeInBits() == 128)
     125             :       return &AArch64::FPR128RegClass;
     126             :     return nullptr;
     127             :   }
     128             : 
     129             :   return nullptr;
     130             : }
     131             : 
     132             : /// Check whether \p I is a currently unsupported binary operation:
     133             : /// - it has an unsized type
     134             : /// - an operand is not a vreg
      135             : /// - not all operands are in the same bank
     136             : /// These are checks that should someday live in the verifier, but right now,
     137             : /// these are mostly limitations of the aarch64 selector.
     138           8 : static bool unsupportedBinOp(const MachineInstr &I,
     139             :                              const AArch64RegisterBankInfo &RBI,
     140             :                              const MachineRegisterInfo &MRI,
     141             :                              const AArch64RegisterInfo &TRI) {
     142           8 :   LLT Ty = MRI.getType(I.getOperand(0).getReg());
     143           8 :   if (!Ty.isValid()) {
     144             :     DEBUG(dbgs() << "Generic binop register should be typed\n");
     145             :     return true;
     146             :   }
     147             : 
     148           8 :   const RegisterBank *PrevOpBank = nullptr;
     149          32 :   for (auto &MO : I.operands()) {
     150             :     // FIXME: Support non-register operands.
     151          24 :     if (!MO.isReg()) {
     152             :       DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
     153             :       return true;
     154             :     }
     155             : 
      156             :     // FIXME: Can generic operations have physical register operands? If
     157             :     // so, this will need to be taught about that, and we'll need to get the
     158             :     // bank out of the minimal class for the register.
     159             :     // Either way, this needs to be documented (and possibly verified).
     160          48 :     if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
     161             :       DEBUG(dbgs() << "Generic inst has physical register operand\n");
     162             :       return true;
     163             :     }
     164             : 
     165          24 :     const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
     166          24 :     if (!OpBank) {
     167             :       DEBUG(dbgs() << "Generic register has no bank or class\n");
     168             :       return true;
     169             :     }
     170             : 
     171          24 :     if (PrevOpBank && OpBank != PrevOpBank) {
     172             :       DEBUG(dbgs() << "Generic inst operands have different banks\n");
     173             :       return true;
     174             :     }
     175          24 :     PrevOpBank = OpBank;
     176             :   }
     177             :   return false;
     178             : }
     179             : 
     180             : /// Select the AArch64 opcode for the basic binary operation \p GenericOpc
     181             : /// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
     182             : /// and of size \p OpSize.
     183             : /// \returns \p GenericOpc if the combination is unsupported.
     184           6 : static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
     185             :                                unsigned OpSize) {
     186           6 :   switch (RegBankID) {
     187           6 :   case AArch64::GPRRegBankID:
     188           6 :     if (OpSize == 32) {
     189           3 :       switch (GenericOpc) {
     190             :       case TargetOpcode::G_SHL:
     191             :         return AArch64::LSLVWr;
     192           1 :       case TargetOpcode::G_LSHR:
     193           1 :         return AArch64::LSRVWr;
     194           1 :       case TargetOpcode::G_ASHR:
     195           1 :         return AArch64::ASRVWr;
     196           0 :       default:
     197           0 :         return GenericOpc;
     198             :       }
     199           3 :     } else if (OpSize == 64) {
     200           3 :       switch (GenericOpc) {
     201             :       case TargetOpcode::G_GEP:
     202             :         return AArch64::ADDXrr;
     203           0 :       case TargetOpcode::G_SHL:
     204           0 :         return AArch64::LSLVXr;
     205           0 :       case TargetOpcode::G_LSHR:
     206           0 :         return AArch64::LSRVXr;
     207           0 :       case TargetOpcode::G_ASHR:
     208           0 :         return AArch64::ASRVXr;
     209           0 :       default:
     210           0 :         return GenericOpc;
     211             :       }
     212             :     }
     213             :     break;
     214           0 :   case AArch64::FPRRegBankID:
     215           0 :     switch (OpSize) {
     216           0 :     case 32:
     217           0 :       switch (GenericOpc) {
     218             :       case TargetOpcode::G_FADD:
     219             :         return AArch64::FADDSrr;
     220           0 :       case TargetOpcode::G_FSUB:
     221           0 :         return AArch64::FSUBSrr;
     222           0 :       case TargetOpcode::G_FMUL:
     223           0 :         return AArch64::FMULSrr;
     224           0 :       case TargetOpcode::G_FDIV:
     225           0 :         return AArch64::FDIVSrr;
     226           0 :       default:
     227           0 :         return GenericOpc;
     228             :       }
     229           0 :     case 64:
     230           0 :       switch (GenericOpc) {
     231             :       case TargetOpcode::G_FADD:
     232             :         return AArch64::FADDDrr;
     233           0 :       case TargetOpcode::G_FSUB:
     234           0 :         return AArch64::FSUBDrr;
     235           0 :       case TargetOpcode::G_FMUL:
     236           0 :         return AArch64::FMULDrr;
     237           0 :       case TargetOpcode::G_FDIV:
     238           0 :         return AArch64::FDIVDrr;
     239           0 :       case TargetOpcode::G_OR:
     240           0 :         return AArch64::ORRv8i8;
     241           0 :       default:
     242           0 :         return GenericOpc;
     243             :       }
     244             :     }
     245             :     break;
     246             :   }
     247             :   return GenericOpc;
     248             : }
     249             : 
     250             : /// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
     251             : /// appropriate for the (value) register bank \p RegBankID and of memory access
     252             : /// size \p OpSize.  This returns the variant with the base+unsigned-immediate
     253             : /// addressing mode (e.g., LDRXui).
     254             : /// \returns \p GenericOpc if the combination is unsupported.
     255          32 : static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
     256             :                                     unsigned OpSize) {
     257          32 :   const bool isStore = GenericOpc == TargetOpcode::G_STORE;
     258          32 :   switch (RegBankID) {
     259          20 :   case AArch64::GPRRegBankID:
     260          20 :     switch (OpSize) {
     261           4 :     case 8:
     262           4 :       return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
     263           4 :     case 16:
     264           4 :       return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
     265           5 :     case 32:
     266           5 :       return isStore ? AArch64::STRWui : AArch64::LDRWui;
     267           7 :     case 64:
     268           7 :       return isStore ? AArch64::STRXui : AArch64::LDRXui;
     269             :     }
     270             :     break;
     271          12 :   case AArch64::FPRRegBankID:
     272          12 :     switch (OpSize) {
     273           2 :     case 8:
     274           2 :       return isStore ? AArch64::STRBui : AArch64::LDRBui;
     275           2 :     case 16:
     276           2 :       return isStore ? AArch64::STRHui : AArch64::LDRHui;
     277           4 :     case 32:
     278           4 :       return isStore ? AArch64::STRSui : AArch64::LDRSui;
     279           4 :     case 64:
     280           4 :       return isStore ? AArch64::STRDui : AArch64::LDRDui;
     281             :     }
     282             :     break;
     283             :   }
     284             :   return GenericOpc;
     285             : }
     286             : 
     287         412 : static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
     288             :                        MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
     289             :                        const RegisterBankInfo &RBI) {
     290             : 
     291         412 :   unsigned DstReg = I.getOperand(0).getReg();
     292         412 :   if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
     293             :     assert(I.isCopy() && "Generic operators do not allow physical registers");
     294             :     return true;
     295             :   }
     296             : 
     297         240 :   const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
     298         240 :   const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
     299         240 :   unsigned SrcReg = I.getOperand(1).getReg();
     300         240 :   const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
     301             :   (void)SrcSize;
     302             :   assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
     303             :          "No phys reg on generic operators");
     304             :   assert(
     305             :       (DstSize == SrcSize ||
      306             :        // Copies are a means to set up initial types; the number of
     307             :        // bits may not exactly match.
     308             :        (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
     309             :         DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI)) ||
      310             :        // Copies are a means to copy bits around; as long as we are
     311             :        // on the same register class, that's fine. Otherwise, that
     312             :        // means we need some SUBREG_TO_REG or AND & co.
     313             :        (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
     314             :       "Copy with different width?!");
     315             :   assert((DstSize <= 64 || RegBank.getID() == AArch64::FPRRegBankID) &&
     316             :          "GPRs cannot get more than 64-bit width values");
     317         240 :   const TargetRegisterClass *RC = nullptr;
     318             : 
     319         240 :   if (RegBank.getID() == AArch64::FPRRegBankID) {
     320          56 :     if (DstSize <= 16)
     321             :       RC = &AArch64::FPR16RegClass;
     322          54 :     else if (DstSize <= 32)
     323             :       RC = &AArch64::FPR32RegClass;
     324          26 :     else if (DstSize <= 64)
     325             :       RC = &AArch64::FPR64RegClass;
     326           0 :     else if (DstSize <= 128)
     327             :       RC = &AArch64::FPR128RegClass;
     328             :     else {
     329             :       DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
     330             :       return false;
     331             :     }
     332             :   } else {
     333             :     assert(RegBank.getID() == AArch64::GPRRegBankID &&
     334             :            "Bitcast for the flags?");
     335             :     RC =
     336         184 :         DstSize <= 32 ? &AArch64::GPR32allRegClass : &AArch64::GPR64allRegClass;
     337             :   }
     338             : 
     339             :   // No need to constrain SrcReg. It will get constrained when
      340             :   // we hit another of its uses or its defs.
     341             :   // Copies do not have constraints.
     342         240 :   if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
     343             :     DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
     344             :                  << " operand\n");
     345             :     return false;
     346             :   }
     347         720 :   I.setDesc(TII.get(AArch64::COPY));
     348         240 :   return true;
     349             : }
     350             : 
     351           0 : static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
     352           0 :   if (!DstTy.isScalar() || !SrcTy.isScalar())
     353             :     return GenericOpc;
     354             : 
     355           0 :   const unsigned DstSize = DstTy.getSizeInBits();
     356           0 :   const unsigned SrcSize = SrcTy.getSizeInBits();
     357             : 
     358           0 :   switch (DstSize) {
     359           0 :   case 32:
     360           0 :     switch (SrcSize) {
     361           0 :     case 32:
     362           0 :       switch (GenericOpc) {
     363             :       case TargetOpcode::G_SITOFP:
     364             :         return AArch64::SCVTFUWSri;
     365           0 :       case TargetOpcode::G_UITOFP:
     366           0 :         return AArch64::UCVTFUWSri;
     367           0 :       case TargetOpcode::G_FPTOSI:
     368           0 :         return AArch64::FCVTZSUWSr;
     369           0 :       case TargetOpcode::G_FPTOUI:
     370           0 :         return AArch64::FCVTZUUWSr;
     371           0 :       default:
     372           0 :         return GenericOpc;
     373             :       }
     374           0 :     case 64:
     375           0 :       switch (GenericOpc) {
     376             :       case TargetOpcode::G_SITOFP:
     377             :         return AArch64::SCVTFUXSri;
     378           0 :       case TargetOpcode::G_UITOFP:
     379           0 :         return AArch64::UCVTFUXSri;
     380           0 :       case TargetOpcode::G_FPTOSI:
     381           0 :         return AArch64::FCVTZSUWDr;
     382           0 :       case TargetOpcode::G_FPTOUI:
     383           0 :         return AArch64::FCVTZUUWDr;
     384           0 :       default:
     385           0 :         return GenericOpc;
     386             :       }
     387             :     default:
     388             :       return GenericOpc;
     389             :     }
     390           0 :   case 64:
     391           0 :     switch (SrcSize) {
     392           0 :     case 32:
     393           0 :       switch (GenericOpc) {
     394             :       case TargetOpcode::G_SITOFP:
     395             :         return AArch64::SCVTFUWDri;
     396           0 :       case TargetOpcode::G_UITOFP:
     397           0 :         return AArch64::UCVTFUWDri;
     398           0 :       case TargetOpcode::G_FPTOSI:
     399           0 :         return AArch64::FCVTZSUXSr;
     400           0 :       case TargetOpcode::G_FPTOUI:
     401           0 :         return AArch64::FCVTZUUXSr;
     402           0 :       default:
     403           0 :         return GenericOpc;
     404             :       }
     405           0 :     case 64:
     406           0 :       switch (GenericOpc) {
     407             :       case TargetOpcode::G_SITOFP:
     408             :         return AArch64::SCVTFUXDri;
     409           0 :       case TargetOpcode::G_UITOFP:
     410           0 :         return AArch64::UCVTFUXDri;
     411           0 :       case TargetOpcode::G_FPTOSI:
     412           0 :         return AArch64::FCVTZSUXDr;
     413           0 :       case TargetOpcode::G_FPTOUI:
     414           0 :         return AArch64::FCVTZUUXDr;
     415           0 :       default:
     416           0 :         return GenericOpc;
     417             :       }
     418             :     default:
     419             :       return GenericOpc;
     420             :     }
     421             :   default:
     422             :     return GenericOpc;
     423             :   };
     424             :   return GenericOpc;
     425             : }
     426             : 
     427          10 : static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
     428          10 :   switch (P) {
     429           0 :   default:
     430           0 :     llvm_unreachable("Unknown condition code!");
     431             :   case CmpInst::ICMP_NE:
     432             :     return AArch64CC::NE;
     433           3 :   case CmpInst::ICMP_EQ:
     434           3 :     return AArch64CC::EQ;
     435           0 :   case CmpInst::ICMP_SGT:
     436           0 :     return AArch64CC::GT;
     437           0 :   case CmpInst::ICMP_SGE:
     438           0 :     return AArch64CC::GE;
     439           0 :   case CmpInst::ICMP_SLT:
     440           0 :     return AArch64CC::LT;
     441           0 :   case CmpInst::ICMP_SLE:
     442           0 :     return AArch64CC::LE;
     443           0 :   case CmpInst::ICMP_UGT:
     444           0 :     return AArch64CC::HI;
     445           0 :   case CmpInst::ICMP_UGE:
     446           0 :     return AArch64CC::HS;
     447           3 :   case CmpInst::ICMP_ULT:
     448           3 :     return AArch64CC::LO;
     449           1 :   case CmpInst::ICMP_ULE:
     450           1 :     return AArch64CC::LS;
     451             :   }
     452             : }
     453             : 
     454           6 : static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
     455             :                                       AArch64CC::CondCode &CondCode,
     456             :                                       AArch64CC::CondCode &CondCode2) {
     457           6 :   CondCode2 = AArch64CC::AL;
     458           6 :   switch (P) {
     459           0 :   default:
     460           0 :     llvm_unreachable("Unknown FP condition!");
     461           0 :   case CmpInst::FCMP_OEQ:
     462           0 :     CondCode = AArch64CC::EQ;
     463           0 :     break;
     464           0 :   case CmpInst::FCMP_OGT:
     465           0 :     CondCode = AArch64CC::GT;
     466           0 :     break;
     467           0 :   case CmpInst::FCMP_OGE:
     468           0 :     CondCode = AArch64CC::GE;
     469           0 :     break;
     470           0 :   case CmpInst::FCMP_OLT:
     471           0 :     CondCode = AArch64CC::MI;
     472           0 :     break;
     473           0 :   case CmpInst::FCMP_OLE:
     474           0 :     CondCode = AArch64CC::LS;
     475           0 :     break;
     476           3 :   case CmpInst::FCMP_ONE:
     477           3 :     CondCode = AArch64CC::MI;
     478           3 :     CondCode2 = AArch64CC::GT;
     479           3 :     break;
     480           0 :   case CmpInst::FCMP_ORD:
     481           0 :     CondCode = AArch64CC::VC;
     482           0 :     break;
     483           0 :   case CmpInst::FCMP_UNO:
     484           0 :     CondCode = AArch64CC::VS;
     485           0 :     break;
     486           0 :   case CmpInst::FCMP_UEQ:
     487           0 :     CondCode = AArch64CC::EQ;
     488           0 :     CondCode2 = AArch64CC::VS;
     489           0 :     break;
     490           0 :   case CmpInst::FCMP_UGT:
     491           0 :     CondCode = AArch64CC::HI;
     492           0 :     break;
     493           3 :   case CmpInst::FCMP_UGE:
     494           3 :     CondCode = AArch64CC::PL;
     495           3 :     break;
     496           0 :   case CmpInst::FCMP_ULT:
     497           0 :     CondCode = AArch64CC::LT;
     498           0 :     break;
     499           0 :   case CmpInst::FCMP_ULE:
     500           0 :     CondCode = AArch64CC::LE;
     501           0 :     break;
     502           0 :   case CmpInst::FCMP_UNE:
     503           0 :     CondCode = AArch64CC::NE;
     504           0 :     break;
     505             :   }
     506           6 : }
     507             : 
     508           9 : bool AArch64InstructionSelector::selectCompareBranch(
     509             :     MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
     510             : 
     511           9 :   const unsigned CondReg = I.getOperand(0).getReg();
     512           9 :   MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
     513           9 :   MachineInstr *CCMI = MRI.getVRegDef(CondReg);
     514          18 :   if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
     515           5 :     CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
     516          18 :   if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
     517             :     return false;
     518             : 
     519           5 :   unsigned LHS = CCMI->getOperand(2).getReg();
     520           5 :   unsigned RHS = CCMI->getOperand(3).getReg();
     521          10 :   if (!getConstantVRegVal(RHS, MRI))
     522             :     std::swap(RHS, LHS);
     523             : 
     524           5 :   const auto RHSImm = getConstantVRegVal(RHS, MRI);
     525          10 :   if (!RHSImm || *RHSImm != 0)
     526             :     return false;
     527             : 
     528           5 :   const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
     529           5 :   if (RB.getID() != AArch64::GPRRegBankID)
     530             :     return false;
     531             : 
     532           5 :   const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
     533           5 :   if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
     534             :     return false;
     535             : 
     536           4 :   const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
     537           4 :   unsigned CBOpc = 0;
     538           4 :   if (CmpWidth <= 32)
     539           2 :     CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
     540           2 :   else if (CmpWidth == 64)
     541           2 :     CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
     542             :   else
     543             :     return false;
     544             : 
     545          16 :   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
     546           4 :                  .addUse(LHS)
     547           8 :                  .addMBB(DestMBB);
     548             : 
     549           4 :   constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
     550           4 :   I.eraseFromParent();
     551             :   return true;
     552             : }
     553             : 
     554             : bool AArch64InstructionSelector::selectVaStartAAPCS(
     555             :     MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
     556             :   return false;
     557             : }
     558             : 
     559           1 : bool AArch64InstructionSelector::selectVaStartDarwin(
     560             :     MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
     561           1 :   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
     562           1 :   unsigned ListReg = I.getOperand(0).getReg();
     563             : 
     564           1 :   unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
     565             : 
     566             :   auto MIB =
     567           4 :       BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
     568           1 :           .addDef(ArgsAddrReg)
     569           2 :           .addFrameIndex(FuncInfo->getVarArgsStackIndex())
     570           1 :           .addImm(0)
     571           1 :           .addImm(0);
     572             : 
     573           1 :   constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
     574             : 
     575           3 :   MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
     576           1 :             .addUse(ArgsAddrReg)
     577           1 :             .addUse(ListReg)
     578           1 :             .addImm(0)
     579           2 :             .addMemOperand(*I.memoperands_begin());
     580             : 
     581           1 :   constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
     582           1 :   I.eraseFromParent();
     583           1 :   return true;
     584             : }
     585             : 
     586         660 : bool AArch64InstructionSelector::select(MachineInstr &I) const {
     587             :   assert(I.getParent() && "Instruction should be in a basic block!");
     588             :   assert(I.getParent()->getParent() && "Instruction should be in a function!");
     589             : 
     590         660 :   MachineBasicBlock &MBB = *I.getParent();
     591         660 :   MachineFunction &MF = *MBB.getParent();
     592         660 :   MachineRegisterInfo &MRI = MF.getRegInfo();
     593             : 
     594        1320 :   unsigned Opcode = I.getOpcode();
      595             :   // G_PHI requires the same handling as PHI
     596         660 :   if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
     597             :     // Certain non-generic instructions also need some special handling.
     598             : 
     599         429 :     if (Opcode ==  TargetOpcode::LOAD_STACK_GUARD)
     600           0 :       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
     601             : 
     602         429 :     if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
     603           6 :       const unsigned DefReg = I.getOperand(0).getReg();
     604           6 :       const LLT DefTy = MRI.getType(DefReg);
     605             : 
     606           6 :       const TargetRegisterClass *DefRC = nullptr;
     607           6 :       if (TargetRegisterInfo::isPhysicalRegister(DefReg)) {
     608           0 :         DefRC = TRI.getRegClass(DefReg);
     609             :       } else {
     610             :         const RegClassOrRegBank &RegClassOrBank =
     611           6 :             MRI.getRegClassOrRegBank(DefReg);
     612             : 
     613           0 :         DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
     614           0 :         if (!DefRC) {
     615           6 :           if (!DefTy.isValid()) {
     616             :             DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
     617             :             return false;
     618             :           }
     619           6 :           const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
     620           6 :           DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
     621           6 :           if (!DefRC) {
     622             :             DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
     623             :             return false;
     624             :           }
     625             :         }
     626             :       }
     627          12 :       I.setDesc(TII.get(TargetOpcode::PHI));
     628             : 
     629           6 :       return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
     630             :     }
     631             : 
     632         423 :     if (I.isCopy())
     633         406 :       return selectCopy(I, TII, MRI, TRI, RBI);
     634             : 
     635             :     return true;
     636             :   }
     637             : 
     638             : 
     639         231 :   if (I.getNumOperands() != I.getNumExplicitOperands()) {
     640             :     DEBUG(dbgs() << "Generic instruction has unexpected implicit operands\n");
     641             :     return false;
     642             :   }
     643             : 
     644         231 :   if (selectImpl(I))
     645             :     return true;
     646             : 
     647             :   LLT Ty =
     648         274 :       I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};
     649             : 
     650         137 :   switch (Opcode) {
     651           9 :   case TargetOpcode::G_BRCOND: {
     652           9 :     if (Ty.getSizeInBits() > 32) {
     653             :       // We shouldn't need this on AArch64, but it would be implemented as an
     654             :       // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
     655             :       // bit being tested is < 32.
     656             :       DEBUG(dbgs() << "G_BRCOND has type: " << Ty
     657             :                    << ", expected at most 32-bits");
     658             :       return false;
     659             :     }
     660             : 
     661           9 :     const unsigned CondReg = I.getOperand(0).getReg();
     662           9 :     MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
     663             : 
     664           9 :     if (selectCompareBranch(I, MF, MRI))
     665             :       return true;
     666             : 
     667          20 :     auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
     668           5 :                    .addUse(CondReg)
     669           5 :                    .addImm(/*bit offset=*/0)
     670           5 :                    .addMBB(DestMBB);
     671             : 
     672           5 :     I.eraseFromParent();
     673           5 :     return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
     674             :   }
     675             : 
     676           1 :   case TargetOpcode::G_BRINDIRECT: {
     677           3 :     I.setDesc(TII.get(AArch64::BR));
     678           1 :     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
     679             :   }
     680             : 
     681           2 :   case TargetOpcode::G_FCONSTANT:
     682             :   case TargetOpcode::G_CONSTANT: {
     683           2 :     const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;
     684             : 
     685           2 :     const LLT s32 = LLT::scalar(32);
     686           2 :     const LLT s64 = LLT::scalar(64);
     687           2 :     const LLT p0 = LLT::pointer(0, 64);
     688             : 
     689           2 :     const unsigned DefReg = I.getOperand(0).getReg();
     690           2 :     const LLT DefTy = MRI.getType(DefReg);
     691           2 :     const unsigned DefSize = DefTy.getSizeInBits();
     692           2 :     const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
     693             : 
     694             :     // FIXME: Redundant check, but even less readable when factored out.
     695           2 :     if (isFP) {
     696           1 :       if (Ty != s32 && Ty != s64) {
     697             :         DEBUG(dbgs() << "Unable to materialize FP " << Ty
     698             :                      << " constant, expected: " << s32 << " or " << s64
     699             :                      << '\n');
     700             :         return false;
     701             :       }
     702             : 
     703           2 :       if (RB.getID() != AArch64::FPRRegBankID) {
     704             :         DEBUG(dbgs() << "Unable to materialize FP " << Ty
     705             :                      << " constant on bank: " << RB << ", expected: FPR\n");
     706             :         return false;
     707             :       }
     708             :     } else {
     709             :       // s32 and s64 are covered by tablegen.
     710           0 :       if (Ty != p0) {
     711             :         DEBUG(dbgs() << "Unable to materialize integer " << Ty
     712             :                      << " constant, expected: " << s32 << ", " << s64 << ", or "
     713             :                      << p0 << '\n');
     714             :         return false;
     715             :       }
     716             : 
     717           0 :       if (RB.getID() != AArch64::GPRRegBankID) {
     718             :         DEBUG(dbgs() << "Unable to materialize integer " << Ty
     719             :                      << " constant on bank: " << RB << ", expected: GPR\n");
     720             :         return false;
     721             :       }
     722             :     }
     723             : 
     724           2 :     const unsigned MovOpc =
     725           2 :         DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
     726             : 
     727           6 :     I.setDesc(TII.get(MovOpc));
     728             : 
     729           2 :     if (isFP) {
     730           2 :       const TargetRegisterClass &GPRRC =
     731             :           DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
     732           2 :       const TargetRegisterClass &FPRRC =
     733             :           DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;
     734             : 
     735           2 :       const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
     736           2 :       MachineOperand &RegOp = I.getOperand(0);
     737           2 :       RegOp.setReg(DefGPRReg);
     738             : 
     739           4 :       BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
     740          10 :               TII.get(AArch64::COPY))
     741           2 :           .addDef(DefReg)
     742           2 :           .addUse(DefGPRReg);
     743             : 
     744           2 :       if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
     745             :         DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
     746             :         return false;
     747             :       }
     748             : 
     749           4 :       MachineOperand &ImmOp = I.getOperand(1);
     750             :       // FIXME: Is going through int64_t always correct?
     751           2 :       ImmOp.ChangeToImmediate(
     752           8 :           ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
     753           0 :     } else if (I.getOperand(1).isCImm()) {
     754           0 :       uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
     755           0 :       I.getOperand(1).ChangeToImmediate(Val);
     756           0 :     } else if (I.getOperand(1).isImm()) {
     757           0 :       uint64_t Val = I.getOperand(1).getImm();
     758           0 :       I.getOperand(1).ChangeToImmediate(Val);
     759             :     }
     760             : 
     761           2 :     constrainSelectedInstRegOperands(I, TII, TRI, RBI);
     762           2 :     return true;
     763             :   }
     764           2 :   case TargetOpcode::G_EXTRACT: {
     765           2 :     LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
      766             :     // Larger extracts are vectors; same-size extracts should be something else
     767             :     // by now (either split up or simplified to a COPY).
     768           2 :     if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
     769             :       return false;
     770             : 
     771           6 :     I.setDesc(TII.get(AArch64::UBFMXri));
     772           8 :     MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
     773           4 :                                       Ty.getSizeInBits() - 1);
     774             : 
     775           2 :     unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
     776           4 :     BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
     777          10 :             TII.get(AArch64::COPY))
     778           4 :         .addDef(I.getOperand(0).getReg())
     779           2 :         .addUse(DstReg, 0, AArch64::sub_32);
     780           2 :     RBI.constrainGenericRegister(I.getOperand(0).getReg(),
     781             :                                  AArch64::GPR32RegClass, MRI);
     782           2 :     I.getOperand(0).setReg(DstReg);
     783             : 
     784           2 :     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
     785             :   }
     786             : 
     787           2 :   case TargetOpcode::G_INSERT: {
     788           2 :     LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
      789             :     // Larger inserts are vectors; same-size ones should be something else by
     790             :     // now (split up or turned into COPYs).
     791           2 :     if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
     792             :       return false;
     793             : 
     794           6 :     I.setDesc(TII.get(AArch64::BFMXri));
     795           2 :     unsigned LSB = I.getOperand(3).getImm();
     796           2 :     unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
     797           4 :     I.getOperand(3).setImm((64 - LSB) % 64);
     798           6 :     MachineInstrBuilder(MF, I).addImm(Width - 1);
     799             : 
     800           2 :     unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
     801           4 :     BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
     802           8 :             TII.get(AArch64::SUBREG_TO_REG))
     803           2 :         .addDef(SrcReg)
     804           2 :         .addImm(0)
     805           4 :         .addUse(I.getOperand(2).getReg())
     806           2 :         .addImm(AArch64::sub_32);
     807           2 :     RBI.constrainGenericRegister(I.getOperand(2).getReg(),
     808             :                                  AArch64::GPR32RegClass, MRI);
     809           4 :     I.getOperand(2).setReg(SrcReg);
     810             : 
     811           2 :     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
     812             :   }
     813           3 :   case TargetOpcode::G_FRAME_INDEX: {
     814             :     // allocas and G_FRAME_INDEX are only supported in addrspace(0).
     815           6 :     if (Ty != LLT::pointer(0, 64)) {
     816             :       DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
     817             :             << ", expected: " << LLT::pointer(0, 64) << '\n');
     818             :       return false;
     819             :     }
     820           9 :     I.setDesc(TII.get(AArch64::ADDXri));
     821             : 
     822             :     // MOs for a #0 shifted immediate.
     823           3 :     I.addOperand(MachineOperand::CreateImm(0));
     824           3 :     I.addOperand(MachineOperand::CreateImm(0));
     825             : 
     826           3 :     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
     827             :   }
     828             : 
     829           6 :   case TargetOpcode::G_GLOBAL_VALUE: {
     830           6 :     auto GV = I.getOperand(1).getGlobal();
     831           6 :     if (GV->isThreadLocal()) {
     832             :       // FIXME: we don't support TLS yet.
     833             :       return false;
     834             :     }
     835           6 :     unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
     836           6 :     if (OpFlags & AArch64II::MO_GOT) {
     837           9 :       I.setDesc(TII.get(AArch64::LOADgot));
     838           3 :       I.getOperand(1).setTargetFlags(OpFlags);
     839             :     } else {
     840           9 :       I.setDesc(TII.get(AArch64::MOVaddr));
     841           6 :       I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
     842           6 :       MachineInstrBuilder MIB(MF, I);
     843           6 :       MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
     844           6 :                            OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
     845             :     }
     846           6 :     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
     847             :   }
     848             : 
     849          36 :   case TargetOpcode::G_LOAD:
     850             :   case TargetOpcode::G_STORE: {
     851          36 :     LLT MemTy = Ty;
     852          36 :     LLT PtrTy = MRI.getType(I.getOperand(1).getReg());
     853             : 
     854          72 :     if (PtrTy != LLT::pointer(0, 64)) {
     855             :       DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
     856             :                    << ", expected: " << LLT::pointer(0, 64) << '\n');
     857             :       return false;
     858             :     }
     859             : 
     860          36 :     auto &MemOp = **I.memoperands_begin();
     861          36 :     if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
     862             :       DEBUG(dbgs() << "Atomic load/store not supported yet\n");
     863             :       return false;
     864             :     }
     865             : 
     866          32 :     const unsigned PtrReg = I.getOperand(1).getReg();
     867             : #ifndef NDEBUG
     868             :     const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
     869             :     // Sanity-check the pointer register.
     870             :     assert(PtrRB.getID() == AArch64::GPRRegBankID &&
     871             :            "Load/Store pointer operand isn't a GPR");
     872             :     assert(MRI.getType(PtrReg).isPointer() &&
     873             :            "Load/Store pointer operand isn't a pointer");
     874             : #endif
     875             : 
     876          32 :     const unsigned ValReg = I.getOperand(0).getReg();
     877          32 :     const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);
     878             : 
     879             :     const unsigned NewOpc =
     880          64 :         selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemTy.getSizeInBits());
     881          32 :     if (NewOpc == I.getOpcode())
     882             :       return false;
     883             : 
     884          96 :     I.setDesc(TII.get(NewOpc));
     885             : 
     886          32 :     uint64_t Offset = 0;
     887          32 :     auto *PtrMI = MRI.getVRegDef(PtrReg);
     888             : 
     889             :     // Try to fold a GEP into our unsigned immediate addressing mode.
     890          64 :     if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
     891          28 :       if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
     892          14 :         int64_t Imm = *COff;
     893          14 :         const unsigned Size = MemTy.getSizeInBits() / 8;
     894          14 :         const unsigned Scale = Log2_32(Size);
     895          42 :         if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
     896          14 :           unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
     897          28 :           I.getOperand(1).setReg(Ptr2Reg);
     898          14 :           PtrMI = MRI.getVRegDef(Ptr2Reg);
     899          14 :           Offset = Imm / Size;
     900             :         }
     901             :       }
     902             :     }
     903             : 
     904             :     // If we haven't folded anything into our addressing mode yet, try to fold
     905             :     // a frame index into the base+offset.
     906          32 :     if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
     907           4 :       I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());
     908             : 
     909          64 :     I.addOperand(MachineOperand::CreateImm(Offset));
     910             : 
     911             :     // If we're storing a 0, use WZR/XZR.
     912          64 :     if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
     913           2 :       if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
     914           4 :         if (I.getOpcode() == AArch64::STRWui)
     915           1 :           I.getOperand(0).setReg(AArch64::WZR);
     916           1 :         else if (I.getOpcode() == AArch64::STRXui)
     917           1 :           I.getOperand(0).setReg(AArch64::XZR);
     918             :       }
     919             :     }
     920             : 
     921          32 :     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
     922             :   }
     923             : 
     924           2 :   case TargetOpcode::G_SMULH:
     925             :   case TargetOpcode::G_UMULH: {
     926             :     // Reject the various things we don't support yet.
     927           2 :     if (unsupportedBinOp(I, RBI, MRI, TRI))
     928             :       return false;
     929             : 
     930           2 :     const unsigned DefReg = I.getOperand(0).getReg();
     931           2 :     const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
     932             : 
     933           2 :     if (RB.getID() != AArch64::GPRRegBankID) {
     934             :       DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
     935             :       return false;
     936             :     }
     937             : 
     938           4 :     if (Ty != LLT::scalar(64)) {
     939             :       DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
     940             :                    << ", expected: " << LLT::scalar(64) << '\n');
     941             :       return false;
     942             :     }
     943             : 
     944           4 :     unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
     945           2 :                                                              : AArch64::UMULHrr;
     946           6 :     I.setDesc(TII.get(NewOpc));
     947             : 
     948             :     // Now that we selected an opcode, we need to constrain the register
     949             :     // operands to use appropriate classes.
     950           2 :     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
     951             :   }
     952           6 :   case TargetOpcode::G_FADD:
     953             :   case TargetOpcode::G_FSUB:
     954             :   case TargetOpcode::G_FMUL:
     955             :   case TargetOpcode::G_FDIV:
     956             : 
     957             :   case TargetOpcode::G_OR:
     958             :   case TargetOpcode::G_SHL:
     959             :   case TargetOpcode::G_LSHR:
     960             :   case TargetOpcode::G_ASHR:
     961             :   case TargetOpcode::G_GEP: {
     962             :     // Reject the various things we don't support yet.
     963           6 :     if (unsupportedBinOp(I, RBI, MRI, TRI))
     964             :       return false;
     965             : 
     966           6 :     const unsigned OpSize = Ty.getSizeInBits();
     967             : 
     968           6 :     const unsigned DefReg = I.getOperand(0).getReg();
     969           6 :     const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
     970             : 
     971          12 :     const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
     972           6 :     if (NewOpc == I.getOpcode())
     973             :       return false;
     974             : 
     975          18 :     I.setDesc(TII.get(NewOpc));
     976             :     // FIXME: Should the type be always reset in setDesc?
     977             : 
     978             :     // Now that we selected an opcode, we need to constrain the register
     979             :     // operands to use appropriate classes.
     980           6 :     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
     981             :   }
     982             : 
     983           3 :   case TargetOpcode::G_PTR_MASK: {
     984           3 :     uint64_t Align = I.getOperand(2).getImm();
     985           3 :     if (Align >= 64 || Align == 0)
     986             :       return false;
     987             : 
     988           3 :     uint64_t Mask = ~((1ULL << Align) - 1);
     989           9 :     I.setDesc(TII.get(AArch64::ANDXri));
     990           9 :     I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64));
     991             : 
     992           3 :     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
     993             :   }
     994          23 :   case TargetOpcode::G_PTRTOINT:
     995             :   case TargetOpcode::G_TRUNC: {
     996          23 :     const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
     997          23 :     const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
     998             : 
     999          23 :     const unsigned DstReg = I.getOperand(0).getReg();
    1000          23 :     const unsigned SrcReg = I.getOperand(1).getReg();
    1001             : 
    1002          23 :     const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    1003          23 :     const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
    1004             : 
    1005          23 :     if (DstRB.getID() != SrcRB.getID()) {
    1006             :       DEBUG(dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
    1007             :       return false;
    1008             :     }
    1009             : 
    1010          23 :     if (DstRB.getID() == AArch64::GPRRegBankID) {
    1011             :       const TargetRegisterClass *DstRC =
    1012          23 :           getRegClassForTypeOnBank(DstTy, DstRB, RBI);
    1013          23 :       if (!DstRC)
    1014             :         return false;
    1015             : 
    1016             :       const TargetRegisterClass *SrcRC =
    1017          23 :           getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
    1018          23 :       if (!SrcRC)
    1019             :         return false;
    1020             : 
    1021          46 :       if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
    1022          23 :           !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    1023             :         DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
    1024             :         return false;
    1025             :       }
    1026             : 
    1027          23 :       if (DstRC == SrcRC) {
    1028             :         // Nothing to be done
    1029           6 :       } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
    1030           0 :                  SrcTy == LLT::scalar(64)) {
    1031           0 :         llvm_unreachable("TableGen can import this case");
    1032             :         return false;
    1033          10 :       } else if (DstRC == &AArch64::GPR32RegClass &&
    1034             :                  SrcRC == &AArch64::GPR64RegClass) {
    1035           5 :         I.getOperand(1).setSubReg(AArch64::sub_32);
    1036             :       } else {
    1037             :         DEBUG(dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
    1038             :         return false;
    1039             :       }
    1040             : 
    1041          69 :       I.setDesc(TII.get(TargetOpcode::COPY));
    1042          23 :       return true;
    1043           0 :     } else if (DstRB.getID() == AArch64::FPRRegBankID) {
    1044           0 :       if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
    1045           0 :         I.setDesc(TII.get(AArch64::XTNv4i16));
    1046           0 :         constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    1047           0 :         return true;
    1048             :       }
    1049             :     }
    1050             : 
    1051             :     return false;
    1052             :   }
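                     :   // For illustration, on the GPR path a 64-to-32-bit truncation above becomes
                     :   // a subregister copy:
                     :   //   %d:gpr(s32) = G_TRUNC %s:gpr(s64)  -->  %d:gpr32 = COPY %s.sub_32:gpr64
                     :   // while on the FPR path only the v4s32 -> v4s16 case is handled, via
                     :   // XTNv4i16.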
    1053             : 
    1054           2 :   case TargetOpcode::G_ANYEXT: {
    1055           2 :     const unsigned DstReg = I.getOperand(0).getReg();
    1056           2 :     const unsigned SrcReg = I.getOperand(1).getReg();
    1057             : 
    1058           2 :     const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
    1059           2 :     if (RBDst.getID() != AArch64::GPRRegBankID) {
    1060             :       DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst << ", expected: GPR\n");
    1061             :       return false;
    1062             :     }
    1063             : 
    1064           2 :     const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
    1065           2 :     if (RBSrc.getID() != AArch64::GPRRegBankID) {
    1066             :       DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc << ", expected: GPR\n");
    1067             :       return false;
    1068             :     }
    1069             : 
    1070           2 :     const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
    1071             : 
    1072           2 :     if (DstSize == 0) {
    1073             :       DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
    1074             :       return false;
    1075             :     }
    1076             : 
    1077           2 :     if (DstSize != 64 && DstSize > 32) {
    1078             :       DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
    1079             :                    << ", expected: 32 or 64\n");
    1080             :       return false;
    1081             :     }
    1082             :     // At this point G_ANYEXT is just like a plain COPY, but when the
    1083             :     // destination is 64-bit we must explicitly materialize the wider value.
    1084           2 :     if (DstSize > 32) {
    1085           1 :       unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
    1086           4 :       BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
    1087           1 :           .addDef(ExtSrc)
    1088           1 :           .addImm(0)
    1089           1 :           .addUse(SrcReg)
    1090           1 :           .addImm(AArch64::sub_32);
    1091           2 :       I.getOperand(1).setReg(ExtSrc);
    1092             :     }
    1093           2 :     return selectCopy(I, TII, MRI, TRI, RBI);
    1094             :   }
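                     :   // For illustration, the 64-bit case above first widens the source and then
                     :   // lets selectCopy finish the job, roughly:
                     :   //   %d:gpr(s64) = G_ANYEXT %s:gpr(s32)
                     :   // becomes
                     :   //   %t:gpr64all = SUBREG_TO_REG 0, %s, sub_32
                     :   //   %d = COPY %t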
    1095             : 
    1096           8 :   case TargetOpcode::G_ZEXT:
    1097             :   case TargetOpcode::G_SEXT: {
    1098          16 :     unsigned Opcode = I.getOpcode();
    1099           8 :     const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
    1100           8 :               SrcTy = MRI.getType(I.getOperand(1).getReg());
    1101           8 :     const bool isSigned = Opcode == TargetOpcode::G_SEXT;
    1102           8 :     const unsigned DefReg = I.getOperand(0).getReg();
    1103           8 :     const unsigned SrcReg = I.getOperand(1).getReg();
    1104           8 :     const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
    1105             : 
    1106           8 :     if (RB.getID() != AArch64::GPRRegBankID) {
    1107             :       DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
    1108             :                    << ", expected: GPR\n");
    1109             :       return false;
    1110             :     }
    1111             : 
    1112             :     MachineInstr *ExtI;
    1113          16 :     if (DstTy == LLT::scalar(64)) {
    1114             :       // FIXME: Can we avoid manually doing this?
    1115           2 :       if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
    1116             :         DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
    1117             :                      << " operand\n");
    1118             :         return false;
    1119             :       }
    1120             : 
    1121             :       const unsigned SrcXReg =
    1122           2 :           MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    1123           8 :       BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
    1124           2 :           .addDef(SrcXReg)
    1125           2 :           .addImm(0)
    1126           2 :           .addUse(SrcReg)
    1127           2 :           .addImm(AArch64::sub_32);
    1128             : 
    1129           2 :       const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
    1130           6 :       ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
    1131           2 :                  .addDef(DefReg)
    1132           2 :                  .addUse(SrcXReg)
    1133           2 :                  .addImm(0)
    1134           4 :                  .addImm(SrcTy.getSizeInBits() - 1);
    1135           6 :     } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
    1136           6 :       const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
    1137          24 :       ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
    1138           6 :                  .addDef(DefReg)
    1139           6 :                  .addUse(SrcReg)
    1140           6 :                  .addImm(0)
    1141          12 :                  .addImm(SrcTy.getSizeInBits() - 1);
    1142             :     } else {
    1143             :       return false;
    1144             :     }
    1145             : 
    1146           8 :     constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
    1147             : 
    1148           8 :     I.eraseFromParent();
    1149           8 :     return true;
    1150             :   }
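                     :   // For illustration: with immr = 0 and imms = SrcSize - 1, UBFM/SBFM
                     :   // zero- or sign-extend the low SrcSize bits of the source, so e.g.
                     :   //   %d:gpr(s32) = G_ZEXT %s:gpr(s8)  -->  %d:gpr32 = UBFMWri %s, 0, 7
                     :   // (the UXTB alias), and a 64-bit destination is handled by first widening
                     :   // the source with SUBREG_TO_REG and then using the X-register forms.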
    1151             : 
    1152           0 :   case TargetOpcode::G_SITOFP:
    1153             :   case TargetOpcode::G_UITOFP:
    1154             :   case TargetOpcode::G_FPTOSI:
    1155             :   case TargetOpcode::G_FPTOUI: {
    1156           0 :     const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
    1157           0 :               SrcTy = MRI.getType(I.getOperand(1).getReg());
    1158           0 :     const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
    1159           0 :     if (NewOpc == Opcode)
    1160             :       return false;
    1161             : 
    1162           0 :     I.setDesc(TII.get(NewOpc));
    1163           0 :     constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    1164             : 
    1165           0 :     return true;
    1166             :   }
    1167             : 
    1168             : 
    1169           1 :   case TargetOpcode::G_INTTOPTR:
    1170             :     // The importer is currently unable to import pointer types since they
    1171             :     // don't exist in SelectionDAG.
    1172           1 :     return selectCopy(I, TII, MRI, TRI, RBI);
    1173             : 
    1174           3 :   case TargetOpcode::G_BITCAST:
    1175             :     // Imported SelectionDAG rules can handle every bitcast except those that
    1176             :     // bitcast from a type to the same type. Ideally, these shouldn't occur
    1177             :     // but we might not run an optimizer that deletes them.
    1178           6 :     if (MRI.getType(I.getOperand(0).getReg()) ==
    1179           6 :         MRI.getType(I.getOperand(1).getReg()))
    1180           3 :       return selectCopy(I, TII, MRI, TRI, RBI);
    1181             :     return false;
    1182             : 
    1183           9 :   case TargetOpcode::G_SELECT: {
    1184          18 :     if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
    1185             :       DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
    1186             :                    << ", expected: " << LLT::scalar(1) << '\n');
    1187             :       return false;
    1188             :     }
    1189             : 
    1190           9 :     const unsigned CondReg = I.getOperand(1).getReg();
    1191           9 :     const unsigned TReg = I.getOperand(2).getReg();
    1192           9 :     const unsigned FReg = I.getOperand(3).getReg();
    1193             : 
    1194           9 :     unsigned CSelOpc = 0;
    1195             : 
    1196          15 :     if (Ty == LLT::scalar(32)) {
    1197             :       CSelOpc = AArch64::CSELWr;
    1198          12 :     } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    1199             :       CSelOpc = AArch64::CSELXr;
    1200             :     } else {
    1201             :       return false;
    1202             :     }
    1203             : 
    1204             :     MachineInstr &TstMI =
    1205          36 :         *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
    1206           9 :              .addDef(AArch64::WZR)
    1207           9 :              .addUse(CondReg)
    1208          18 :              .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
    1209             : 
    1210          27 :     MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
    1211          18 :                                 .addDef(I.getOperand(0).getReg())
    1212           9 :                                 .addUse(TReg)
    1213           9 :                                 .addUse(FReg)
    1214           9 :                                 .addImm(AArch64CC::NE);
    1215             : 
    1216           9 :     constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
    1217           9 :     constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);
    1218             : 
    1219           9 :     I.eraseFromParent();
    1220           9 :     return true;
    1221             :   }
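                     :   // The G_SELECT case above tests bit 0 of the i1 condition and then uses a
                     :   // conditional select, e.g. for a 32-bit result:
                     :   //   $wzr = ANDSWri %cond, <logical-imm 1>   ; TST %cond, #1, sets NZCV
                     :   //   %dst = CSELWr %t, %f, NE                ; %t if the bit was set, else %f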
    1222          10 :   case TargetOpcode::G_ICMP: {
    1223          20 :     if (Ty != LLT::scalar(32)) {
    1224             :       DEBUG(dbgs() << "G_ICMP result has type: " << Ty
    1225             :                    << ", expected: " << LLT::scalar(32) << '\n');
    1226             :       return false;
    1227             :     }
    1228             : 
    1229          10 :     unsigned CmpOpc = 0;
    1230          10 :     unsigned ZReg = 0;
    1231             : 
    1232          10 :     LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    1233          16 :     if (CmpTy == LLT::scalar(32)) {
    1234             :       CmpOpc = AArch64::SUBSWrr;
    1235             :       ZReg = AArch64::WZR;
    1236           9 :     } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
    1237             :       CmpOpc = AArch64::SUBSXrr;
    1238             :       ZReg = AArch64::XZR;
    1239             :     } else {
    1240             :       return false;
    1241             :     }
    1242             : 
    1243             :     // CSINC increments the result by one when the condition code is false.
    1244             :     // Therefore, we have to invert the predicate to get an increment by 1 when
    1245             :     // the predicate is true.
    1246             :     const AArch64CC::CondCode invCC =
    1247          10 :         changeICMPPredToAArch64CC(CmpInst::getInversePredicate(
    1248          20 :             (CmpInst::Predicate)I.getOperand(1).getPredicate()));
    1249             : 
    1250          40 :     MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
    1251          10 :                                .addDef(ZReg)
    1252          20 :                                .addUse(I.getOperand(2).getReg())
    1253          20 :                                .addUse(I.getOperand(3).getReg());
    1254             : 
    1255             :     MachineInstr &CSetMI =
    1256          30 :         *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
    1257          20 :              .addDef(I.getOperand(0).getReg())
    1258          10 :              .addUse(AArch64::WZR)
    1259          10 :              .addUse(AArch64::WZR)
    1260          20 :              .addImm(invCC);
    1261             : 
    1262          10 :     constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    1263          10 :     constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);
    1264             : 
    1265          10 :     I.eraseFromParent();
    1266          10 :     return true;
    1267             :   }
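                     :   // The G_ICMP case above is the usual CSET idiom: compare, then CSINC with
                     :   // the inverted predicate.  E.g. for eq on 64-bit operands:
                     :   //   $xzr = SUBSXrr %a, %b          ; compare, difference discarded
                     :   //   %dst = CSINCWr $wzr, $wzr, NE  ; 0 + 1 == 1 when NE is false, i.e. a == b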
    1268             : 
    1269           6 :   case TargetOpcode::G_FCMP: {
    1270          12 :     if (Ty != LLT::scalar(32)) {
    1271             :       DEBUG(dbgs() << "G_FCMP result has type: " << Ty
    1272             :                    << ", expected: " << LLT::scalar(32) << '\n');
    1273             :       return false;
    1274             :     }
    1275             : 
    1276           6 :     unsigned CmpOpc = 0;
    1277           6 :     LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    1278           9 :     if (CmpTy == LLT::scalar(32)) {
    1279             :       CmpOpc = AArch64::FCMPSrr;
    1280           3 :     } else if (CmpTy == LLT::scalar(64)) {
    1281             :       CmpOpc = AArch64::FCMPDrr;
    1282             :     } else {
    1283             :       return false;
    1284             :     }
    1285             : 
    1286             :     // FIXME: regbank
    1287             : 
    1288             :     AArch64CC::CondCode CC1, CC2;
    1289           6 :     changeFCMPPredToAArch64CC(
    1290           6 :         (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);
    1291             : 
    1292          24 :     MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
    1293          12 :                                .addUse(I.getOperand(2).getReg())
    1294          12 :                                .addUse(I.getOperand(3).getReg());
    1295             : 
    1296           6 :     const unsigned DefReg = I.getOperand(0).getReg();
    1297           6 :     unsigned Def1Reg = DefReg;
    1298           6 :     if (CC2 != AArch64CC::AL)
    1299           3 :       Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
    1300             : 
    1301             :     MachineInstr &CSetMI =
    1302          18 :         *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
    1303           6 :              .addDef(Def1Reg)
    1304           6 :              .addUse(AArch64::WZR)
    1305           6 :              .addUse(AArch64::WZR)
    1306          18 :              .addImm(getInvertedCondCode(CC1));
    1307             : 
    1308           6 :     if (CC2 != AArch64CC::AL) {
    1309           3 :       unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
    1310             :       MachineInstr &CSet2MI =
    1311           9 :           *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
    1312           3 :                .addDef(Def2Reg)
    1313           3 :                .addUse(AArch64::WZR)
    1314           3 :                .addUse(AArch64::WZR)
    1315           9 :                .addImm(getInvertedCondCode(CC2));
    1316             :       MachineInstr &OrMI =
    1317           9 :           *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
    1318           3 :                .addDef(DefReg)
    1319           3 :                .addUse(Def1Reg)
    1320           3 :                .addUse(Def2Reg);
    1321           3 :       constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
    1322           3 :       constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
    1323             :     }
    1324             : 
    1325           6 :     constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    1326           6 :     constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);
    1327             : 
    1328           6 :     I.eraseFromParent();
    1329           6 :     return true;
    1330             :   }
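                     :   // The G_FCMP case above compares with FCMPSrr/FCMPDrr and materializes the
                     :   // boolean with the same CSINC idiom as G_ICMP.  Predicates that
                     :   // changeFCMPPredToAArch64CC expands to two condition codes (CC2 != AL)
                     :   // produce two CSINCs whose results are ORred together, roughly:
                     :   //   FCMPDrr %a, %b
                     :   //   %r1 = CSINCWr $wzr, $wzr, inv(CC1)
                     :   //   %r2 = CSINCWr $wzr, $wzr, inv(CC2)
                     :   //   %d  = ORRWrr %r1, %r2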
    1331           1 :   case TargetOpcode::G_VASTART:
    1332           2 :     return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
    1333             :                                 : selectVaStartAAPCS(I, MF, MRI);
    1334           2 :   case TargetOpcode::G_IMPLICIT_DEF:
    1335           6 :     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    1336           2 :     return true;
    1337             :   }
    1338             : 
    1339             :   return false;
    1340             : }
    1341             : 
    1342             : /// selectArithImmed - Select an immediate value that can be represented as a
    1343             : /// 12-bit value shifted left by either 0 or 12.  If so, return a renderer
    1344             : /// that adds the immediate and the shifter operand; otherwise return nullptr.
    1345             : InstructionSelector::ComplexRendererFn
    1346          22 : AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
    1347          22 :   MachineInstr &MI = *Root.getParent();
    1348          22 :   MachineBasicBlock &MBB = *MI.getParent();
    1349          22 :   MachineFunction &MF = *MBB.getParent();
    1350          22 :   MachineRegisterInfo &MRI = MF.getRegInfo();
    1351             : 
    1352             :   // This function is called from the addsub_shifted_imm ComplexPattern,
    1353             :   // which lists [imm] as the list of opcodes it's interested in; however,
    1354             :   // we still need to check whether the operand is actually an immediate
    1355             :   // here, because the ComplexPattern opcode list is only used in
    1356             :   // root-level opcode matching.
    1357             :   uint64_t Immed;
    1358          22 :   if (Root.isImm())
    1359           0 :     Immed = Root.getImm();
    1360          22 :   else if (Root.isCImm())
    1361           0 :     Immed = Root.getCImm()->getZExtValue();
    1362          22 :   else if (Root.isReg()) {
    1363          22 :     MachineInstr *Def = MRI.getVRegDef(Root.getReg());
    1364          44 :     if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
    1365             :       return nullptr;
    1366           5 :     MachineOperand &Op1 = Def->getOperand(1);
    1367          10 :     if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
    1368             :       return nullptr;
    1369          10 :     Immed = Op1.getCImm()->getZExtValue();
    1370             :   } else
    1371             :     return nullptr;
    1372             : 
    1373             :   unsigned ShiftAmt;
    1374             : 
    1375           5 :   if (Immed >> 12 == 0) {
    1376             :     ShiftAmt = 0;
    1377           0 :   } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    1378             :     ShiftAmt = 12;
    1379             :     Immed = Immed >> 12;
    1380             :   } else
    1381             :     return nullptr;
    1382             : 
    1383           5 :   unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
    1384          20 :   return [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed).addImm(ShVal); };
    1385             : }
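                     : // Worked examples for selectArithImmed (illustrative): an operand defined by
                     : // G_CONSTANT 42 renders as {imm 42, LSL #0}; G_CONSTANT 0x123000 has its low
                     : // twelve bits clear and fits in 24 bits, so it renders as {imm 0x123, LSL #12};
                     : // G_CONSTANT 0x1000001 fits neither form, so the renderer is rejected
                     : // (nullptr) and the addsub_shifted_imm pattern fails to match.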
    1386             : 
    1387             : namespace llvm {
    1388             : InstructionSelector *
    1389        1214 : createAArch64InstructionSelector(const AArch64TargetMachine &TM,
    1390             :                                  AArch64Subtarget &Subtarget,
    1391             :                                  AArch64RegisterBankInfo &RBI) {
    1392        1214 :   return new AArch64InstructionSelector(TM, Subtarget, RBI);
    1393             : }
    1394      216918 : }

Generated by: LCOV version 1.13