//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "aarch64-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectVectorASHR(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectVectorSHL(MachineInstr &I, MachineRegisterInfo &MRI) const;

  // Helper to generate an equivalent of scalar_to_vector into a new register,
  // returned via 'Dst'.
  MachineInstr *emitScalarToVector(unsigned EltSize,
                                   const TargetRegisterClass *DstRC,
                                   unsigned Scalar,
                                   MachineIRBuilder &MIRBuilder) const;

  /// Emit a lane insert into \p DstReg, or a new vector register if None is
  /// provided.
  ///
  /// The lane inserted into is defined by \p LaneIdx. The vector source
  /// register is given by \p SrcReg. The register containing the element is
  /// given by \p EltReg.
  MachineInstr *emitLaneInsert(Optional<unsigned> DstReg, unsigned SrcReg,
                               unsigned EltReg, unsigned LaneIdx,
                               const RegisterBank &RB,
                               MachineIRBuilder &MIRBuilder) const;
  bool selectInsertElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;

  void collectShuffleMaskIndices(MachineInstr &I, MachineRegisterInfo &MRI,
                                 SmallVectorImpl<Optional<int>> &Idxs) const;
  bool selectShuffleVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectExtractElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectConcatVectors(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectSplitVectorUnmerge(MachineInstr &I,
                                MachineRegisterInfo &MRI) const;
  bool selectIntrinsicWithSideEffects(MachineInstr &I,
                                      MachineRegisterInfo &MRI) const;
  bool selectIntrinsic(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectVectorICmp(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectIntrinsicTrunc(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectIntrinsicRound(MachineInstr &I, MachineRegisterInfo &MRI) const;
  unsigned emitConstantPoolEntry(Constant *CPVal, MachineFunction &MF) const;
  MachineInstr *emitLoadFromConstantPool(Constant *CPVal,
                                         MachineIRBuilder &MIRBuilder) const;

  // Emit a vector concat operation.
  MachineInstr *emitVectorConcat(Optional<unsigned> Dst, unsigned Op1,
                                 unsigned Op2,
                                 MachineIRBuilder &MIRBuilder) const;
  MachineInstr *emitExtractVectorElt(Optional<unsigned> DstReg,
                                     const RegisterBank &DstRB, LLT ScalarTy,
                                     unsigned VecReg, unsigned LaneIdx,
                                     MachineIRBuilder &MIRBuilder) const;

  /// Helper function for selecting G_FCONSTANT. If the G_FCONSTANT can be
  /// materialized using a FMOV instruction, then update MI and return it.
  /// Otherwise, do nothing and return a nullptr.
  MachineInstr *emitFMovForFConstant(MachineInstr &MI,
                                     MachineRegisterInfo &MRI) const;

  ComplexRendererFns selectArithImmed(MachineOperand &Root) const;

  ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                            unsigned Size) const;

  ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
                                           unsigned Size) const;
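  // The template parameter is the access width in bits; e.g.
  // selectAddrModeIndexed<64> handles a 64-bit access and forwards
  // Size = 64 / 8 = 8 bytes to the overload above.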
  template <int Width>
  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }

  void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI) const;

  // Materialize a GlobalValue or BlockAddress using a movz+movk sequence.
  void materializeLargeCMVal(MachineInstr &I, const Value *V,
                             unsigned char OpFlags) const;

  // Optimization methods.

  // Helper function to check whether a reg def is an MI with a given opcode;
  // returns it if so, and nullptr otherwise.
  MachineInstr *findMIFromReg(unsigned Reg, unsigned Opc,
                              MachineIRBuilder &MIB) const {
    auto *Def = MIB.getMRI()->getVRegDef(Reg);
    if (!Def || Def->getOpcode() != Opc)
      return nullptr;
    return Def;
  }

  bool tryOptVectorShuffle(MachineInstr &I) const;
  bool tryOptVectorDup(MachineInstr &MI) const;

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI,
                         bool GetAllRegSet = false) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() <= 16)
      return &AArch64::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}

/// Given a register bank, and size in bits, return the smallest register class
/// that can represent that combination.
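/// For example, (GPR, 16) rounds up to GPR32, since there is no narrower
/// general-purpose register class, while (FPR, 16) maps exactly to FPR16.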
static const TargetRegisterClass *
getMinClassForRegBank(const RegisterBank &RB, unsigned SizeInBits,
                      bool GetAllRegSet = false) {
  unsigned RegBankID = RB.getID();

  if (RegBankID == AArch64::GPRRegBankID) {
    if (SizeInBits <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (SizeInBits == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
  }

  if (RegBankID == AArch64::FPRRegBankID) {
    switch (SizeInBits) {
    default:
      return nullptr;
    case 8:
      return &AArch64::FPR8RegClass;
    case 16:
      return &AArch64::FPR16RegClass;
    case 32:
      return &AArch64::FPR32RegClass;
    case 64:
      return &AArch64::FPR64RegClass;
    case 128:
      return &AArch64::FPR128RegClass;
    }
  }

  return nullptr;
}

/// Returns the correct subregister to use for a given register class.
static bool getSubRegForClass(const TargetRegisterClass *RC,
                              const TargetRegisterInfo &TRI, unsigned &SubReg) {
  switch (TRI.getRegSizeInBits(*RC)) {
  case 8:
    SubReg = AArch64::bsub;
    break;
  case 16:
    SubReg = AArch64::hsub;
    break;
  case 32:
    if (RC == &AArch64::GPR32RegClass)
      SubReg = AArch64::sub_32;
    else
      SubReg = AArch64::ssub;
    break;
  case 64:
    SubReg = AArch64::dsub;
    break;
  default:
    LLVM_DEBUG(
        dbgs() << "Couldn't find appropriate subregister for register class.");
    return false;
  }

  return true;
}

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - its operands are not all in the same bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the aarch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical register operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    }
    break;
  }
  return GenericOpc;
}

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}

#ifndef NDEBUG
/// Helper function that verifies that we have a valid copy at the end of
/// selectCopy. Verifies that the source and dest have the expected sizes and
/// then returns true.
static bool isValidCopy(const MachineInstr &I, const RegisterBank &DstBank,
                        const MachineRegisterInfo &MRI,
                        const TargetRegisterInfo &TRI,
                        const RegisterBankInfo &RBI) {
  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);

  // Make sure the size of the source and dest line up.
  assert(
      (DstSize == SrcSize ||
       // Copies are a means to set up initial types, the number of
       // bits may not exactly match.
       (TargetRegisterInfo::isPhysicalRegister(SrcReg) && DstSize <= SrcSize) ||
       // Copies are a means to copy bits around, as long as we are
       // on the same register class, that's fine. Otherwise, that
       // means we need some SUBREG_TO_REG or AND & co.
       (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
      "Copy with different width?!");

  // Check the size of the destination.
  assert((DstSize <= 64 || DstBank.getID() == AArch64::FPRRegBankID) &&
         "GPRs cannot get more than 64-bit width values");

  return true;
}
#endif

/// Helper function for selectCopy. Inserts a subregister copy from
/// \p *From to \p *To, linking it up to \p I.
///
/// e.g, given I = "Dst = COPY SrcReg", we'll transform that into
///
/// CopyReg (From class) = COPY SrcReg
/// SubRegCopy (To class) = COPY CopyReg:SubReg
/// Dst = COPY SubRegCopy
static bool selectSubregisterCopy(MachineInstr &I, MachineRegisterInfo &MRI,
                                  const RegisterBankInfo &RBI, unsigned SrcReg,
                                  const TargetRegisterClass *From,
                                  const TargetRegisterClass *To,
                                  unsigned SubReg) {
  MachineIRBuilder MIB(I);
  auto Copy = MIB.buildCopy({From}, {SrcReg});
  auto SubRegCopy = MIB.buildInstr(TargetOpcode::COPY, {To}, {})
                        .addReg(Copy.getReg(0), 0, SubReg);
  MachineOperand &RegOp = I.getOperand(1);
  RegOp.setReg(SubRegCopy.getReg(0));

  // It's possible that the destination register won't be constrained. Make
  // sure that happens.
  if (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()))
    RBI.constrainGenericRegister(I.getOperand(0).getReg(), *To, MRI);

  return true;
}

/// Helper function to get the source and destination register classes for a
/// copy. Returns a std::pair containing the source register class for the
/// copy, and the destination register class for the copy. If a register class
/// cannot be determined, then it will be nullptr.
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getRegClassesForCopy(MachineInstr &I, const TargetInstrInfo &TII,
                     MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                     const RegisterBankInfo &RBI) {
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
  unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);

  // Special casing for cross-bank copies of s1s. We can technically represent
  // a 1-bit value with any size of register. The minimum size for a GPR is 32
  // bits. So, we need to put the FPR on 32 bits as well.
  //
  // FIXME: I'm not sure if this case holds true outside of copies. If it does,
  // then we can pull it into the helpers that get the appropriate class for a
  // register bank. Or make a new helper that carries along some constraint
  // information.
  if (SrcRegBank != DstRegBank && (DstSize == 1 && SrcSize == 1))
    SrcSize = DstSize = 32;

  return {getMinClassForRegBank(SrcRegBank, SrcSize, true),
          getMinClassForRegBank(DstRegBank, DstSize, true)};
}

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  // Find the correct register classes for the source and destination
  // registers.
  const TargetRegisterClass *SrcRC;
  const TargetRegisterClass *DstRC;
  std::tie(SrcRC, DstRC) = getRegClassesForCopy(I, TII, MRI, TRI, RBI);

  if (!DstRC) {
    LLVM_DEBUG(dbgs() << "Unexpected dest size "
                      << RBI.getSizeInBits(DstReg, MRI, TRI) << '\n');
    return false;
  }

  // A couple helpers below, for making sure that the copy we produce is valid.

  // Set to true if we insert a SUBREG_TO_REG. If we do this, then we don't want
  // to verify that the src and dst are the same size, since that's handled by
  // the SUBREG_TO_REG.
  bool KnownValid = false;

  // Returns true, or asserts if something we don't expect happens. Instead of
  // returning true, we return isValidCopy() to ensure that we verify the
  // result.
  auto CheckCopy = [&]() {
    // If we have a bitcast or something, we can't have physical registers.
    assert(
        (I.isCopy() ||
         (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()) &&
          !TargetRegisterInfo::isPhysicalRegister(I.getOperand(1).getReg()))) &&
        "No phys reg on generic operator!");
    assert(KnownValid || isValidCopy(I, DstRegBank, MRI, TRI, RBI));
    (void)KnownValid;
    return true;
  };

  // Is this a copy? If so, then we may need to insert a subregister copy, or
  // a SUBREG_TO_REG.
  if (I.isCopy()) {
    // Yes. Check if there's anything to fix up.
    if (!SrcRC) {
      LLVM_DEBUG(dbgs() << "Couldn't determine source register class\n");
      return false;
    }

    // Is this a cross-bank copy?
    if (DstRegBank.getID() != SrcRegBank.getID()) {
      // If we're doing a cross-bank copy on different-sized registers, we need
      // to do a bit more work.
      unsigned SrcSize = TRI.getRegSizeInBits(*SrcRC);
      unsigned DstSize = TRI.getRegSizeInBits(*DstRC);

      if (SrcSize > DstSize) {
        // We're doing a cross-bank copy into a smaller register. We need a
        // subregister copy. First, get a register class that's on the same bank
        // as the destination, but the same size as the source.
        const TargetRegisterClass *SubregRC =
            getMinClassForRegBank(DstRegBank, SrcSize, true);
        assert(SubregRC && "Didn't get a register class for subreg?");

        // Get the appropriate subregister for the destination.
        unsigned SubReg = 0;
        if (!getSubRegForClass(DstRC, TRI, SubReg)) {
          LLVM_DEBUG(dbgs() << "Couldn't determine subregister for copy.\n");
          return false;
        }

        // Now, insert a subregister copy using the new register class.
        selectSubregisterCopy(I, MRI, RBI, SrcReg, SubregRC, DstRC, SubReg);
        return CheckCopy();
      }

      else if (DstRegBank.getID() == AArch64::GPRRegBankID && DstSize == 32 &&
               SrcSize == 16) {
        // Special case for FPR16 to GPR32.
        // FIXME: This can probably be generalized like the above case.
        unsigned PromoteReg =
            MRI.createVirtualRegister(&AArch64::FPR32RegClass);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(AArch64::SUBREG_TO_REG), PromoteReg)
            .addImm(0)
            .addUse(SrcReg)
            .addImm(AArch64::hsub);
        MachineOperand &RegOp = I.getOperand(1);
        RegOp.setReg(PromoteReg);

        // Promise that the copy is implicitly validated by the SUBREG_TO_REG.
        KnownValid = true;
      }
    }

    // If the destination is a physical register, then there's nothing to
    // change, so we're done.
    if (TargetRegisterInfo::isPhysicalRegister(DstReg))
      return CheckCopy();
  }

  // No need to constrain SrcReg. It will get constrained when we hit another
  // of its uses or defs. Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return CheckCopy();
}

static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  }
  return GenericOpc;
}

static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}

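// Some FP predicates have no single AArch64 condition code equivalent and
// need a second condition to be checked as well; e.g. FCMP_UEQ (unordered or
// equal) holds when either EQ or VS (unordered) holds, so CondCode2 is set to
// VS for that case.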
static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

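// Try to fold a G_ICMP against zero feeding a G_BRCOND into a single
// compare-and-branch. A minimal sketch of the transformation:
//   %c:gpr(s1) = G_ICMP intpred(eq), %x(s32), 0
//   G_BRCOND %c(s1), %bb.1
// becomes "CBZW %x, %bb.1" (and ne becomes CBNZW).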
bool AArch64InstructionSelector::selectCompareBranch(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
  MachineInstr *CCMI = MRI.getVRegDef(CondReg);
  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
    CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
  if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
    return false;

  unsigned LHS = CCMI->getOperand(2).getReg();
  unsigned RHS = CCMI->getOperand(3).getReg();
  if (!getConstantVRegVal(RHS, MRI))
    std::swap(RHS, LHS);

  const auto RHSImm = getConstantVRegVal(RHS, MRI);
  if (!RHSImm || *RHSImm != 0)
    return false;

  const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
  if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
    return false;

  const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
  unsigned CBOpc = 0;
  if (CmpWidth <= 32)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
  else if (CmpWidth == 64)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
  else
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
      .addUse(LHS)
      .addMBB(DestMBB)
      .constrainAllUses(TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectVectorSHL(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_SHL);
  unsigned DstReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(DstReg);
  unsigned Src1Reg = I.getOperand(1).getReg();
  unsigned Src2Reg = I.getOperand(2).getReg();

  if (!Ty.isVector())
    return false;

  unsigned Opc = 0;
  if (Ty == LLT::vector(4, 32)) {
    Opc = AArch64::USHLv4i32;
  } else if (Ty == LLT::vector(2, 32)) {
    Opc = AArch64::USHLv2i32;
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled G_SHL type");
    return false;
  }

  MachineIRBuilder MIB(I);
  auto UShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Src2Reg});
  constrainSelectedInstRegOperands(*UShl, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectVectorASHR(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_ASHR);
  unsigned DstReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(DstReg);
  unsigned Src1Reg = I.getOperand(1).getReg();
  unsigned Src2Reg = I.getOperand(2).getReg();

  if (!Ty.isVector())
    return false;

  // There is no shift-right-register instruction, but the shift-left-register
  // instruction takes a signed value, where negative numbers specify a right
  // shift.
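  // For example, a <4 x s32> G_ASHR of %src by %amt is emitted roughly as:
  //   %neg = NEGv4i32 %amt
  //   %dst = SSHLv4i32 %src, %neg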

  unsigned Opc = 0;
  unsigned NegOpc = 0;
  const TargetRegisterClass *RC = nullptr;
  if (Ty == LLT::vector(4, 32)) {
    Opc = AArch64::SSHLv4i32;
    NegOpc = AArch64::NEGv4i32;
    RC = &AArch64::FPR128RegClass;
  } else if (Ty == LLT::vector(2, 32)) {
    Opc = AArch64::SSHLv2i32;
    NegOpc = AArch64::NEGv2i32;
    RC = &AArch64::FPR64RegClass;
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled G_ASHR type");
    return false;
  }

  MachineIRBuilder MIB(I);
  auto Neg = MIB.buildInstr(NegOpc, {RC}, {Src2Reg});
  constrainSelectedInstRegOperands(*Neg, TII, TRI, RBI);
  auto SShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Neg});
  constrainSelectedInstRegOperands(*SShl, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectVaStartAAPCS(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  return false;
}

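// On Darwin, va_list is a simple char *, so va_start only needs to store the
// address of the first variadic stack slot into the list object: an ADDXri of
// the vararg frame index, followed by a STRXui into the va_list.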
bool AArch64InstructionSelector::selectVaStartDarwin(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  unsigned ListReg = I.getOperand(0).getReg();

  unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
          .addDef(ArgsAddrReg)
          .addFrameIndex(FuncInfo->getVarArgsStackIndex())
          .addImm(0)
          .addImm(0);

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);

  MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
            .addUse(ArgsAddrReg)
            .addUse(ListReg)
            .addImm(0)
            .addMemOperand(*I.memoperands_begin());

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

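// The large code model requires materializing a full 64-bit address in 16-bit
// chunks. For a GlobalValue @g, the sequence built below is roughly:
//   %0 = MOVZXi @g{:abs_g0_nc:}, 0
//   %1 = MOVKXi %0, @g{:abs_g1_nc:}, 16
//   %2 = MOVKXi %1, @g{:abs_g2_nc:}, 32
//   %dst = MOVKXi %2, @g{:abs_g3:}, 48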
void AArch64InstructionSelector::materializeLargeCMVal(
    MachineInstr &I, const Value *V, unsigned char OpFlags) const {
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineIRBuilder MIB(I);

  auto MovZ = MIB.buildInstr(AArch64::MOVZXi, {&AArch64::GPR64RegClass}, {});
  MovZ->addOperand(MF, I.getOperand(1));
  MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
                                     AArch64II::MO_NC);
  MovZ->addOperand(MF, MachineOperand::CreateImm(0));
  constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);

  auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags, unsigned Offset,
                       unsigned ForceDstReg) {
    unsigned DstReg = ForceDstReg
                          ? ForceDstReg
                          : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg);
    if (auto *GV = dyn_cast<GlobalValue>(V)) {
      MovI->addOperand(MF, MachineOperand::CreateGA(
                               GV, MovZ->getOperand(1).getOffset(), Flags));
    } else {
      MovI->addOperand(
          MF, MachineOperand::CreateBA(cast<BlockAddress>(V),
                                       MovZ->getOperand(1).getOffset(), Flags));
    }
    MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
    constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
    return DstReg;
  };
  unsigned DstReg = BuildMovK(MovZ.getReg(0),
                              AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
  DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
  BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
  return;
}

bool AArch64InstructionSelector::select(MachineInstr &I,
                                        CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  // G_PHI requires same handling as PHI
  if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
      const unsigned DefReg = I.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const TargetRegisterClass *DefRC = nullptr;
      if (TargetRegisterInfo::isPhysicalRegister(DefReg)) {
        DefRC = TRI.getRegClass(DefReg);
      } else {
        const RegClassOrRegBank &RegClassOrBank =
            MRI.getRegClassOrRegBank(DefReg);

        DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
        if (!DefRC) {
          if (!DefTy.isValid()) {
            LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
            return false;
          }
          const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
          DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
          if (!DefRC) {
            LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
            return false;
          }
        }
      }
      I.setDesc(TII.get(TargetOpcode::PHI));

      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(
        dbgs() << "Generic instruction has unexpected implicit operands\n");
    return false;
  }

  if (selectImpl(I, CoverageInfo))
    return true;

  LLT Ty =
      I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};

  MachineIRBuilder MIB(I);

  switch (Opcode) {
  case TargetOpcode::G_BRCOND: {
    if (Ty.getSizeInBits() > 32) {
      // We shouldn't need this on AArch64, but it would be implemented as an
      // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
      // bit being tested is < 32.
      LLVM_DEBUG(dbgs() << "G_BRCOND has type: " << Ty
                        << ", expected at most 32-bits");
      return false;
    }

    const unsigned CondReg = I.getOperand(0).getReg();
    MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

    // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
    // instructions will not be produced, as they are conditional branch
    // instructions that do not set flags.
    bool ProduceNonFlagSettingCondBr =
        !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
    if (ProduceNonFlagSettingCondBr && selectCompareBranch(I, MF, MRI))
      return true;

    if (ProduceNonFlagSettingCondBr) {
      auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
                     .addUse(CondReg)
                     .addImm(/*bit offset=*/0)
                     .addMBB(DestMBB);

      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
    } else {
      auto CMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
                     .addDef(AArch64::WZR)
                     .addUse(CondReg)
                     .addImm(1);
      constrainSelectedInstRegOperands(*CMP.getInstr(), TII, TRI, RBI);
      auto Bcc =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::Bcc))
              .addImm(AArch64CC::EQ)
              .addMBB(DestMBB);

      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Bcc.getInstr(), TII, TRI, RBI);
    }
  }

  case TargetOpcode::G_BRINDIRECT: {
    I.setDesc(TII.get(AArch64::BR));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_BSWAP: {
    // Handle vector types for G_BSWAP directly.
    unsigned DstReg = I.getOperand(0).getReg();
    LLT DstTy = MRI.getType(DstReg);

    // We should only get vector types here; everything else is handled by the
    // importer right now.
    if (!DstTy.isVector() || DstTy.getSizeInBits() > 128) {
      LLVM_DEBUG(dbgs() << "Dst type for G_BSWAP currently unsupported.\n");
      return false;
    }

    // Only handle 4 and 2 element vectors for now.
    // TODO: 16-bit elements.
    unsigned NumElts = DstTy.getNumElements();
    if (NumElts != 4 && NumElts != 2) {
      LLVM_DEBUG(dbgs() << "Unsupported number of elements for G_BSWAP.\n");
      return false;
    }

    // Choose the correct opcode for the supported types. Right now, that's
    // v2s32, v4s32, and v2s64.
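    // Byte-swapping each element is a REV on the byte-level interpretation of
    // the vector; e.g. a v4s32 G_BSWAP becomes REV32v16i8, which reverses the
    // bytes within each 32-bit chunk of the 128-bit register.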
    unsigned Opc = 0;
    unsigned EltSize = DstTy.getElementType().getSizeInBits();
    if (EltSize == 32)
      Opc = (DstTy.getNumElements() == 2) ? AArch64::REV32v8i8
                                          : AArch64::REV32v16i8;
    else if (EltSize == 64)
      Opc = AArch64::REV64v16i8;

    // We should always get something by the time we get here...
    assert(Opc != 0 && "Didn't get an opcode for G_BSWAP?");

    I.setDesc(TII.get(Opc));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_CONSTANT: {
    const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;

    const LLT s32 = LLT::scalar(32);
    const LLT s64 = LLT::scalar(64);
    const LLT p0 = LLT::pointer(0, 64);

    const unsigned DefReg = I.getOperand(0).getReg();
    const LLT DefTy = MRI.getType(DefReg);
    const unsigned DefSize = DefTy.getSizeInBits();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    // FIXME: Redundant check, but even less readable when factored out.
    if (isFP) {
      if (Ty != s32 && Ty != s64) {
        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
                          << " constant, expected: " << s32 << " or " << s64
                          << '\n');
        return false;
      }

      if (RB.getID() != AArch64::FPRRegBankID) {
        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
                          << " constant on bank: " << RB
                          << ", expected: FPR\n");
        return false;
      }

      // The case when we have 0.0 is covered by tablegen. Reject it here so we
      // can be sure tablegen works correctly and isn't rescued by this code.
      if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0))
        return false;
    } else {
      // s32 and s64 are covered by tablegen.
      if (Ty != p0) {
        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
                          << " constant, expected: " << s32 << ", " << s64
                          << ", or " << p0 << '\n');
        return false;
      }

      if (RB.getID() != AArch64::GPRRegBankID) {
        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
                          << " constant on bank: " << RB
                          << ", expected: GPR\n");
        return false;
      }
    }

    const unsigned MovOpc =
        DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;

    if (isFP) {
      // Either emit a FMOV, or emit a copy to emit a normal mov.
      const TargetRegisterClass &GPRRC =
          DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
      const TargetRegisterClass &FPRRC =
          DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;

      // Can we use a FMOV instruction to represent the immediate?
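      // (FMOV's 8-bit immediate can only encode values of the form
      // (-1)^s * n/16 * 2^r with 16 <= n <= 31 and -3 <= r <= 4, which covers
      // common constants such as 1.0, 2.0, and 0.5.)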
      if (emitFMovForFConstant(I, MRI))
        return true;

      // Nope. Emit a copy and use a normal mov instead.
      const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
      MachineOperand &RegOp = I.getOperand(0);
      RegOp.setReg(DefGPRReg);
      MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
      MIB.buildCopy({DefReg}, {DefGPRReg});

      if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
        return false;
      }

      MachineOperand &ImmOp = I.getOperand(1);
      // FIXME: Is going through int64_t always correct?
      ImmOp.ChangeToImmediate(
          ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
    } else if (I.getOperand(1).isCImm()) {
      uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
      I.getOperand(1).ChangeToImmediate(Val);
    } else if (I.getOperand(1).isImm()) {
      uint64_t Val = I.getOperand(1).getImm();
      I.getOperand(1).ChangeToImmediate(Val);
    }

    I.setDesc(TII.get(MovOpc));
    constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_EXTRACT: {
    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    (void)DstTy;
    unsigned SrcSize = SrcTy.getSizeInBits();
    // Larger extracts are vectors, same-size extracts should be something else
    // by now (either split up or simplified to a COPY).
    if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri));
    MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
                                      Ty.getSizeInBits() - 1);
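    // UBFM's immediates are the first and last bit of the extracted field, so
    // e.g. extracting bits [32, 48) from an s64 gives UBFMXri %src, 32, 47.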

    if (SrcSize < 64) {
      assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 &&
             "unexpected G_EXTRACT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
    MIB.buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
        .addReg(DstReg, 0, AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(0).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(0).setReg(DstReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_INSERT: {
    LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    unsigned DstSize = DstTy.getSizeInBits();
    // Larger inserts are vectors, same-size ones should be something else by
    // now (split up or turned into COPYs).
    if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri));
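    // BFM copies Width bits from the low end of the source into the
    // destination at bit LSB. It rotates the source right by immr, so
    // immr = (DstSize - LSB) % DstSize and imms = Width - 1.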
    unsigned LSB = I.getOperand(3).getImm();
    unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    I.getOperand(3).setImm((DstSize - LSB) % DstSize);
    MachineInstrBuilder(MF, I).addImm(Width - 1);

    if (DstSize < 64) {
      assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 &&
             "unexpected G_INSERT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
            TII.get(AArch64::SUBREG_TO_REG))
        .addDef(SrcReg)
        .addImm(0)
        .addUse(I.getOperand(2).getReg())
        .addImm(AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(2).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(2).setReg(SrcReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FRAME_INDEX: {
    // allocas and G_FRAME_INDEX are only supported in addrspace(0).
    if (Ty != LLT::pointer(0, 64)) {
      LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
                        << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }
    I.setDesc(TII.get(AArch64::ADDXri));

    // MOs for a #0 shifted immediate.
    I.addOperand(MachineOperand::CreateImm(0));
    I.addOperand(MachineOperand::CreateImm(0));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_GLOBAL_VALUE: {
    auto GV = I.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // FIXME: we don't support TLS yet.
      return false;
    }
    unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
    if (OpFlags & AArch64II::MO_GOT) {
      I.setDesc(TII.get(AArch64::LOADgot));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else if (TM.getCodeModel() == CodeModel::Large) {
      // Materialize the global using movz/movk instructions.
      materializeLargeCMVal(I, GV, OpFlags);
      I.eraseFromParent();
      return true;
    } else if (TM.getCodeModel() == CodeModel::Tiny) {
      I.setDesc(TII.get(AArch64::ADR));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else {
      I.setDesc(TII.get(AArch64::MOVaddr));
      I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
      MachineInstrBuilder MIB(MF, I);
      MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
                           OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    LLT PtrTy = MRI.getType(I.getOperand(1).getReg());

    if (PtrTy != LLT::pointer(0, 64)) {
      LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
                        << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }

    auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }
    unsigned MemSizeInBits = MemOp.getSize() * 8;

    const unsigned PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
    // Sanity-check the pointer register.
    assert(PtrRB.getID() == AArch64::GPRRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(MRI.getType(PtrReg).isPointer() &&
           "Load/Store pointer operand isn't a pointer");
#endif

    const unsigned ValReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);

    const unsigned NewOpc =
        selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemSizeInBits);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));

    uint64_t Offset = 0;
    auto *PtrMI = MRI.getVRegDef(PtrReg);

    // Try to fold a GEP into our unsigned immediate addressing mode.
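    // The *ui forms take an unsigned immediate scaled by the access size, in
    // the range [0, 4095]; e.g. for LDRXui (Size = 8) a constant G_GEP offset
    // of 16 becomes an immediate of 2.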
    if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
      if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
        int64_t Imm = *COff;
        const unsigned Size = MemSizeInBits / 8;
        const unsigned Scale = Log2_32(Size);
        if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
          unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
          I.getOperand(1).setReg(Ptr2Reg);
          PtrMI = MRI.getVRegDef(Ptr2Reg);
          Offset = Imm / Size;
        }
      }
    }

    // If we haven't folded anything into our addressing mode yet, try to fold
    // a frame index into the base+offset.
    if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
      I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());

    I.addOperand(MachineOperand::CreateImm(Offset));

    // If we're storing a 0, use WZR/XZR.
    if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
      if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
        if (I.getOpcode() == AArch64::STRWui)
          I.getOperand(0).setReg(AArch64::WZR);
        else if (I.getOpcode() == AArch64::STRXui)
          I.getOperand(0).setReg(AArch64::XZR);
      }
    }

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
      return false;
    }

    if (Ty != LLT::scalar(64)) {
      LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
                        << ", expected: " << LLT::scalar(64) << '\n');
      return false;
    }

    unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
                                                             : AArch64::UMULHrr;
    I.setDesc(TII.get(NewOpc));

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:

  case TargetOpcode::G_ASHR:
    if (MRI.getType(I.getOperand(0).getReg()).isVector())
      return selectVectorASHR(I, MRI);
    LLVM_FALLTHROUGH;
  case TargetOpcode::G_SHL:
    if (Opcode == TargetOpcode::G_SHL &&
        MRI.getType(I.getOperand(0).getReg()).isVector())
      return selectVectorSHL(I, MRI);
    LLVM_FALLTHROUGH;
  case TargetOpcode::G_OR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_GEP: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned OpSize = Ty.getSizeInBits();

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));
    // FIXME: Should the type be always reset in setDesc?

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_UADDO: {
    // TODO: Support other types.
    unsigned OpSize = Ty.getSizeInBits();
    if (OpSize != 32 && OpSize != 64) {
      LLVM_DEBUG(
          dbgs()
          << "G_UADDO currently only supported for 32 and 64 b types.\n");
      return false;
    }

    // TODO: Support vectors.
    if (Ty.isVector()) {
      LLVM_DEBUG(dbgs() << "G_UADDO currently only supported for scalars.\n");
      return false;
    }

    // Add and set the set condition flag.
    unsigned AddsOpc = OpSize == 32 ? AArch64::ADDSWrr : AArch64::ADDSXrr;
    MachineIRBuilder MIRBuilder(I);
    auto AddsMI = MIRBuilder.buildInstr(
        AddsOpc, {I.getOperand(0).getReg()},
        {I.getOperand(2).getReg(), I.getOperand(3).getReg()});
    constrainSelectedInstRegOperands(*AddsMI, TII, TRI, RBI);

    // Now, put the overflow result in the register given by the first operand
    // to the G_UADDO. CSINC increments the result when the predicate is false,
    // so to get the increment when it's true, we need to use the inverse. In
    // this case, we want to increment when carry is set.
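    // Concretely, "CSINCWr %carry, wzr, wzr, lo" yields 1 exactly when the
    // carry (HS) flag is set, and 0 otherwise.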
    auto CsetMI = MIRBuilder
                      .buildInstr(AArch64::CSINCWr, {I.getOperand(1).getReg()},
                                  {AArch64::WZR, AArch64::WZR})
                      .addImm(getInvertedCondCode(AArch64CC::HS));
    constrainSelectedInstRegOperands(*CsetMI, TII, TRI, RBI);
    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_PTR_MASK: {
    uint64_t Align = I.getOperand(2).getImm();
    if (Align >= 64 || Align == 0)
      return false;

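    // G_PTR_MASK clears the low Align bits of the pointer; e.g. Align = 4
    // gives Mask = 0xfffffffffffffff0, which is representable as an ANDXri
    // logical immediate.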
    uint64_t Mask = ~((1ULL << Align) - 1);
    I.setDesc(TII.get(AArch64::ANDXri));
    I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());

    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

    if (DstRB.getID() != SrcRB.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
      return false;
    }

    if (DstRB.getID() == AArch64::GPRRegBankID) {
      const TargetRegisterClass *DstRC =
          getRegClassForTypeOnBank(DstTy, DstRB, RBI);
      if (!DstRC)
        return false;

      const TargetRegisterClass *SrcRC =
          getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
      if (!SrcRC)
        return false;

      if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
          !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      if (DstRC == SrcRC) {
        // Nothing to be done
      } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
                 SrcTy == LLT::scalar(64)) {
        llvm_unreachable("TableGen can import this case");
        return false;
      } else if (DstRC == &AArch64::GPR32RegClass &&
                 SrcRC == &AArch64::GPR64RegClass) {
        I.getOperand(1).setSubReg(AArch64::sub_32);
      } else {
        LLVM_DEBUG(
            dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      I.setDesc(TII.get(TargetOpcode::COPY));
      return true;
    } else if (DstRB.getID() == AArch64::FPRRegBankID) {
      if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
        I.setDesc(TII.get(AArch64::XTNv4i16));
        constrainSelectedInstRegOperands(I, TII, TRI, RBI);
        return true;
      }
    }

    return false;
  }

  case TargetOpcode::G_ANYEXT: {
    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
    if (RBDst.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst
                        << ", expected: GPR\n");
      return false;
    }

    const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
    if (RBSrc.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc
                        << ", expected: GPR\n");
      return false;
    }

    const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

    if (DstSize == 0) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
      return false;
    }

    if (DstSize != 64 && DstSize > 32) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
                        << ", expected: 32 or 64\n");
      return false;
    }
    // At this point G_ANYEXT is just like a plain COPY, but we need
    // to explicitly form the 64-bit value if any.
    if (DstSize > 32) {
      unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(ExtSrc)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);
      I.getOperand(1).setReg(ExtSrc);
    }
    return selectCopy(I, TII, MRI, TRI, RBI);
  }

1662  case TargetOpcode::G_ZEXT:
1663  case TargetOpcode::G_SEXT: {
1664  unsigned Opcode = I.getOpcode();
1665  const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
1666  SrcTy = MRI.getType(I.getOperand(1).getReg());
1667  const bool isSigned = Opcode == TargetOpcode::G_SEXT;
1668  const unsigned DefReg = I.getOperand(0).getReg();
1669  const unsigned SrcReg = I.getOperand(1).getReg();
1670  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1671 
1672  if (RB.getID() != AArch64::GPRRegBankID) {
1673  LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
1674  << ", expected: GPR\n");
1675  return false;
1676  }
1677 
1678  MachineInstr *ExtI;
1679  if (DstTy == LLT::scalar(64)) {
1680  // FIXME: Can we avoid manually doing this?
1681  if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
1682  LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
1683  << " operand\n");
1684  return false;
1685  }
1686 
1687  const unsigned SrcXReg =
1688  MRI.createVirtualRegister(&AArch64::GPR64RegClass);
1689  BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
1690  .addDef(SrcXReg)
1691  .addImm(0)
1692  .addUse(SrcReg)
1693  .addImm(AArch64::sub_32);
1694 
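      // SBFM/UBFM with immr = 0 and imms = SrcSize - 1 extracts the low
      // SrcSize bits and sign- or zero-extends them to the full register
      // width; e.g. an s8 -> s64 G_SEXT becomes roughly
      //   %dst = SBFMXri %src, 0, 7   (equivalent to sxtb)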
1695  const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
1696  ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
1697  .addDef(DefReg)
1698  .addUse(SrcXReg)
1699  .addImm(0)
1700  .addImm(SrcTy.getSizeInBits() - 1);
1701  } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
1702  const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
1703  ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
1704  .addDef(DefReg)
1705  .addUse(SrcReg)
1706  .addImm(0)
1707  .addImm(SrcTy.getSizeInBits() - 1);
1708  } else {
1709  return false;
1710  }
1711 
1712  constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1713 
1714  I.eraseFromParent();
1715  return true;
1716  }
1717 
1718  case TargetOpcode::G_SITOFP:
1719  case TargetOpcode::G_UITOFP:
1720  case TargetOpcode::G_FPTOSI:
1721  case TargetOpcode::G_FPTOUI: {
1722  const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
1723  SrcTy = MRI.getType(I.getOperand(1).getReg());
1724  const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
1725  if (NewOpc == Opcode)
1726  return false;
1727 
1728  I.setDesc(TII.get(NewOpc));
1729  constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1730 
1731  return true;
1732  }
1733 
1734 
1735  case TargetOpcode::G_INTTOPTR:
1736  // The importer is currently unable to import pointer types since they
1737  // didn't exist in SelectionDAG.
1738  return selectCopy(I, TII, MRI, TRI, RBI);
1739 
1740  case TargetOpcode::G_BITCAST:
1741  // Imported SelectionDAG rules can handle every bitcast except those that
1742  // bitcast from a type to the same type. Ideally, these shouldn't occur
1743  // but we might not run an optimizer that deletes them. The other exception
1744  // is bitcasts involving pointer types, as SelectionDAG has no knowledge
1745  // of them.
1746  return selectCopy(I, TII, MRI, TRI, RBI);
1747 
1748  case TargetOpcode::G_SELECT: {
1749  if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
1750  LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
1751  << ", expected: " << LLT::scalar(1) << '\n');
1752  return false;
1753  }
1754 
1755  const unsigned CondReg = I.getOperand(1).getReg();
1756  const unsigned TReg = I.getOperand(2).getReg();
1757  const unsigned FReg = I.getOperand(3).getReg();
1758 
1759  // If we have a floating-point result, then we should use a floating point
1760  // select instead of an integer select.
1761  bool IsFP = (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() !=
1762  AArch64::GPRRegBankID);
1763  unsigned CSelOpc = 0;
1764 
1765  if (Ty == LLT::scalar(32)) {
1766  CSelOpc = IsFP ? AArch64::FCSELSrrr : AArch64::CSELWr;
1767  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
1768  CSelOpc = IsFP ? AArch64::FCSELDrrr : AArch64::CSELXr;
1769  } else {
1770  return false;
1771  }
1772 
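      // ANDS against 1 (discarding the result into WZR) tests bit 0 of the
      // i1 condition, leaving NZCV set for the conditional select below.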
1773  MachineInstr &TstMI =
1774  *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
1775  .addDef(AArch64::WZR)
1776  .addUse(CondReg)
1777  .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
1778 
1779  MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
1780  .addDef(I.getOperand(0).getReg())
1781  .addUse(TReg)
1782  .addUse(FReg)
1783  .addImm(AArch64CC::NE);
1784 
1785  constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
1786  constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);
1787 
1788  I.eraseFromParent();
1789  return true;
1790  }
1791  case TargetOpcode::G_ICMP: {
1792  if (Ty.isVector())
1793  return selectVectorICmp(I, MRI);
1794 
1795  if (Ty != LLT::scalar(32)) {
1796  LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty
1797  << ", expected: " << LLT::scalar(32) << '\n');
1798  return false;
1799  }
1800 
1801  unsigned CmpOpc = 0;
1802  unsigned ZReg = 0;
1803 
1804  LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
1805  if (CmpTy == LLT::scalar(32)) {
1806  CmpOpc = AArch64::SUBSWrr;
1807  ZReg = AArch64::WZR;
1808  } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
1809  CmpOpc = AArch64::SUBSXrr;
1810  ZReg = AArch64::XZR;
1811  } else {
1812  return false;
1813  }
1814 
1815  // CSINC increments the result by one when the condition code is false.
1816  // Therefore, we have to invert the predicate to get an increment by 1 when
1817  // the predicate is true.
1818  const AArch64CC::CondCode invCC =
1819  changeICMPPredToAArch64CC(CmpInst::getInversePredicate(
1820  (CmpInst::Predicate)I.getOperand(1).getPredicate()));
1821 
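      // For example, an eq compare selects to roughly:
      //   SUBSWrr $wzr, %lhs, %rhs       (result discarded, NZCV set)
      //   %res = CSINCWr $wzr, $wzr, ne  (1 if eq holds, else 0)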
1822  MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
1823  .addDef(ZReg)
1824  .addUse(I.getOperand(2).getReg())
1825  .addUse(I.getOperand(3).getReg());
1826 
1827  MachineInstr &CSetMI =
1828  *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
1829  .addDef(I.getOperand(0).getReg())
1830  .addUse(AArch64::WZR)
1831  .addUse(AArch64::WZR)
1832  .addImm(invCC);
1833 
1834  constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
1835  constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);
1836 
1837  I.eraseFromParent();
1838  return true;
1839  }
1840 
1841  case TargetOpcode::G_FCMP: {
1842  if (Ty != LLT::scalar(32)) {
1843  LLVM_DEBUG(dbgs() << "G_FCMP result has type: " << Ty
1844  << ", expected: " << LLT::scalar(32) << '\n');
1845  return false;
1846  }
1847 
1848  unsigned CmpOpc = 0;
1849  LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
1850  if (CmpTy == LLT::scalar(32)) {
1851  CmpOpc = AArch64::FCMPSrr;
1852  } else if (CmpTy == LLT::scalar(64)) {
1853  CmpOpc = AArch64::FCMPDrr;
1854  } else {
1855  return false;
1856  }
1857 
1858  // FIXME: regbank
1859 
1860  AArch64CC::CondCode CC1, CC2;
1861  changeFCMPPredToAArch64CC(
1862  (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);
1863 
1864  MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
1865  .addUse(I.getOperand(2).getReg())
1866  .addUse(I.getOperand(3).getReg());
1867 
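      // Some FP predicates (e.g. one, ueq) map to two AArch64 condition
      // codes; when CC2 is not AL, a second CSINC is emitted below and the
      // two results are ORed together.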
1868  const unsigned DefReg = I.getOperand(0).getReg();
1869  unsigned Def1Reg = DefReg;
1870  if (CC2 != AArch64CC::AL)
1871  Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
1872 
1873  MachineInstr &CSetMI =
1874  *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
1875  .addDef(Def1Reg)
1876  .addUse(AArch64::WZR)
1877  .addUse(AArch64::WZR)
1878  .addImm(getInvertedCondCode(CC1));
1879 
1880  if (CC2 != AArch64CC::AL) {
1881  unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
1882  MachineInstr &CSet2MI =
1883  *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
1884  .addDef(Def2Reg)
1885  .addUse(AArch64::WZR)
1886  .addUse(AArch64::WZR)
1887  .addImm(getInvertedCondCode(CC2));
1888  MachineInstr &OrMI =
1889  *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
1890  .addDef(DefReg)
1891  .addUse(Def1Reg)
1892  .addUse(Def2Reg);
1893  constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
1894  constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
1895  }
1896 
1897  constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
1898  constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);
1899 
1900  I.eraseFromParent();
1901  return true;
1902  }
1903  case TargetOpcode::G_VASTART:
1904  return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
1905  : selectVaStartAAPCS(I, MF, MRI);
1906  case TargetOpcode::G_INTRINSIC:
1907  return selectIntrinsic(I, MRI);
1908  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1909  return selectIntrinsicWithSideEffects(I, MRI);
1910  case TargetOpcode::G_IMPLICIT_DEF: {
1911  I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
1912  const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
1913  const unsigned DstReg = I.getOperand(0).getReg();
1914  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
1915  const TargetRegisterClass *DstRC =
1916  getRegClassForTypeOnBank(DstTy, DstRB, RBI);
1917  RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
1918  return true;
1919  }
1920  case TargetOpcode::G_BLOCK_ADDR: {
1921  if (TM.getCodeModel() == CodeModel::Large) {
1922  materializeLargeCMVal(I, I.getOperand(1).getBlockAddress(), 0);
1923  I.eraseFromParent();
1924  return true;
1925  } else {
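      // MOVaddrBA is a pseudo later expanded into an ADRP/ADD pair, which is
      // why the block address is added twice: once with the page attribute
      // and once with the page-offset attribute.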
1926  I.setDesc(TII.get(AArch64::MOVaddrBA));
1927  auto MovMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::MOVaddrBA),
1928  I.getOperand(0).getReg())
1929  .addBlockAddress(I.getOperand(1).getBlockAddress(),
1930  /* Offset */ 0, AArch64II::MO_PAGE)
1931  .addBlockAddress(
1932  I.getOperand(1).getBlockAddress(), /* Offset */ 0,
1933  AArch64II::MO_NC | AArch64II::MO_PAGEOFF);
1934  I.eraseFromParent();
1935  return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI);
1936  }
1937  }
1938  case TargetOpcode::G_INTRINSIC_TRUNC:
1939  return selectIntrinsicTrunc(I, MRI);
1940  case TargetOpcode::G_INTRINSIC_ROUND:
1941  return selectIntrinsicRound(I, MRI);
1942  case TargetOpcode::G_BUILD_VECTOR:
1943  return selectBuildVector(I, MRI);
1944  case TargetOpcode::G_MERGE_VALUES:
1945  return selectMergeValues(I, MRI);
1946  case TargetOpcode::G_UNMERGE_VALUES:
1947  return selectUnmergeValues(I, MRI);
1948  case TargetOpcode::G_SHUFFLE_VECTOR:
1949  return selectShuffleVector(I, MRI);
1950  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
1951  return selectExtractElt(I, MRI);
1952  case TargetOpcode::G_INSERT_VECTOR_ELT:
1953  return selectInsertElt(I, MRI);
1954  case TargetOpcode::G_CONCAT_VECTORS:
1955  return selectConcatVectors(I, MRI);
1956  }
1957 
1958  return false;
1959 }
1960 
1961 bool AArch64InstructionSelector::selectIntrinsicTrunc(
1962  MachineInstr &I, MachineRegisterInfo &MRI) const {
1963  const LLT SrcTy = MRI.getType(I.getOperand(0).getReg());
1964 
1965  // Select the correct opcode.
1966  unsigned Opc = 0;
1967  if (!SrcTy.isVector()) {
1968  switch (SrcTy.getSizeInBits()) {
1969  default:
1970  case 16:
1971  Opc = AArch64::FRINTZHr;
1972  break;
1973  case 32:
1974  Opc = AArch64::FRINTZSr;
1975  break;
1976  case 64:
1977  Opc = AArch64::FRINTZDr;
1978  break;
1979  }
1980  } else {
1981  unsigned NumElts = SrcTy.getNumElements();
1982  switch (SrcTy.getElementType().getSizeInBits()) {
1983  default:
1984  break;
1985  case 16:
1986  if (NumElts == 4)
1987  Opc = AArch64::FRINTZv4f16;
1988  else if (NumElts == 8)
1989  Opc = AArch64::FRINTZv8f16;
1990  break;
1991  case 32:
1992  if (NumElts == 2)
1993  Opc = AArch64::FRINTZv2f32;
1994  else if (NumElts == 4)
1995  Opc = AArch64::FRINTZv4f32;
1996  break;
1997  case 64:
1998  if (NumElts == 2)
1999  Opc = AArch64::FRINTZv2f64;
2000  break;
2001  }
2002  }
2003 
2004  if (!Opc) {
2005  // Didn't get an opcode above, bail.
2006  LLVM_DEBUG(dbgs() << "Unsupported type for G_INTRINSIC_TRUNC!\n");
2007  return false;
2008  }
2009 
2010  // Legalization would have set us up perfectly for this; we just need to
2011  // set the opcode and move on.
2012  I.setDesc(TII.get(Opc));
2013  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2014 }
2015 
2016 bool AArch64InstructionSelector::selectIntrinsicRound(
2017  MachineInstr &I, MachineRegisterInfo &MRI) const {
2018  const LLT SrcTy = MRI.getType(I.getOperand(0).getReg());
2019 
2020  // Select the correct opcode.
2021  unsigned Opc = 0;
2022  if (!SrcTy.isVector()) {
2023  switch (SrcTy.getSizeInBits()) {
2024  default:
2025  case 16:
2026  Opc = AArch64::FRINTAHr;
2027  break;
2028  case 32:
2029  Opc = AArch64::FRINTASr;
2030  break;
2031  case 64:
2032  Opc = AArch64::FRINTADr;
2033  break;
2034  }
2035  } else {
2036  unsigned NumElts = SrcTy.getNumElements();
2037  switch (SrcTy.getElementType().getSizeInBits()) {
2038  default:
2039  break;
2040  case 16:
2041  if (NumElts == 4)
2042  Opc = AArch64::FRINTAv4f16;
2043  else if (NumElts == 8)
2044  Opc = AArch64::FRINTAv8f16;
2045  break;
2046  case 32:
2047  if (NumElts == 2)
2048  Opc = AArch64::FRINTAv2f32;
2049  else if (NumElts == 4)
2050  Opc = AArch64::FRINTAv4f32;
2051  break;
2052  case 64:
2053  if (NumElts == 2)
2054  Opc = AArch64::FRINTAv2f64;
2055  break;
2056  }
2057  }
2058 
2059  if (!Opc) {
2060  // Didn't get an opcode above, bail.
2061  LLVM_DEBUG(dbgs() << "Unsupported type for G_INTRINSIC_ROUND!\n");
2062  return false;
2063  }
2064 
2065  // Legalization would have set us up perfectly for this; we just need to
2066  // set the opcode and move on.
2067  I.setDesc(TII.get(Opc));
2068  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2069 }
2070 
2071 bool AArch64InstructionSelector::selectVectorICmp(
2072  MachineInstr &I, MachineRegisterInfo &MRI) const {
2073  unsigned DstReg = I.getOperand(0).getReg();
2074  LLT DstTy = MRI.getType(DstReg);
2075  unsigned SrcReg = I.getOperand(2).getReg();
2076  unsigned Src2Reg = I.getOperand(3).getReg();
2077  LLT SrcTy = MRI.getType(SrcReg);
2078 
2079  unsigned SrcEltSize = SrcTy.getElementType().getSizeInBits();
2080  unsigned NumElts = DstTy.getNumElements();
2081 
2082  // First index is element size, 0 == 8b, 1 == 16b, 2 == 32b, 3 == 64b
2083  // Second index is num elts, 0 == v2, 1 == v4, 2 == v8, 3 == v16
2084  // Third index is cc opcode:
2085  // 0 == eq
2086  // 1 == ugt
2087  // 2 == uge
2088  // 3 == ult
2089  // 4 == ule
2090  // 5 == sgt
2091  // 6 == sge
2092  // 7 == slt
2093  // 8 == sle
2094  // ne is done by negating 'eq' result.
2095 
2096  // The table below assumes that for some comparisons the operands will be
2097  // commuted.
2098  // ult op == commute + ugt op
2099  // ule op == commute + uge op
2100  // slt op == commute + sgt op
2101  // sle op == commute + sge op
2102  unsigned PredIdx = 0;
2103  bool SwapOperands = false;
2104  CmpInst::Predicate Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
2105  switch (Pred) {
2106  case CmpInst::ICMP_NE:
2107  case CmpInst::ICMP_EQ:
2108  PredIdx = 0;
2109  break;
2110  case CmpInst::ICMP_UGT:
2111  PredIdx = 1;
2112  break;
2113  case CmpInst::ICMP_UGE:
2114  PredIdx = 2;
2115  break;
2116  case CmpInst::ICMP_ULT:
2117  PredIdx = 3;
2118  SwapOperands = true;
2119  break;
2120  case CmpInst::ICMP_ULE:
2121  PredIdx = 4;
2122  SwapOperands = true;
2123  break;
2124  case CmpInst::ICMP_SGT:
2125  PredIdx = 5;
2126  break;
2127  case CmpInst::ICMP_SGE:
2128  PredIdx = 6;
2129  break;
2130  case CmpInst::ICMP_SLT:
2131  PredIdx = 7;
2132  SwapOperands = true;
2133  break;
2134  case CmpInst::ICMP_SLE:
2135  PredIdx = 8;
2136  SwapOperands = true;
2137  break;
2138  default:
2139  llvm_unreachable("Unhandled icmp predicate");
2140  return false;
2141  }
2142 
2143  // This table obviously should be tablegen'd when we have our GISel native
2144  // tablegen selector.
2145 
2146  static const unsigned OpcTable[4][4][9] = {
2147  {
2148  {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2149  0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2150  0 /* invalid */},
2151  {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2152  0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2153  0 /* invalid */},
2154  {AArch64::CMEQv8i8, AArch64::CMHIv8i8, AArch64::CMHSv8i8,
2155  AArch64::CMHIv8i8, AArch64::CMHSv8i8, AArch64::CMGTv8i8,
2156  AArch64::CMGEv8i8, AArch64::CMGTv8i8, AArch64::CMGEv8i8},
2157  {AArch64::CMEQv16i8, AArch64::CMHIv16i8, AArch64::CMHSv16i8,
2158  AArch64::CMHIv16i8, AArch64::CMHSv16i8, AArch64::CMGTv16i8,
2159  AArch64::CMGEv16i8, AArch64::CMGTv16i8, AArch64::CMGEv16i8}
2160  },
2161  {
2162  {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2163  0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2164  0 /* invalid */},
2165  {AArch64::CMEQv4i16, AArch64::CMHIv4i16, AArch64::CMHSv4i16,
2166  AArch64::CMHIv4i16, AArch64::CMHSv4i16, AArch64::CMGTv4i16,
2167  AArch64::CMGEv4i16, AArch64::CMGTv4i16, AArch64::CMGEv4i16},
2168  {AArch64::CMEQv8i16, AArch64::CMHIv8i16, AArch64::CMHSv8i16,
2169  AArch64::CMHIv8i16, AArch64::CMHSv8i16, AArch64::CMGTv8i16,
2170  AArch64::CMGEv8i16, AArch64::CMGTv8i16, AArch64::CMGEv8i16},
2171  {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2172  0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2173  0 /* invalid */}
2174  },
2175  {
2176  {AArch64::CMEQv2i32, AArch64::CMHIv2i32, AArch64::CMHSv2i32,
2177  AArch64::CMHIv2i32, AArch64::CMHSv2i32, AArch64::CMGTv2i32,
2178  AArch64::CMGEv2i32, AArch64::CMGTv2i32, AArch64::CMGEv2i32},
2179  {AArch64::CMEQv4i32, AArch64::CMHIv4i32, AArch64::CMHSv4i32,
2180  AArch64::CMHIv4i32, AArch64::CMHSv4i32, AArch64::CMGTv4i32,
2181  AArch64::CMGEv4i32, AArch64::CMGTv4i32, AArch64::CMGEv4i32},
2182  {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2183  0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2184  0 /* invalid */},
2185  {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2186  0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2187  0 /* invalid */}
2188  },
2189  {
2190  {AArch64::CMEQv2i64, AArch64::CMHIv2i64, AArch64::CMHSv2i64,
2191  AArch64::CMHIv2i64, AArch64::CMHSv2i64, AArch64::CMGTv2i64,
2192  AArch64::CMGEv2i64, AArch64::CMGTv2i64, AArch64::CMGEv2i64},
2193  {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2194  0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2195  0 /* invalid */},
2196  {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2197  0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2198  0 /* invalid */},
2199  {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2200  0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2201  0 /* invalid */}
2202  },
2203  };
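      // Map the element size {8, 16, 32, 64} to row {0, 1, 2, 3} and the
      // element count {2, 4, 8, 16} to column {0, 1, 2, 3} of the table
      // above.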
2204  unsigned EltIdx = Log2_32(SrcEltSize / 8);
2205  unsigned NumEltsIdx = Log2_32(NumElts / 2);
2206  unsigned Opc = OpcTable[EltIdx][NumEltsIdx][PredIdx];
2207  if (!Opc) {
2208  LLVM_DEBUG(dbgs() << "Could not map G_ICMP to cmp opcode");
2209  return false;
2210  }
2211 
2212  const RegisterBank &VecRB = *RBI.getRegBank(SrcReg, MRI, TRI);
2213  const TargetRegisterClass *SrcRC =
2214  getRegClassForTypeOnBank(SrcTy, VecRB, RBI, true);
2215  if (!SrcRC) {
2216  LLVM_DEBUG(dbgs() << "Could not determine source register class.\n");
2217  return false;
2218  }
2219 
2220  unsigned NotOpc = Pred == ICmpInst::ICMP_NE ? AArch64::NOTv8i8 : 0;
2221  if (SrcTy.getSizeInBits() == 128)
2222  NotOpc = NotOpc ? AArch64::NOTv16i8 : 0;
2223 
2224  if (SwapOperands)
2225  std::swap(SrcReg, Src2Reg);
2226 
2227  MachineIRBuilder MIB(I);
2228  auto Cmp = MIB.buildInstr(Opc, {SrcRC}, {SrcReg, Src2Reg});
2229  constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
2230 
2231  // Invert if we had a 'ne' cc.
2232  if (NotOpc) {
2233  Cmp = MIB.buildInstr(NotOpc, {DstReg}, {Cmp});
2234  constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
2235  } else {
2236  MIB.buildCopy(DstReg, Cmp.getReg(0));
2237  }
2238  RBI.constrainGenericRegister(DstReg, *SrcRC, MRI);
2239  I.eraseFromParent();
2240  return true;
2241 }
2242 
2243 MachineInstr *AArch64InstructionSelector::emitScalarToVector(
2244  unsigned EltSize, const TargetRegisterClass *DstRC, unsigned Scalar,
2245  MachineIRBuilder &MIRBuilder) const {
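      // Widen the scalar by inserting it into the low subregister lane of an
      // IMPLICIT_DEF vector register, the usual lowering of scalar_to_vector.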
2246  auto Undef = MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstRC}, {});
2247 
2248  auto BuildFn = [&](unsigned SubregIndex) {
2249  auto Ins =
2250  MIRBuilder
2251  .buildInstr(TargetOpcode::INSERT_SUBREG, {DstRC}, {Undef, Scalar})
2252  .addImm(SubregIndex);
2253  constrainSelectedInstRegOperands(*Undef, TII, TRI, RBI);
2254  constrainSelectedInstRegOperands(*Ins, TII, TRI, RBI);
2255  return &*Ins;
2256  };
2257 
2258  switch (EltSize) {
2259  case 16:
2260  return BuildFn(AArch64::hsub);
2261  case 32:
2262  return BuildFn(AArch64::ssub);
2263  case 64:
2264  return BuildFn(AArch64::dsub);
2265  default:
2266  return nullptr;
2267  }
2268 }
2269 
2270 bool AArch64InstructionSelector::selectMergeValues(
2271  MachineInstr &I, MachineRegisterInfo &MRI) const {
2272  assert(I.getOpcode() == TargetOpcode::G_MERGE_VALUES && "unexpected opcode");
2273  const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2274  const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
2275  assert(!DstTy.isVector() && !SrcTy.isVector() && "invalid merge operation");
2276 
2277  // At the moment we only support merging two s32s into an s64.
2278  if (I.getNumOperands() != 3)
2279  return false;
2280  if (DstTy.getSizeInBits() != 64 || SrcTy.getSizeInBits() != 32)
2281  return false;
2282  const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
2283  if (RB.getID() != AArch64::GPRRegBankID)
2284  return false;
2285 
2286  auto *DstRC = &AArch64::GPR64RegClass;
2287  unsigned SubToRegDef = MRI.createVirtualRegister(DstRC);
2288  MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
2289  TII.get(TargetOpcode::SUBREG_TO_REG))
2290  .addDef(SubToRegDef)
2291  .addImm(0)
2292  .addUse(I.getOperand(1).getReg())
2293  .addImm(AArch64::sub_32);
2294  unsigned SubToRegDef2 = MRI.createVirtualRegister(DstRC);
2295  // Need to anyext the second scalar before we can use bfm
2296  MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
2297  TII.get(TargetOpcode::SUBREG_TO_REG))
2298  .addDef(SubToRegDef2)
2299  .addImm(0)
2300  .addUse(I.getOperand(2).getReg())
2301  .addImm(AArch64::sub_32);
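      // BFMXri with immr = 32 and imms = 31 copies the low 32 bits of the
      // second operand into bits [63:32] of the first, producing the merged
      // s64 value.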
2302  MachineInstr &BFM =
2303  *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::BFMXri))
2304  .addDef(I.getOperand(0).getReg())
2305  .addUse(SubToRegDef)
2306  .addUse(SubToRegDef2)
2307  .addImm(32)
2308  .addImm(31);
2309  constrainSelectedInstRegOperands(SubRegMI, TII, TRI, RBI);
2310  constrainSelectedInstRegOperands(SubRegMI2, TII, TRI, RBI);
2311  constrainSelectedInstRegOperands(BFM, TII, TRI, RBI);
2312  I.eraseFromParent();
2313  return true;
2314 }
2315 
2316 static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg,
2317  const unsigned EltSize) {
2318  // Choose a lane copy opcode and subregister based off of the size of the
2319  // vector's elements.
2320  switch (EltSize) {
2321  case 16:
2322  CopyOpc = AArch64::CPYi16;
2323  ExtractSubReg = AArch64::hsub;
2324  break;
2325  case 32:
2326  CopyOpc = AArch64::CPYi32;
2327  ExtractSubReg = AArch64::ssub;
2328  break;
2329  case 64:
2330  CopyOpc = AArch64::CPYi64;
2331  ExtractSubReg = AArch64::dsub;
2332  break;
2333  default:
2334  // Unknown size, bail out.
2335  LLVM_DEBUG(dbgs() << "Elt size '" << EltSize << "' unsupported.\n");
2336  return false;
2337  }
2338  return true;
2339 }
2340 
2341 MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
2342  Optional<unsigned> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
2343  unsigned VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
2344  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
2345  unsigned CopyOpc = 0;
2346  unsigned ExtractSubReg = 0;
2347  if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, ScalarTy.getSizeInBits())) {
2348  LLVM_DEBUG(
2349  dbgs() << "Couldn't determine lane copy opcode for instruction.\n");
2350  return nullptr;
2351  }
2352 
2353  const TargetRegisterClass *DstRC =
2354  getRegClassForTypeOnBank(ScalarTy, DstRB, RBI, true);
2355  if (!DstRC) {
2356  LLVM_DEBUG(dbgs() << "Could not determine destination register class.\n");
2357  return nullptr;
2358  }
2359 
2360  const RegisterBank &VecRB = *RBI.getRegBank(VecReg, MRI, TRI);
2361  const LLT &VecTy = MRI.getType(VecReg);
2362  const TargetRegisterClass *VecRC =
2363  getRegClassForTypeOnBank(VecTy, VecRB, RBI, true);
2364  if (!VecRC) {
2365  LLVM_DEBUG(dbgs() << "Could not determine source register class.\n");
2366  return nullptr;
2367  }
2368 
2369  // The register that we're going to copy into.
2370  unsigned InsertReg = VecReg;
2371  if (!DstReg)
2372  DstReg = MRI.createVirtualRegister(DstRC);
2373  // If the lane index is 0, we just use a subregister COPY.
2374  if (LaneIdx == 0) {
2375  auto Copy = MIRBuilder.buildInstr(TargetOpcode::COPY, {*DstReg}, {})
2376  .addReg(VecReg, 0, ExtractSubReg);
2377  RBI.constrainGenericRegister(*DstReg, *DstRC, MRI);
2378  return &*Copy;
2379  }
2380 
2381  // Lane copies require 128-bit wide registers. If we're dealing with an
2382  // unpacked vector, then we need to move up to that width. Insert an implicit
2383  // def and a subregister insert to get us there.
2384  if (VecTy.getSizeInBits() != 128) {
2385  MachineInstr *ScalarToVector = emitScalarToVector(
2386  VecTy.getSizeInBits(), &AArch64::FPR128RegClass, VecReg, MIRBuilder);
2387  if (!ScalarToVector)
2388  return nullptr;
2389  InsertReg = ScalarToVector->getOperand(0).getReg();
2390  }
2391 
2392  MachineInstr *LaneCopyMI =
2393  MIRBuilder.buildInstr(CopyOpc, {*DstReg}, {InsertReg}).addImm(LaneIdx);
2394  constrainSelectedInstRegOperands(*LaneCopyMI, TII, TRI, RBI);
2395 
2396  // Make sure that we actually constrain the initial copy.
2397  RBI.constrainGenericRegister(*DstReg, *DstRC, MRI);
2398  return LaneCopyMI;
2399 }
2400 
2401 bool AArch64InstructionSelector::selectExtractElt(
2402  MachineInstr &I, MachineRegisterInfo &MRI) const {
2403  assert(I.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT &&
2404  "unexpected opcode!");
2405  unsigned DstReg = I.getOperand(0).getReg();
2406  const LLT NarrowTy = MRI.getType(DstReg);
2407  const unsigned SrcReg = I.getOperand(1).getReg();
2408  const LLT WideTy = MRI.getType(SrcReg);
2409  (void)WideTy;
2410  assert(WideTy.getSizeInBits() >= NarrowTy.getSizeInBits() &&
2411  "source register size too small!");
2412  assert(NarrowTy.isScalar() && "cannot extract vector into vector!");
2413 
2414  // Need the lane index to determine the correct copy opcode.
2415  MachineOperand &LaneIdxOp = I.getOperand(2);
2416  assert(LaneIdxOp.isReg() && "Lane index operand was not a register?");
2417 
2418  if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) {
2419  LLVM_DEBUG(dbgs() << "Cannot extract into GPR.\n");
2420  return false;
2421  }
2422 
2423  // Find the index to extract from.
2424  auto VRegAndVal = getConstantVRegValWithLookThrough(LaneIdxOp.getReg(), MRI);
2425  if (!VRegAndVal)
2426  return false;
2427  unsigned LaneIdx = VRegAndVal->Value;
2428 
2429  MachineIRBuilder MIRBuilder(I);
2430 
2431  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
2432  MachineInstr *Extract = emitExtractVectorElt(DstReg, DstRB, NarrowTy, SrcReg,
2433  LaneIdx, MIRBuilder);
2434  if (!Extract)
2435  return false;
2436 
2437  I.eraseFromParent();
2438  return true;
2439 }
2440 
2441 bool AArch64InstructionSelector::selectSplitVectorUnmerge(
2442  MachineInstr &I, MachineRegisterInfo &MRI) const {
2443  unsigned NumElts = I.getNumOperands() - 1;
2444  unsigned SrcReg = I.getOperand(NumElts).getReg();
2445  const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
2446  const LLT SrcTy = MRI.getType(SrcReg);
2447 
2448  assert(NarrowTy.isVector() && "Expected an unmerge into vectors");
2449  if (SrcTy.getSizeInBits() > 128) {
2450  LLVM_DEBUG(dbgs() << "Unexpected vector type for vec split unmerge");
2451  return false;
2452  }
2453 
2454  MachineIRBuilder MIB(I);
2455 
2456  // We implement a split vector operation by treating the sub-vectors as
2457  // scalars and extracting them.
2458  const RegisterBank &DstRB =
2459  *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI);
2460  for (unsigned OpIdx = 0; OpIdx < NumElts; ++OpIdx) {
2461  unsigned Dst = I.getOperand(OpIdx).getReg();
2462  MachineInstr *Extract =
2463  emitExtractVectorElt(Dst, DstRB, NarrowTy, SrcReg, OpIdx, MIB);
2464  if (!Extract)
2465  return false;
2466  }
2467  I.eraseFromParent();
2468  return true;
2469 }
2470 
2471 bool AArch64InstructionSelector::selectUnmergeValues(
2472  MachineInstr &I, MachineRegisterInfo &MRI) const {
2473  assert(I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2474  "unexpected opcode");
2475 
2476  // TODO: Handle unmerging into GPRs and from scalars to scalars.
2477  if (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() !=
2478  AArch64::FPRRegBankID ||
2479  RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI)->getID() !=
2480  AArch64::FPRRegBankID) {
2481  LLVM_DEBUG(dbgs() << "Unmerging vector-to-gpr and scalar-to-scalar "
2482  "currently unsupported.\n");
2483  return false;
2484  }
2485 
2486  // The last operand is the vector source register, and every other operand is
2487  // a register to unpack into.
2488  unsigned NumElts = I.getNumOperands() - 1;
2489  unsigned SrcReg = I.getOperand(NumElts).getReg();
2490  const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
2491  const LLT WideTy = MRI.getType(SrcReg);
2492  (void)WideTy;
2493  assert(WideTy.isVector() && "can only unmerge from vector types!");
2494  assert(WideTy.getSizeInBits() > NarrowTy.getSizeInBits() &&
2495  "source register size too small!");
2496 
2497  if (!NarrowTy.isScalar())
2498  return selectSplitVectorUnmerge(I, MRI);
2499 
2500  MachineIRBuilder MIB(I);
2501 
2502  // Choose a lane copy opcode and subregister based off of the size of the
2503  // vector's elements.
2504  unsigned CopyOpc = 0;
2505  unsigned ExtractSubReg = 0;
2506  if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, NarrowTy.getSizeInBits()))
2507  return false;
2508 
2509  // Set up for the lane copies.
2510  MachineBasicBlock &MBB = *I.getParent();
2511 
2512  // Stores the registers we'll be copying from.
2513  SmallVector<unsigned, 4> InsertRegs;
2514 
2515  // We'll use the first register twice, so we only need NumElts-1 registers.
2516  unsigned NumInsertRegs = NumElts - 1;
2517 
2518  // If our elements fit into exactly 128 bits, then we can copy from the source
2519  // directly. Otherwise, we need to do a bit of setup with some subregister
2520  // inserts.
2521  if (NarrowTy.getSizeInBits() * NumElts == 128) {
2522  InsertRegs = SmallVector<unsigned, 4>(NumInsertRegs, SrcReg);
2523  } else {
2524  // No. We have to perform subregister inserts. For each insert, create an
2525  // implicit def and a subregister insert, and save the register we create.
2526  for (unsigned Idx = 0; Idx < NumInsertRegs; ++Idx) {
2527  unsigned ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
2528  MachineInstr &ImpDefMI =
2529  *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::IMPLICIT_DEF),
2530  ImpDefReg);
2531 
2532  // Now, create the subregister insert from SrcReg.
2533  unsigned InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
2534  MachineInstr &InsMI =
2535  *BuildMI(MBB, I, I.getDebugLoc(),
2536  TII.get(TargetOpcode::INSERT_SUBREG), InsertReg)
2537  .addUse(ImpDefReg)
2538  .addUse(SrcReg)
2539  .addImm(AArch64::dsub);
2540 
2541  constrainSelectedInstRegOperands(ImpDefMI, TII, TRI, RBI);
2542  constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI);
2543 
2544  // Save the register so that we can copy from it after.
2545  InsertRegs.push_back(InsertReg);
2546  }
2547  }
2548 
2549  // Now that we've created any necessary subregister inserts, we can
2550  // create the copies.
2551  //
2552  // Perform the first copy separately as a subregister copy.
2553  unsigned CopyTo = I.getOperand(0).getReg();
2554  auto FirstCopy = MIB.buildInstr(TargetOpcode::COPY, {CopyTo}, {})
2555  .addReg(InsertRegs[0], 0, ExtractSubReg);
2556  constrainSelectedInstRegOperands(*FirstCopy, TII, TRI, RBI);
2557 
2558  // Now, perform the remaining copies as vector lane copies.
2559  unsigned LaneIdx = 1;
2560  for (unsigned InsReg : InsertRegs) {
2561  unsigned CopyTo = I.getOperand(LaneIdx).getReg();
2562  MachineInstr &CopyInst =
2563  *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CopyOpc), CopyTo)
2564  .addUse(InsReg)
2565  .addImm(LaneIdx);
2566  constrainSelectedInstRegOperands(CopyInst, TII, TRI, RBI);
2567  ++LaneIdx;
2568  }
2569 
2570  // Separately constrain the first copy's destination. Because of the
2571  // limitation in constrainOperandRegClass, we can't guarantee that this will
2572  // actually be constrained. So, do it ourselves using the second operand.
2573  const TargetRegisterClass *RC =
2574  MRI.getRegClassOrNull(I.getOperand(1).getReg());
2575  if (!RC) {
2576  LLVM_DEBUG(dbgs() << "Couldn't constrain copy destination.\n");
2577  return false;
2578  }
2579 
2580  RBI.constrainGenericRegister(CopyTo, *RC, MRI);
2581  I.eraseFromParent();
2582  return true;
2583 }
2584 
2585 bool AArch64InstructionSelector::selectConcatVectors(
2586  MachineInstr &I, MachineRegisterInfo &MRI) const {
2587  assert(I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
2588  "Unexpected opcode");
2589  unsigned Dst = I.getOperand(0).getReg();
2590  unsigned Op1 = I.getOperand(1).getReg();
2591  unsigned Op2 = I.getOperand(2).getReg();
2592  MachineIRBuilder MIRBuilder(I);
2593  MachineInstr *ConcatMI = emitVectorConcat(Dst, Op1, Op2, MIRBuilder);
2594  if (!ConcatMI)
2595  return false;
2596  I.eraseFromParent();
2597  return true;
2598 }
2599 
2600 void AArch64InstructionSelector::collectShuffleMaskIndices(
2601  MachineInstr &I, MachineRegisterInfo &MRI,
2602  SmallVectorImpl<Optional<int>> &Idxs) const {
2603  MachineInstr *MaskDef = MRI.getVRegDef(I.getOperand(3).getReg());
2604  assert(
2605  MaskDef->getOpcode() == TargetOpcode::G_BUILD_VECTOR &&
2606  "G_SHUFFLE_VECTOR should have a constant mask operand as G_BUILD_VECTOR");
2607  // Find the constant indices.
2608  for (unsigned i = 1, e = MaskDef->getNumOperands(); i < e; ++i) {
2609  MachineInstr *ScalarDef = MRI.getVRegDef(MaskDef->getOperand(i).getReg());
2610  assert(ScalarDef && "Could not find vreg def of shufflevec index op");
2611  // Look through copies.
2612  while (ScalarDef->getOpcode() == TargetOpcode::COPY) {
2613  ScalarDef = MRI.getVRegDef(ScalarDef->getOperand(1).getReg());
2614  assert(ScalarDef && "Could not find def of copy operand");
2615  }
2616  if (ScalarDef->getOpcode() != TargetOpcode::G_CONSTANT) {
2617  // This must be an undef if it's not a constant.
2618  assert(ScalarDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
2619  Idxs.push_back(None);
2620  } else {
2621  Idxs.push_back(ScalarDef->getOperand(1).getCImm()->getSExtValue());
2622  }
2623  }
2624 }
2625 
2626 unsigned
2627 AArch64InstructionSelector::emitConstantPoolEntry(Constant *CPVal,
2628  MachineFunction &MF) const {
2629  Type *CPTy = CPVal->getType();
2630  unsigned Align = MF.getDataLayout().getPrefTypeAlignment(CPTy);
2631  if (Align == 0)
2632  Align = MF.getDataLayout().getTypeAllocSize(CPTy);
2633 
2634  MachineConstantPool *MCP = MF.getConstantPool();
2635  return MCP->getConstantPoolIndex(CPVal, Align);
2636 }
2637 
2638 MachineInstr *AArch64InstructionSelector::emitLoadFromConstantPool(
2639  Constant *CPVal, MachineIRBuilder &MIRBuilder) const {
2640  unsigned CPIdx = emitConstantPoolEntry(CPVal, MIRBuilder.getMF());
2641 
2642  auto Adrp =
2643  MIRBuilder.buildInstr(AArch64::ADRP, {&AArch64::GPR64RegClass}, {})
2644  .addConstantPoolIndex(CPIdx, 0, AArch64II::MO_PAGE);
2645 
2646  MachineInstr *LoadMI = nullptr;
2647  switch (MIRBuilder.getDataLayout().getTypeStoreSize(CPVal->getType())) {
2648  case 16:
2649  LoadMI =
2650  &*MIRBuilder
2651  .buildInstr(AArch64::LDRQui, {&AArch64::FPR128RegClass}, {Adrp})
2652  .addConstantPoolIndex(CPIdx, 0,
2653  AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2654  break;
2655  case 8:
2656  LoadMI = &*MIRBuilder
2657  .buildInstr(AArch64::LDRDui, {&AArch64::FPR64RegClass}, {Adrp})
2658  .addConstantPoolIndex(
2659  CPIdx, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2660  break;
2661  default:
2662  LLVM_DEBUG(dbgs() << "Could not load from constant pool of type "
2663  << *CPVal->getType());
2664  return nullptr;
2665  }
2666  constrainSelectedInstRegOperands(*Adrp, TII, TRI, RBI);
2667  constrainSelectedInstRegOperands(*LoadMI, TII, TRI, RBI);
2668  return LoadMI;
2669 }
2670 
2671 /// Return an <Opcode, SubregIndex> pair to do a vector elt insert of a given
2672 /// size and RB.
2673 static std::pair<unsigned, unsigned>
2674 getInsertVecEltOpInfo(const RegisterBank &RB, unsigned EltSize) {
2675  unsigned Opc, SubregIdx;
2676  if (RB.getID() == AArch64::GPRRegBankID) {
2677  if (EltSize == 32) {
2678  Opc = AArch64::INSvi32gpr;
2679  SubregIdx = AArch64::ssub;
2680  } else if (EltSize == 64) {
2681  Opc = AArch64::INSvi64gpr;
2682  SubregIdx = AArch64::dsub;
2683  } else {
2684  llvm_unreachable("invalid elt size!");
2685  }
2686  } else {
2687  if (EltSize == 8) {
2688  Opc = AArch64::INSvi8lane;
2689  SubregIdx = AArch64::bsub;
2690  } else if (EltSize == 16) {
2691  Opc = AArch64::INSvi16lane;
2692  SubregIdx = AArch64::hsub;
2693  } else if (EltSize == 32) {
2694  Opc = AArch64::INSvi32lane;
2695  SubregIdx = AArch64::ssub;
2696  } else if (EltSize == 64) {
2697  Opc = AArch64::INSvi64lane;
2698  SubregIdx = AArch64::dsub;
2699  } else {
2700  llvm_unreachable("invalid elt size!");
2701  }
2702  }
2703  return std::make_pair(Opc, SubregIdx);
2704 }
2705 
2706 MachineInstr *AArch64InstructionSelector::emitVectorConcat(
2707  Optional<unsigned> Dst, unsigned Op1, unsigned Op2,
2708  MachineIRBuilder &MIRBuilder) const {
2709  // We implement a vector concat by:
2710  // 1. Use scalar_to_vector to insert the lower vector into the larger dest
2711  // 2. Insert the upper vector into the destination's upper element
2712  // TODO: some of this code is common with G_BUILD_VECTOR handling.
2713  MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
2714 
2715  const LLT Op1Ty = MRI.getType(Op1);
2716  const LLT Op2Ty = MRI.getType(Op2);
2717 
2718  if (Op1Ty != Op2Ty) {
2719  LLVM_DEBUG(dbgs() << "Could not do vector concat of differing vector tys");
2720  return nullptr;
2721  }
2722  assert(Op1Ty.isVector() && "Expected a vector for vector concat");
2723 
2724  if (Op1Ty.getSizeInBits() >= 128) {
2725  LLVM_DEBUG(dbgs() << "Vector concat not supported for full size vectors");
2726  return nullptr;
2727  }
2728 
2729  // At the moment we just support 64 bit vector concats.
2730  if (Op1Ty.getSizeInBits() != 64) {
2731  LLVM_DEBUG(dbgs() << "Vector concat only supported for 64b vectors");
2732  return nullptr;
2733  }
2734 
2735  const LLT ScalarTy = LLT::scalar(Op1Ty.getSizeInBits());
2736  const RegisterBank &FPRBank = *RBI.getRegBank(Op1, MRI, TRI);
2737  const TargetRegisterClass *DstRC =
2738  getMinClassForRegBank(FPRBank, Op1Ty.getSizeInBits() * 2);
2739 
2740  MachineInstr *WidenedOp1 =
2741  emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op1, MIRBuilder);
2742  MachineInstr *WidenedOp2 =
2743  emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op2, MIRBuilder);
2744  if (!WidenedOp1 || !WidenedOp2) {
2745  LLVM_DEBUG(dbgs() << "Could not emit a vector from scalar value");
2746  return nullptr;
2747  }
2748 
2749  // Now do the insert of the upper element.
2750  unsigned InsertOpc, InsSubRegIdx;
2751  std::tie(InsertOpc, InsSubRegIdx) =
2752  getInsertVecEltOpInfo(FPRBank, ScalarTy.getSizeInBits());
2753 
2754  if (!Dst)
2755  Dst = MRI.createVirtualRegister(DstRC);
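      // INSvi*lane copies lane 0 of the widened second operand into lane 1
      // of the widened first operand, completing the 64-bit + 64-bit
      // concatenation.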
2756  auto InsElt =
2757  MIRBuilder
2758  .buildInstr(InsertOpc, {*Dst}, {WidenedOp1->getOperand(0).getReg()})
2759  .addImm(1) /* Lane index */
2760  .addUse(WidenedOp2->getOperand(0).getReg())
2761  .addImm(0);
2762  constrainSelectedInstRegOperands(*InsElt, TII, TRI, RBI);
2763  return &*InsElt;
2764 }
2765 
2766 MachineInstr *AArch64InstructionSelector::emitFMovForFConstant(
2767  MachineInstr &I, MachineRegisterInfo &MRI) const {
2768  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT &&
2769  "Expected a G_FCONSTANT!");
2770  MachineOperand &ImmOp = I.getOperand(1);
2771  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
2772 
2773  // Only handle 32 and 64 bit defs for now.
2774  if (DefSize != 32 && DefSize != 64)
2775  return nullptr;
2776 
2777  // Don't handle null values using FMOV.
2778  if (ImmOp.getFPImm()->isNullValue())
2779  return nullptr;
2780 
2781  // Get the immediate representation for the FMOV.
2782  const APFloat &ImmValAPF = ImmOp.getFPImm()->getValueAPF();
2783  int Imm = DefSize == 32 ? AArch64_AM::getFP32Imm(ImmValAPF)
2784  : AArch64_AM::getFP64Imm(ImmValAPF);
2785 
2786  // If this is -1, it means the immediate can't be represented as the requested
2787  // floating point value. Bail.
2788  if (Imm == -1)
2789  return nullptr;
2790 
2791  // Update MI to represent the new FMOV instruction, constrain it, and return.
2792  ImmOp.ChangeToImmediate(Imm);
2793  unsigned MovOpc = DefSize == 32 ? AArch64::FMOVSi : AArch64::FMOVDi;
2794  I.setDesc(TII.get(MovOpc));
2795  constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2796  return &I;
2797 }
2798 
2799 bool AArch64InstructionSelector::tryOptVectorDup(MachineInstr &I) const {
2800  // Try to match a vector splat operation into a dup instruction.
2801  // We're looking for this pattern:
2802  // %scalar:gpr(s64) = COPY $x0
2803  // %undef:fpr(<2 x s64>) = G_IMPLICIT_DEF
2804  // %cst0:gpr(s32) = G_CONSTANT i32 0
2805  // %zerovec:fpr(<2 x s32>) = G_BUILD_VECTOR %cst0(s32), %cst0(s32)
2806  // %ins:fpr(<2 x s64>) = G_INSERT_VECTOR_ELT %undef, %scalar(s64), %cst0(s32)
2807  // %splat:fpr(<2 x s64>) = G_SHUFFLE_VECTOR %ins(<2 x s64>), %undef,
2808  // %zerovec(<2 x s32>)
2809  //
2810  // ...into:
2811  // %splat = DUP %scalar
2812  // We use the regbank of the scalar to determine which kind of dup to use.
2813  MachineIRBuilder MIB(I);
2814  MachineRegisterInfo &MRI = *MIB.getMRI();
2815  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
2816  using namespace TargetOpcode;
2817  using namespace MIPatternMatch;
2818 
2819  // Begin matching the insert.
2820  auto *InsMI =
2821  findMIFromReg(I.getOperand(1).getReg(), G_INSERT_VECTOR_ELT, MIB);
2822  if (!InsMI)
2823  return false;
2824  // Match the undef vector operand.
2825  auto *UndefMI =
2826  findMIFromReg(InsMI->getOperand(1).getReg(), G_IMPLICIT_DEF, MIB);
2827  if (!UndefMI)
2828  return false;
2829  // Match the scalar being splatted.
2830  unsigned ScalarReg = InsMI->getOperand(2).getReg();
2831  const RegisterBank *ScalarRB = RBI.getRegBank(ScalarReg, MRI, TRI);
2832  // Match the index constant 0.
2833  int64_t Index = 0;
2834  if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ICst(Index)) || Index)
2835  return false;
2836 
2837  // The shuffle's second operand doesn't matter if the mask is all zero.
2838  auto *ZeroVec = findMIFromReg(I.getOperand(3).getReg(), G_BUILD_VECTOR, MIB);
2839  if (!ZeroVec)
2840  return false;
2841  int64_t Zero = 0;
2842  if (!mi_match(ZeroVec->getOperand(1).getReg(), MRI, m_ICst(Zero)) || Zero)
2843  return false;
2844  for (unsigned i = 1, e = ZeroVec->getNumOperands() - 1; i < e; ++i) {
2845  if (ZeroVec->getOperand(i).getReg() != ZeroVec->getOperand(1).getReg())
2846  return false; // This wasn't an all zeros vector.
2847  }
2848 
2849  // We're done, now find out what kind of splat we need.
2850  LLT VecTy = MRI.getType(I.getOperand(0).getReg());
2851  LLT EltTy = VecTy.getElementType();
2852  if (VecTy.getSizeInBits() != 128 || EltTy.getSizeInBits() < 32) {
2853  LLVM_DEBUG(dbgs() << "Could not optimize splat pattern < 128b yet");
2854  return false;
2855  }
2856  bool IsFP = ScalarRB->getID() == AArch64::FPRRegBankID;
2857  static const unsigned OpcTable[2][2] = {
2858  {AArch64::DUPv4i32gpr, AArch64::DUPv2i64gpr},
2859  {AArch64::DUPv4i32lane, AArch64::DUPv2i64lane}};
2860  unsigned Opc = OpcTable[IsFP][EltTy.getSizeInBits() == 64];
2861 
2862  // For FP splats, we need to widen the scalar reg via undef too.
2863  if (IsFP) {
2864  MachineInstr *Widen = emitScalarToVector(
2865  EltTy.getSizeInBits(), &AArch64::FPR128RegClass, ScalarReg, MIB);
2866  if (!Widen)
2867  return false;
2868  ScalarReg = Widen->getOperand(0).getReg();
2869  }
2870  auto Dup = MIB.buildInstr(Opc, {I.getOperand(0).getReg()}, {ScalarReg});
2871  if (IsFP)
2872  Dup.addImm(0);
2873  constrainSelectedInstRegOperands(*Dup, TII, TRI, RBI);
2874  I.eraseFromParent();
2875  return true;
2876 }
2877 
2878 bool AArch64InstructionSelector::tryOptVectorShuffle(MachineInstr &I) const {
2879  if (TM.getOptLevel() == CodeGenOpt::None)
2880  return false;
2881  if (tryOptVectorDup(I))
2882  return true;
2883  return false;
2884 }
2885 
2886 bool AArch64InstructionSelector::selectShuffleVector(
2887  MachineInstr &I, MachineRegisterInfo &MRI) const {
2888  if (tryOptVectorShuffle(I))
2889  return true;
2890  const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2891  unsigned Src1Reg = I.getOperand(1).getReg();
2892  const LLT Src1Ty = MRI.getType(Src1Reg);
2893  unsigned Src2Reg = I.getOperand(2).getReg();
2894  const LLT Src2Ty = MRI.getType(Src2Reg);
2895 
2896  MachineBasicBlock &MBB = *I.getParent();
2897  MachineFunction &MF = *MBB.getParent();
2898  LLVMContext &Ctx = MF.getFunction().getContext();
2899 
2900  // G_SHUFFLE_VECTOR doesn't really have a strictly enforced constant mask
2901  // operand, it comes in as a normal vector value which we have to analyze to
2902  // find the mask indices. If the mask element is undef, then
2903  // collectShuffleMaskIndices() will add a None entry for that index into
2904  // the list.
2905  SmallVector<Optional<int>, 8> Mask;
2906  collectShuffleMaskIndices(I, MRI, Mask);
2907  assert(!Mask.empty() && "Expected to find mask indices");
2908 
2909  // G_SHUFFLE_VECTOR is weird in that the source operands can be scalars, if
2910  // it's originated from a <1 x T> type. Those should have been lowered into
2911  // G_BUILD_VECTOR earlier.
2912  if (!Src1Ty.isVector() || !Src2Ty.isVector()) {
2913  LLVM_DEBUG(dbgs() << "Could not select a \"scalar\" G_SHUFFLE_VECTOR\n");
2914  return false;
2915  }
2916 
2917  unsigned BytesPerElt = DstTy.getElementType().getSizeInBits() / 8;
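      // TBL indexes individual bytes, so each lane index in the mask expands
      // into BytesPerElt consecutive byte offsets; e.g. with 4-byte elements,
      // a mask entry of 2 becomes the byte indices 8, 9, 10, 11.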
2918 
2919  SmallVector<Constant *, 64> CstIdxs;
2920  for (auto &MaybeVal : Mask) {
2921  // For now, we'll just assume any undef indices are 0. This should be
2922  // optimized in the future, e.g. to select DUP etc.
2923  int Val = MaybeVal.hasValue() ? *MaybeVal : 0;
2924  for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
2925  unsigned Offset = Byte + Val * BytesPerElt;
2926  CstIdxs.emplace_back(ConstantInt::get(Type::getInt8Ty(Ctx), Offset));
2927  }
2928  }
2929 
2930  MachineIRBuilder MIRBuilder(I);
2931 
2932  // Use a constant pool to load the index vector for TBL.
2933  Constant *CPVal = ConstantVector::get(CstIdxs);
2934  MachineInstr *IndexLoad = emitLoadFromConstantPool(CPVal, MIRBuilder);
2935  if (!IndexLoad) {
2936  LLVM_DEBUG(dbgs() << "Could not load from a constant pool");
2937  return false;
2938  }
2939 
2940  if (DstTy.getSizeInBits() != 128) {
2941  assert(DstTy.getSizeInBits() == 64 && "Unexpected shuffle result ty");
2942  // This case can be done with TBL1.
2943  MachineInstr *Concat = emitVectorConcat(None, Src1Reg, Src2Reg, MIRBuilder);
2944  if (!Concat) {
2945  LLVM_DEBUG(dbgs() << "Could not do vector concat for tbl1");
2946  return false;
2947  }
2948 
2949  // The constant pool load will be 64 bits, so need to convert to FPR128 reg.
2950  IndexLoad =
2951  emitScalarToVector(64, &AArch64::FPR128RegClass,
2952  IndexLoad->getOperand(0).getReg(), MIRBuilder);
2953 
2954  auto TBL1 = MIRBuilder.buildInstr(
2955  AArch64::TBLv16i8One, {&AArch64::FPR128RegClass},
2956  {Concat->getOperand(0).getReg(), IndexLoad->getOperand(0).getReg()});
2957  constrainSelectedInstRegOperands(*TBL1, TII, TRI, RBI);
2958 
2959  auto Copy =
2960  MIRBuilder
2961  .buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
2962  .addReg(TBL1.getReg(0), 0, AArch64::dsub);
2963  RBI.constrainGenericRegister(Copy.getReg(0), AArch64::FPR64RegClass, MRI);
2964  I.eraseFromParent();
2965  return true;
2966  }
2967 
2968  // For TBL2 we need to emit a REG_SEQUENCE to tie together two consecutive
2969  // Q registers for regalloc.
2970  auto RegSeq = MIRBuilder
2971  .buildInstr(TargetOpcode::REG_SEQUENCE,
2972  {&AArch64::QQRegClass}, {Src1Reg})
2973  .addImm(AArch64::qsub0)
2974  .addUse(Src2Reg)
2975  .addImm(AArch64::qsub1);
2976 
2977  auto TBL2 =
2978  MIRBuilder.buildInstr(AArch64::TBLv16i8Two, {I.getOperand(0).getReg()},
2979  {RegSeq, IndexLoad->getOperand(0).getReg()});
2980  constrainSelectedInstRegOperands(*RegSeq, TII, TRI, RBI);
2981  constrainSelectedInstRegOperands(*TBL2, TII, TRI, RBI);
2982  I.eraseFromParent();
2983  return true;
2984 }
2985 
2986 MachineInstr *AArch64InstructionSelector::emitLaneInsert(
2987  Optional<unsigned> DstReg, unsigned SrcReg, unsigned EltReg,
2988  unsigned LaneIdx, const RegisterBank &RB,
2989  MachineIRBuilder &MIRBuilder) const {
2990  MachineInstr *InsElt = nullptr;
2991  const TargetRegisterClass *DstRC = &AArch64::FPR128RegClass;
2992  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
2993 
2994  // Create a register to define with the insert if one wasn't passed in.
2995  if (!DstReg)
2996  DstReg = MRI.createVirtualRegister(DstRC);
2997 
2998  unsigned EltSize = MRI.getType(EltReg).getSizeInBits();
2999  unsigned Opc = getInsertVecEltOpInfo(RB, EltSize).first;
3000 
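      // An FPR element must itself be widened to a vector register first so
      // that the lane-to-lane form of INS can be used; a GPR element can feed
      // the gpr variant of INS directly.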
3001  if (RB.getID() == AArch64::FPRRegBankID) {
3002  auto InsSub = emitScalarToVector(EltSize, DstRC, EltReg, MIRBuilder);
3003  InsElt = MIRBuilder.buildInstr(Opc, {*DstReg}, {SrcReg})
3004  .addImm(LaneIdx)
3005  .addUse(InsSub->getOperand(0).getReg())
3006  .addImm(0);
3007  } else {
3008  InsElt = MIRBuilder.buildInstr(Opc, {*DstReg}, {SrcReg})
3009  .addImm(LaneIdx)
3010  .addUse(EltReg);
3011  }
3012 
3013  constrainSelectedInstRegOperands(*InsElt, TII, TRI, RBI);
3014  return InsElt;
3015 }
3016 
3017 bool AArch64InstructionSelector::selectInsertElt(
3018  MachineInstr &I, MachineRegisterInfo &MRI) const {
3019  assert(I.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT);
3020 
3021  // Get information on the destination.
3022  unsigned DstReg = I.getOperand(0).getReg();
3023  const LLT DstTy = MRI.getType(DstReg);
3024  unsigned VecSize = DstTy.getSizeInBits();
3025 
3026  // Get information on the element we want to insert into the destination.
3027  unsigned EltReg = I.getOperand(2).getReg();
3028  const LLT EltTy = MRI.getType(EltReg);
3029  unsigned EltSize = EltTy.getSizeInBits();
3030  if (EltSize < 16 || EltSize > 64)
3031  return false; // Don't support all element types yet.
3032 
3033  // Find the definition of the index. Bail out if it's not defined by a
3034  // G_CONSTANT.
3035  unsigned IdxReg = I.getOperand(3).getReg();
3036  auto VRegAndVal = getConstantVRegValWithLookThrough(IdxReg, MRI);
3037  if (!VRegAndVal)
3038  return false;
3039  unsigned LaneIdx = VRegAndVal->Value;
3040 
3041  // Perform the lane insert.
3042  unsigned SrcReg = I.getOperand(1).getReg();
3043  const RegisterBank &EltRB = *RBI.getRegBank(EltReg, MRI, TRI);
3044  MachineIRBuilder MIRBuilder(I);
3045 
3046  if (VecSize < 128) {
3047  // If the vector we're inserting into is smaller than 128 bits, widen it
3048  // to 128 to do the insert.
3049  MachineInstr *ScalarToVec = emitScalarToVector(
3050  VecSize, &AArch64::FPR128RegClass, SrcReg, MIRBuilder);
3051  if (!ScalarToVec)
3052  return false;
3053  SrcReg = ScalarToVec->getOperand(0).getReg();
3054  }
3055 
3056  // Create an insert into a new FPR128 register.
3057  // Note that if our vector is already 128 bits, we end up emitting an extra
3058  // register.
3059  MachineInstr *InsMI =
3060  emitLaneInsert(None, SrcReg, EltReg, LaneIdx, EltRB, MIRBuilder);
3061 
3062  if (VecSize < 128) {
3063  // If we had to widen to perform the insert, then we have to demote back to
3064  // the original size to get the result we want.
3065  unsigned DemoteVec = InsMI->getOperand(0).getReg();
3066  const TargetRegisterClass *RC =
3067  getMinClassForRegBank(*RBI.getRegBank(DemoteVec, MRI, TRI), VecSize);
3068  if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) {
3069  LLVM_DEBUG(dbgs() << "Unsupported register class!\n");
3070  return false;
3071  }
3072  unsigned SubReg = 0;
3073  if (!getSubRegForClass(RC, TRI, SubReg))
3074  return false;
3075  if (SubReg != AArch64::ssub && SubReg != AArch64::dsub) {
3076  LLVM_DEBUG(dbgs() << "Unsupported destination size! (" << VecSize
3077  << "\n");
3078  return false;
3079  }
3080  MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {})
3081  .addReg(DemoteVec, 0, SubReg);
3082  RBI.constrainGenericRegister(DstReg, *RC, MRI);
3083  } else {
3084  // No widening needed.
3085  InsMI->getOperand(0).setReg(DstReg);
3086  constrainSelectedInstRegOperands(*InsMI, TII, TRI, RBI);
3087  }
3088 
3089  I.eraseFromParent();
3090  return true;
3091 }
3092 
3093 bool AArch64InstructionSelector::selectBuildVector(
3094  MachineInstr &I, MachineRegisterInfo &MRI) const {
3095  assert(I.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3096  // Until we port more of the optimized selections, for now just use a vector
3097  // insert sequence.
3098  const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
3099  const LLT EltTy = MRI.getType(I.getOperand(1).getReg());
3100  unsigned EltSize = EltTy.getSizeInBits();
3101  if (EltSize < 16 || EltSize > 64)
3102  return false; // Don't support all element types yet.
3103  const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
3104  MachineIRBuilder MIRBuilder(I);
3105 
3106  const TargetRegisterClass *DstRC = &AArch64::FPR128RegClass;
3107  MachineInstr *ScalarToVec =
3108  emitScalarToVector(DstTy.getElementType().getSizeInBits(), DstRC,
3109  I.getOperand(1).getReg(), MIRBuilder);
3110  if (!ScalarToVec)
3111  return false;
3112 
3113  unsigned DstVec = ScalarToVec->getOperand(0).getReg();
3114  unsigned DstSize = DstTy.getSizeInBits();
3115 
3116  // Keep track of the last MI we inserted. Later on, we might be able to save
3117  // a copy using it.
3118  MachineInstr *PrevMI = nullptr;
3119  for (unsigned i = 2, e = DstSize / EltSize + 1; i < e; ++i) {
3120  // Note that if we don't do a subregister copy, we can end up making an
3121  // extra register.
3122  PrevMI = &*emitLaneInsert(None, DstVec, I.getOperand(i).getReg(), i - 1, RB,
3123  MIRBuilder);
3124  DstVec = PrevMI->getOperand(0).getReg();
3125  }
3126 
3127  // If DstTy's size in bits is less than 128, then emit a subregister copy
3128  // from DstVec to the last register we've defined.
3129  if (DstSize < 128) {
3130  // Force this to be FPR using the destination vector.
3131  const TargetRegisterClass *RC =
3132  getMinClassForRegBank(*RBI.getRegBank(DstVec, MRI, TRI), DstSize);
3133  if (!RC)
3134  return false;
3135  if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) {
3136  LLVM_DEBUG(dbgs() << "Unsupported register class!\n");
3137  return false;
3138  }
3139 
3140  unsigned SubReg = 0;
3141  if (!getSubRegForClass(RC, TRI, SubReg))
3142  return false;
3143  if (SubReg != AArch64::ssub && SubReg != AArch64::dsub) {
3144  LLVM_DEBUG(dbgs() << "Unsupported destination size! (" << DstSize
3145  << "\n");
3146  return false;
3147  }
3148 
3149  unsigned Reg = MRI.createVirtualRegister(RC);
3150  unsigned DstReg = I.getOperand(0).getReg();
3151 
3152  MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {})
3153  .addReg(DstVec, 0, SubReg);
3154  MachineOperand &RegOp = I.getOperand(1);
3155  RegOp.setReg(Reg);
3156  RBI.constrainGenericRegister(DstReg, *RC, MRI);
3157  } else {
3158  // We don't need a subregister copy. Save a copy by re-using the
3159  // destination register on the final insert.
3160  assert(PrevMI && "PrevMI was null?");
3161  PrevMI->getOperand(0).setReg(I.getOperand(0).getReg());
3162  constrainSelectedInstRegOperands(*PrevMI, TII, TRI, RBI);
3163  }
3164 
3165  I.eraseFromParent();
3166  return true;
3167 }
3168 
3169 /// Helper function to find an intrinsic ID on a MachineInstr. Returns the
3170 /// ID if it exists, and 0 otherwise.
3171 static unsigned findIntrinsicID(MachineInstr &I) {
3172  auto IntrinOp = find_if(I.operands(), [&](const MachineOperand &Op) {
3173  return Op.isIntrinsicID();
3174  });
3175  if (IntrinOp == I.operands_end())
3176  return 0;
3177  return IntrinOp->getIntrinsicID();
3178 }
3179 
3180 /// Helper function to emit the correct opcode for a llvm.aarch64.stlxr
3181 /// intrinsic.
3182 static unsigned getStlxrOpcode(unsigned NumBytesToStore) {
3183  switch (NumBytesToStore) {
3184  // TODO: 1, 2, and 4 byte stores.
3185  case 8:
3186  return AArch64::STLXRX;
3187  default:
3188  LLVM_DEBUG(dbgs() << "Unexpected number of bytes to store! ("
3189  << NumBytesToStore << ")\n");
3190  break;
3191  }
3192  return 0;
3193 }
3194 
3195 bool AArch64InstructionSelector::selectIntrinsicWithSideEffects(
3196  MachineInstr &I, MachineRegisterInfo &MRI) const {
3197  // Find the intrinsic ID.
3198  unsigned IntrinID = findIntrinsicID(I);
3199  if (!IntrinID)
3200  return false;
3201  MachineIRBuilder MIRBuilder(I);
3202 
3203  // Select the instruction.
3204  switch (IntrinID) {
3205  default:
3206  return false;
3207  case Intrinsic::trap:
3208  MIRBuilder.buildInstr(AArch64::BRK, {}, {}).addImm(1);
3209  break;
3210  case Intrinsic::aarch64_stlxr:
3211  unsigned StatReg = I.getOperand(0).getReg();
3212  assert(RBI.getSizeInBits(StatReg, MRI, TRI) == 32 &&
3213  "Status register must be 32 bits!");
3214  unsigned SrcReg = I.getOperand(2).getReg();
3215 
3216  if (RBI.getSizeInBits(SrcReg, MRI, TRI) != 64) {
3217  LLVM_DEBUG(dbgs() << "Only support 64-bit sources right now.\n");
3218  return false;
3219  }
3220 
3221  unsigned PtrReg = I.getOperand(3).getReg();
3222  assert(MRI.getType(PtrReg).isPointer() && "Expected pointer operand");
3223 
3224  // Expect only one memory operand.
3225  if (!I.hasOneMemOperand())
3226  return false;
3227 
3228  const MachineMemOperand *MemOp = *I.memoperands_begin();
3229  unsigned NumBytesToStore = MemOp->getSize();
3230  unsigned Opc = getStlxrOpcode(NumBytesToStore);
3231  if (!Opc)
3232  return false;
3233 
3234  auto StoreMI = MIRBuilder.buildInstr(Opc, {StatReg}, {SrcReg, PtrReg});
3235  constrainSelectedInstRegOperands(*StoreMI, TII, TRI, RBI);
3236  }
3237 
3238  I.eraseFromParent();
3239  return true;
3240 }
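// Example of the rewrite performed by the stlxr case above (illustrative
// gMIR; the virtual register names are invented):
//   %stat:gpr(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.stlxr),
//       %val:gpr(s64), %ptr:gpr(p0) :: (volatile store 8)
// is replaced by the selected instruction:
//   %stat:gpr32 = STLXRX %val:gpr64, %ptr:gpr64sp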
3241 
3242 bool AArch64InstructionSelector::selectIntrinsic(
3243  MachineInstr &I, MachineRegisterInfo &MRI) const {
3244  unsigned IntrinID = findIntrinsicID(I);
3245  if (!IntrinID)
3246  return false;
3247  MachineIRBuilder MIRBuilder(I);
3248 
3249  switch (IntrinID) {
3250  default:
3251  break;
3252  case Intrinsic::aarch64_crypto_sha1h:
3253  unsigned DstReg = I.getOperand(0).getReg();
3254  unsigned SrcReg = I.getOperand(2).getReg();
3255 
3256  // FIXME: Should this be an assert?
3257  if (MRI.getType(DstReg).getSizeInBits() != 32 ||
3258  MRI.getType(SrcReg).getSizeInBits() != 32)
3259  return false;
3260 
3261  // The operation has to happen on FPRs. Set up some new FPR registers for
3262  // the source and destination if they are on GPRs.
3263  if (RBI.getRegBank(SrcReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) {
3264  SrcReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
3265  MIRBuilder.buildCopy({SrcReg}, {I.getOperand(2)});
3266 
3267  // Make sure the copy ends up getting constrained properly.
3268  RBI.constrainGenericRegister(I.getOperand(2).getReg(),
3269  AArch64::GPR32RegClass, MRI);
3270  }
3271 
3272  if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID)
3273  DstReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
3274 
3275  // Actually insert the instruction.
3276  auto SHA1Inst = MIRBuilder.buildInstr(AArch64::SHA1Hrr, {DstReg}, {SrcReg});
3277  constrainSelectedInstRegOperands(*SHA1Inst, TII, TRI, RBI);
3278 
3279  // Did we create a new register for the destination?
3280  if (DstReg != I.getOperand(0).getReg()) {
3281  // Yep. Copy the result of the instruction back into the original
3282  // destination.
3283  MIRBuilder.buildCopy({I.getOperand(0)}, {DstReg});
3284  RBI.constrainGenericRegister(I.getOperand(0).getReg(),
3285  AArch64::GPR32RegClass, MRI);
3286  }
3287 
3288  I.eraseFromParent();
3289  return true;
3290  }
3291  return false;
3292 }
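// Example of the bank-fixing above (illustrative; register names invented):
// when both operands start out on GPR, the selected code is
//   %src_fpr:fpr32 = COPY %src_gpr
//   %dst_fpr:fpr32 = SHA1Hrr %src_fpr
//   %dst_gpr:gpr32 = COPY %dst_fpr
// so the FPR-only SHA1H instruction is fed and drained through
// bank-crossing copies.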
3293 
3294 /// SelectArithImmed - Select an immediate value that can be represented as
3295 /// a 12-bit value shifted left by either 0 or 12. If so, return true with
3296 /// Val set to the 12-bit value and Shift set to the shifter operand.
3297 InstructionSelector::ComplexRendererFns
3298 AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
3299  MachineInstr &MI = *Root.getParent();
3300  MachineBasicBlock &MBB = *MI.getParent();
3301  MachineFunction &MF = *MBB.getParent();
3302  MachineRegisterInfo &MRI = MF.getRegInfo();
3303 
3304  // This function is called from the addsub_shifted_imm ComplexPattern,
3305  // which lists [imm] as the list of opcodes it's interested in. However,
3306  // we still need to check whether the operand is actually an immediate
3307  // here, because the ComplexPattern opcode list is only used in
3308  // root-level opcode matching.
3309  uint64_t Immed;
3310  if (Root.isImm())
3311  Immed = Root.getImm();
3312  else if (Root.isCImm())
3313  Immed = Root.getCImm()->getZExtValue();
3314  else if (Root.isReg()) {
3315  MachineInstr *Def = MRI.getVRegDef(Root.getReg());
3316  if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
3317  return None;
3318  MachineOperand &Op1 = Def->getOperand(1);
3319  if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
3320  return None;
3321  Immed = Op1.getCImm()->getZExtValue();
3322  } else
3323  return None;
3324 
3325  unsigned ShiftAmt;
3326 
3327  if (Immed >> 12 == 0) {
3328  ShiftAmt = 0;
3329  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
3330  ShiftAmt = 12;
3331  Immed = Immed >> 12;
3332  } else
3333  return None;
3334 
3335  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
3336  return {{
3337  [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
3338  [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
3339  }};
3340 }
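// Worked examples for the split above:
//   Immed = 0xabc    -> 0xabc >> 12 == 0, so ShiftAmt = 0; render {0xabc, LSL #0}.
//   Immed = 0x123000 -> the low 12 bits are clear and 0x123000 >> 24 == 0, so
//                       ShiftAmt = 12 and Immed becomes 0x123; render {0x123, LSL #12}.
//   Immed = 0x1001   -> representable neither way; return None and let other
//                       patterns (e.g. a materialized constant) handle it.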
3341 
3342 /// Select a "register plus unscaled signed 9-bit immediate" address. This
3343 /// should only match when there is an offset that is not valid for a scaled
3344 /// immediate addressing mode. The "Size" argument is the size in bytes of the
3345 /// memory reference, which is needed here to know what is valid for a scaled
3346 /// immediate.
3347 InstructionSelector::ComplexRendererFns
3348 AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
3349  unsigned Size) const {
3350  MachineRegisterInfo &MRI =
3351  Root.getParent()->getParent()->getParent()->getRegInfo();
3352 
3353  if (!Root.isReg())
3354  return None;
3355 
3356  if (!isBaseWithConstantOffset(Root, MRI))
3357  return None;
3358 
3359  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
3360  if (!RootDef)
3361  return None;
3362 
3363  MachineOperand &OffImm = RootDef->getOperand(2);
3364  if (!OffImm.isReg())
3365  return None;
3366  MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
3367  if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
3368  return None;
3369  int64_t RHSC;
3370  MachineOperand &RHSOp1 = RHS->getOperand(1);
3371  if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
3372  return None;
3373  RHSC = RHSOp1.getCImm()->getSExtValue();
3374 
3375  // If the offset is valid as a scaled immediate, don't match here.
3376  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
3377  return None;
3378  if (RHSC >= -256 && RHSC < 256) {
3379  MachineOperand &Base = RootDef->getOperand(1);
3380  return {{
3381  [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
3382  [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
3383  }};
3384  }
3385  return None;
3386 }
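// Worked example: for a 4-byte access (Size == 4) an offset of 2 fails the
// scaled check ((2 & 3) != 0) but lies in [-256, 256), so this returns
// {Base, 2} and an unscaled LDUR/STUR form can be used. An offset of 8 is
// valid as a scaled immediate and is deliberately rejected here, leaving it
// for selectAddrModeIndexed below.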
3387 
3388 /// Select a "register plus scaled unsigned 12-bit immediate" address. The
3389 /// "Size" argument is the size in bytes of the memory reference, which
3390 /// determines the scale.
3391 InstructionSelector::ComplexRendererFns
3392 AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
3393  unsigned Size) const {
3394  MachineRegisterInfo &MRI =
3395  Root.getParent()->getParent()->getParent()->getRegInfo();
3396 
3397  if (!Root.isReg())
3398  return None;
3399 
3400  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
3401  if (!RootDef)
3402  return None;
3403 
3404  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
3405  return {{
3406  [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
3407  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
3408  }};
3409  }
3410 
3411  if (isBaseWithConstantOffset(Root, MRI)) {
3412  MachineOperand &LHS = RootDef->getOperand(1);
3413  MachineOperand &RHS = RootDef->getOperand(2);
3414  MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
3415  MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
3416  if (LHSDef && RHSDef) {
3417  int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
3418  unsigned Scale = Log2_32(Size);
3419  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
3420  if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
3421  return {{
3422  [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
3423  [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
3424  }};
3425 
3426  return {{
3427  [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
3428  [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
3429  }};
3430  }
3431  }
3432  }
3433 
3434  // Before falling back to our general case, check if the unscaled
3435  // instructions can handle this. If so, that's preferable.
3436  if (selectAddrModeUnscaled(Root, Size).hasValue())
3437  return None;
3438 
3439  return {{
3440  [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3441  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
3442  }};
3443 }
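// Worked example: for an 8-byte access (Size == 8, Scale == 3) with a
// constant offset of 32, (32 & 7) == 0 and 0 <= 32 < (0x1000 << 3), so the
// renderers produce {Base, 32 >> 3 == 4}: the scaled-immediate field of the
// LDR/STR holds the offset divided by the access size.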
3444 
3445 void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
3446  const MachineInstr &MI) const {
3447  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
3448  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
3449  Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI);
3450  assert(CstVal && "Expected constant value");
3451  MIB.addImm(CstVal.getValue());
3452 }
3453 
3454 namespace llvm {
3455 InstructionSelector *
3456 createAArch64InstructionSelector(const AArch64TargetMachine &TM,
3457  AArch64Subtarget &Subtarget,
3458  AArch64RegisterBankInfo &RBI) {
3459  return new AArch64InstructionSelector(TM, Subtarget, RBI);
3460 }
3461 }