//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "aarch64-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  ComplexRendererFns selectArithImmed(MachineOperand &Root) const;

  ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                            unsigned Size) const;

  ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
                                           unsigned Size) const;
  template <int Width>
  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }

  void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI) const;

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI,
                         bool GetAllRegSet = false) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() <= 16)
      return &AArch64::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}
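// For instance, a 32-bit scalar on the GPR bank maps to GPR32 (or GPR32all,
// which additionally covers WSP/WZR, when GetAllRegSet is set), while a
// 128-bit value is only representable on the FPR bank (FPR128).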

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - its operands are not all in the same bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the aarch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical register operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    }
    break;
  }
  return GenericOpc;
}
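// For example, a 64-bit G_SHL on the GPR bank selects to LSLVXr, and a 32-bit
// G_FADD on the FPR bank selects to FADDSrr; anything else falls through and
// the caller detects failure by comparing the result against GenericOpc.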

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}
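// For example, a 32-bit G_LOAD whose value lives on the FPR bank becomes
// LDRSui (load into an S register), while the same load on the GPR bank
// becomes LDRWui; the "ui" suffix denotes the scaled-unsigned-immediate
// addressing mode.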

static bool selectFP16CopyFromGPR32(MachineInstr &I, const TargetInstrInfo &TII,
                                    MachineRegisterInfo &MRI, unsigned SrcReg) {
  // Copies from gpr32 to fpr16 need to use a sub-register copy.
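  // Schematically (illustrative vreg names), the two instructions built
  // below produce:
  //   %tmp:fpr32 = COPY %src:gpr32
  //   %dst:fpr16 = COPY %tmp.hsub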
  unsigned CopyReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::COPY))
      .addDef(CopyReg)
      .addUse(SrcReg);
  unsigned SubRegCopy = MRI.createVirtualRegister(&AArch64::FPR16RegClass);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
      .addDef(SubRegCopy)
      .addUse(CopyReg, 0, AArch64::hsub);

  MachineOperand &RegOp = I.getOperand(1);
  RegOp.setReg(SubRegCopy);
  return true;
}

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();

  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    if (TRI.getRegClass(AArch64::FPR16RegClassID)->contains(DstReg) &&
        !TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
      const RegisterBank &RegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
      const TargetRegisterClass *SrcRC = getRegClassForTypeOnBank(
          MRI.getType(SrcReg), RegBank, RBI, /* GetAllRegSet */ true);
      if (SrcRC == &AArch64::GPR32allRegClass)
        return selectFP16CopyFromGPR32(I, TII, MRI, SrcReg);
    }
    assert(I.isCopy() && "Generic operators do not allow physical registers");
    return true;
  }

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
  (void)DstSize;
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  (void)SrcSize;
  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert(
      (DstSize == SrcSize ||
       // Copies are a means to set up initial types, the number of
       // bits may not exactly match.
       (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
        DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI)) ||
       // Copies are a means to copy bits around, and as long as we are
       // on the same register class, that's fine. Otherwise, that
       // means we need some SUBREG_TO_REG or AND & co.
       (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
      "Copy with different width?!");
  assert((DstSize <= 64 || RegBank.getID() == AArch64::FPRRegBankID) &&
         "GPRs cannot get more than 64-bit width values");

  const TargetRegisterClass *RC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), RegBank, RBI, /* GetAllRegSet */ true);
  if (!RC) {
    LLVM_DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
    return false;
  }

  if (!TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(SrcReg);
    const TargetRegisterClass *SrcRC =
        RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
    const RegisterBank *RB = nullptr;
    if (!SrcRC) {
      RB = RegClassOrBank.get<const RegisterBank *>();
      SrcRC = getRegClassForTypeOnBank(MRI.getType(SrcReg), *RB, RBI, true);
    }
    // Copies from fpr16 to gpr32 need to use SUBREG_TO_REG.
    if (RC == &AArch64::GPR32allRegClass && SrcRC == &AArch64::FPR16RegClass) {
      unsigned PromoteReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(AArch64::SUBREG_TO_REG))
          .addDef(PromoteReg)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::hsub);
      MachineOperand &RegOp = I.getOperand(1);
      RegOp.setReg(PromoteReg);
    } else if (RC == &AArch64::FPR16RegClass &&
               SrcRC == &AArch64::GPR32allRegClass) {
      selectFP16CopyFromGPR32(I, TII, MRI, SrcReg);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return true;
}

static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  }
  return GenericOpc;
}

static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}

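/// Convert an IR FP comparison predicate to a pair of AArch64 condition
/// codes. Some predicates have no single-NZCV-check equivalent: for example
/// FCMP_ONE (ordered and not equal) holds when MI or GT holds, and FCMP_UEQ
/// (unordered or equal) when EQ or VS holds, so the second check is returned
/// in \p CondCode2 (AL when unused).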
static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

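// Matches a conditional branch on an integer compare against zero and folds
// it into a compare-and-branch: e.g. a G_ICMP eq %x, 0 feeding G_BRCOND
// becomes CBZW/CBZX, and the ne form becomes CBNZW/CBNZX.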
bool AArch64InstructionSelector::selectCompareBranch(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
  MachineInstr *CCMI = MRI.getVRegDef(CondReg);
  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
    CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
  if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
    return false;

  unsigned LHS = CCMI->getOperand(2).getReg();
  unsigned RHS = CCMI->getOperand(3).getReg();
  if (!getConstantVRegVal(RHS, MRI))
    std::swap(RHS, LHS);

  const auto RHSImm = getConstantVRegVal(RHS, MRI);
  if (!RHSImm || *RHSImm != 0)
    return false;

  const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
  if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
    return false;

  const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
  unsigned CBOpc = 0;
  if (CmpWidth <= 32)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
  else if (CmpWidth == 64)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
  else
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
      .addUse(LHS)
      .addMBB(DestMBB)
      .constrainAllUses(TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectVaStartAAPCS(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  return false;
}

bool AArch64InstructionSelector::selectVaStartDarwin(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  unsigned ListReg = I.getOperand(0).getReg();

  unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
          .addDef(ArgsAddrReg)
          .addFrameIndex(FuncInfo->getVarArgsStackIndex())
          .addImm(0)
          .addImm(0);

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);

  MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
            .addUse(ArgsAddrReg)
            .addUse(ListReg)
            .addImm(0)
            .addMemOperand(*I.memoperands_begin());

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::select(MachineInstr &I,
                                        CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  // G_PHI requires same handling as PHI
  if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
      const unsigned DefReg = I.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const TargetRegisterClass *DefRC = nullptr;
      if (TargetRegisterInfo::isPhysicalRegister(DefReg)) {
        DefRC = TRI.getRegClass(DefReg);
      } else {
        const RegClassOrRegBank &RegClassOrBank =
            MRI.getRegClassOrRegBank(DefReg);

        DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
        if (!DefRC) {
          if (!DefTy.isValid()) {
            LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
            return false;
          }
          const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
          DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
          if (!DefRC) {
            LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
            return false;
          }
        }
      }
      I.setDesc(TII.get(TargetOpcode::PHI));

      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(
        dbgs() << "Generic instruction has unexpected implicit operands\n");
    return false;
  }

  if (selectImpl(I, CoverageInfo))
    return true;

  LLT Ty =
      I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};

  switch (Opcode) {
  case TargetOpcode::G_BRCOND: {
    if (Ty.getSizeInBits() > 32) {
      // We shouldn't need this on AArch64, but it would be implemented as an
      // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
      // bit being tested is < 32.
      LLVM_DEBUG(dbgs() << "G_BRCOND has type: " << Ty
                        << ", expected at most 32-bits");
      return false;
    }

    const unsigned CondReg = I.getOperand(0).getReg();
    MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

    if (selectCompareBranch(I, MF, MRI))
      return true;

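    // Otherwise fall back to testing bit 0 of the condition register: TBNZ
    // branches to DestMBB when the designated bit (here 0) is non-zero, which
    // matches the boolean-in-bit-0 convention used for s1 values.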
    auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
                   .addUse(CondReg)
                   .addImm(/*bit offset=*/0)
                   .addMBB(DestMBB);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
  }

  case TargetOpcode::G_BRINDIRECT: {
    I.setDesc(TII.get(AArch64::BR));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_CONSTANT: {
    const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;

    const LLT s32 = LLT::scalar(32);
    const LLT s64 = LLT::scalar(64);
    const LLT p0 = LLT::pointer(0, 64);

    const unsigned DefReg = I.getOperand(0).getReg();
    const LLT DefTy = MRI.getType(DefReg);
    const unsigned DefSize = DefTy.getSizeInBits();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    // FIXME: Redundant check, but even less readable when factored out.
    if (isFP) {
      if (Ty != s32 && Ty != s64) {
        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
                          << " constant, expected: " << s32 << " or " << s64
                          << '\n');
        return false;
      }

      if (RB.getID() != AArch64::FPRRegBankID) {
        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
                          << " constant on bank: " << RB
                          << ", expected: FPR\n");
        return false;
      }

      // The case when we have 0.0 is covered by tablegen. Reject it here so we
      // can be sure tablegen works correctly and isn't rescued by this code.
      if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0))
        return false;
    } else {
      // s32 and s64 are covered by tablegen.
      if (Ty != p0) {
        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
                          << " constant, expected: " << s32 << ", " << s64
                          << ", or " << p0 << '\n');
        return false;
      }

      if (RB.getID() != AArch64::GPRRegBankID) {
        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
                          << " constant on bank: " << RB
                          << ", expected: GPR\n");
        return false;
      }
    }

    const unsigned MovOpc =
        DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;

    I.setDesc(TII.get(MovOpc));

    if (isFP) {
      const TargetRegisterClass &GPRRC =
          DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
      const TargetRegisterClass &FPRRC =
          DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;

      const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
      MachineOperand &RegOp = I.getOperand(0);
      RegOp.setReg(DefGPRReg);

      BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
              TII.get(AArch64::COPY))
          .addDef(DefReg)
          .addUse(DefGPRReg);

      if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
        return false;
      }

      MachineOperand &ImmOp = I.getOperand(1);
      // FIXME: Is going through int64_t always correct?
      ImmOp.ChangeToImmediate(
          ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
    } else if (I.getOperand(1).isCImm()) {
      uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
      I.getOperand(1).ChangeToImmediate(Val);
    } else if (I.getOperand(1).isImm()) {
      uint64_t Val = I.getOperand(1).getImm();
      I.getOperand(1).ChangeToImmediate(Val);
    }

    constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_EXTRACT: {
    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    (void)DstTy;
    unsigned SrcSize = SrcTy.getSizeInBits();
    // Larger extracts are vectors, same-size extracts should be something else
    // by now (either split up or simplified to a COPY).
    if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri));
    MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
                                      Ty.getSizeInBits() - 1);

    if (SrcSize < 64) {
      assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 &&
             "unexpected G_EXTRACT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
            TII.get(AArch64::COPY))
        .addDef(I.getOperand(0).getReg())
        .addUse(DstReg, 0, AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(0).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(0).setReg(DstReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_INSERT: {
    LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    unsigned DstSize = DstTy.getSizeInBits();
    // Larger inserts are vectors, same-size ones should be something else by
    // now (split up or turned into COPYs).
    if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri));
    unsigned LSB = I.getOperand(3).getImm();
    unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    I.getOperand(3).setImm((DstSize - LSB) % DstSize);
    MachineInstrBuilder(MF, I).addImm(Width - 1);

    if (DstSize < 64) {
      assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 &&
             "unexpected G_INSERT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
            TII.get(AArch64::SUBREG_TO_REG))
        .addDef(SrcReg)
        .addImm(0)
        .addUse(I.getOperand(2).getReg())
        .addImm(AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(2).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(2).setReg(SrcReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FRAME_INDEX: {
    // allocas and G_FRAME_INDEX are only supported in addrspace(0).
    if (Ty != LLT::pointer(0, 64)) {
      LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
                        << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }
    I.setDesc(TII.get(AArch64::ADDXri));

    // MOs for a #0 shifted immediate.
    I.addOperand(MachineOperand::CreateImm(0));
    I.addOperand(MachineOperand::CreateImm(0));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_GLOBAL_VALUE: {
    auto GV = I.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // FIXME: we don't support TLS yet.
      return false;
    }
    unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
    if (OpFlags & AArch64II::MO_GOT) {
      I.setDesc(TII.get(AArch64::LOADgot));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else if (TM.getCodeModel() == CodeModel::Large) {
      // Materialize the global using movz/movk instructions.
      unsigned MovZDstReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      auto InsertPt = std::next(I.getIterator());
      auto MovZ =
          BuildMI(MBB, InsertPt, I.getDebugLoc(), TII.get(AArch64::MOVZXi))
              .addDef(MovZDstReg);
      MovZ->addOperand(MF, I.getOperand(1));
      MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
                                         AArch64II::MO_NC);
      MovZ->addOperand(MF, MachineOperand::CreateImm(0));
      constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);

      auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags,
                           unsigned Offset, unsigned ForceDstReg) {
        unsigned DstReg =
            ForceDstReg ? ForceDstReg
                        : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
        auto MovI = BuildMI(MBB, InsertPt, MovZ->getDebugLoc(),
                            TII.get(AArch64::MOVKXi))
                        .addDef(DstReg)
                        .addReg(SrcReg);
        MovI->addOperand(MF, MachineOperand::CreateGA(
                                 GV, MovZ->getOperand(1).getOffset(), Flags));
        MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
        constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
        return DstReg;
      };
      unsigned DstReg = BuildMovK(MovZ->getOperand(0).getReg(),
                                  AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
      DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
      BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
      I.eraseFromParent();
      return true;
    } else {
      I.setDesc(TII.get(AArch64::MOVaddr));
      I.getOperand(1).setTargetFlags(AArch64II::MO_PAGE);
      MachineInstrBuilder MIB(MF, I);
      MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
                           OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    LLT PtrTy = MRI.getType(I.getOperand(1).getReg());

    if (PtrTy != LLT::pointer(0, 64)) {
      LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
                        << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }

    auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }
    unsigned MemSizeInBits = MemOp.getSize() * 8;

    // FIXME: PR36018: Volatile loads in some cases are incorrectly selected by
    // folding with an extend. Until we have a G_SEXTLOAD solution bail out if
    // we hit one.
    if (Opcode == TargetOpcode::G_LOAD && MemOp.isVolatile())
      return false;

    const unsigned PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
    // Sanity-check the pointer register.
    assert(PtrRB.getID() == AArch64::GPRRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(MRI.getType(PtrReg).isPointer() &&
           "Load/Store pointer operand isn't a pointer");
#endif

    const unsigned ValReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);

    const unsigned NewOpc =
        selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemSizeInBits);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));

    uint64_t Offset = 0;
    auto *PtrMI = MRI.getVRegDef(PtrReg);

    // Try to fold a GEP into our unsigned immediate addressing mode.
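    // For example, a G_GEP adding a constant 16 to the base of a 64-bit
    // access (Size == 8, Scale == 3) folds to an immediate of 2 below, since
    // the ui-form instructions encode the offset in units of the access size.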
    if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
      if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
        int64_t Imm = *COff;
        const unsigned Size = MemSizeInBits / 8;
        const unsigned Scale = Log2_32(Size);
        if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
          unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
          I.getOperand(1).setReg(Ptr2Reg);
          PtrMI = MRI.getVRegDef(Ptr2Reg);
          Offset = Imm / Size;
        }
      }
    }

    // If we haven't folded anything into our addressing mode yet, try to fold
    // a frame index into the base+offset.
    if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
      I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());

    I.addOperand(MachineOperand::CreateImm(Offset));

    // If we're storing a 0, use WZR/XZR.
    if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
      if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
        if (I.getOpcode() == AArch64::STRWui)
          I.getOperand(0).setReg(AArch64::WZR);
        else if (I.getOpcode() == AArch64::STRXui)
          I.getOperand(0).setReg(AArch64::XZR);
      }
    }

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
      return false;
    }

    if (Ty != LLT::scalar(64)) {
      LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
                        << ", expected: " << LLT::scalar(64) << '\n');
      return false;
    }

    unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
                                                             : AArch64::UMULHrr;
    I.setDesc(TII.get(NewOpc));

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:

  case TargetOpcode::G_OR:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_GEP: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned OpSize = Ty.getSizeInBits();

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));
    // FIXME: Should the type be always reset in setDesc?

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_PTR_MASK: {
    uint64_t Align = I.getOperand(2).getImm();
    if (Align >= 64 || Align == 0)
      return false;

    uint64_t Mask = ~((1ULL << Align) - 1);
    I.setDesc(TII.get(AArch64::ANDXri));
    I.getOperand(2).ChangeToImmediate(
        AArch64_AM::encodeLogicalImmediate(Mask, 64));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());

    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

    if (DstRB.getID() != SrcRB.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
      return false;
    }

    if (DstRB.getID() == AArch64::GPRRegBankID) {
      const TargetRegisterClass *DstRC =
          getRegClassForTypeOnBank(DstTy, DstRB, RBI);
      if (!DstRC)
        return false;

      const TargetRegisterClass *SrcRC =
          getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
      if (!SrcRC)
        return false;

      if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
          !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      if (DstRC == SrcRC) {
        // Nothing to be done
      } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
                 SrcTy == LLT::scalar(64)) {
        llvm_unreachable("TableGen can import this case");
        return false;
      } else if (DstRC == &AArch64::GPR32RegClass &&
                 SrcRC == &AArch64::GPR64RegClass) {
        I.getOperand(1).setSubReg(AArch64::sub_32);
      } else {
        LLVM_DEBUG(
            dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      I.setDesc(TII.get(TargetOpcode::COPY));
      return true;
    } else if (DstRB.getID() == AArch64::FPRRegBankID) {
      if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
        I.setDesc(TII.get(AArch64::XTNv4i16));
        constrainSelectedInstRegOperands(I, TII, TRI, RBI);
        return true;
      }
    }

    return false;
  }

  case TargetOpcode::G_ANYEXT: {
    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
    if (RBDst.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst
                        << ", expected: GPR\n");
      return false;
    }

    const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
    if (RBSrc.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc
                        << ", expected: GPR\n");
      return false;
    }

    const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

    if (DstSize == 0) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
      return false;
    }

    if (DstSize != 64 && DstSize > 32) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
                        << ", expected: 32 or 64\n");
      return false;
    }
    // At this point G_ANYEXT is just like a plain COPY, but we need
    // to explicitly form the 64-bit value if any.
    if (DstSize > 32) {
      unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(ExtSrc)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);
      I.getOperand(1).setReg(ExtSrc);
    }
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
1237 
1238  case TargetOpcode::G_ZEXT:
1239  case TargetOpcode::G_SEXT: {
1240  unsigned Opcode = I.getOpcode();
1241  const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
1242  SrcTy = MRI.getType(I.getOperand(1).getReg());
1243  const bool isSigned = Opcode == TargetOpcode::G_SEXT;
1244  const unsigned DefReg = I.getOperand(0).getReg();
1245  const unsigned SrcReg = I.getOperand(1).getReg();
1246  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1247 
1248  if (RB.getID() != AArch64::GPRRegBankID) {
1249  LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
1250  << ", expected: GPR\n");
1251  return false;
1252  }
1253 
1254  MachineInstr *ExtI;
1255  if (DstTy == LLT::scalar(64)) {
1256  // FIXME: Can we avoid manually doing this?
1257  if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
1258  LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
1259  << " operand\n");
1260  return false;
1261  }
1262 
1263  const unsigned SrcXReg =
1264  MRI.createVirtualRegister(&AArch64::GPR64RegClass);
1265  BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
1266  .addDef(SrcXReg)
1267  .addImm(0)
1268  .addUse(SrcReg)
1269  .addImm(AArch64::sub_32);
1270 
1271  const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
1272  ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
1273  .addDef(DefReg)
1274  .addUse(SrcXReg)
1275  .addImm(0)
1276  .addImm(SrcTy.getSizeInBits() - 1);
1277  } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
1278  const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
1279  ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
1280  .addDef(DefReg)
1281  .addUse(SrcReg)
1282  .addImm(0)
1283  .addImm(SrcTy.getSizeInBits() - 1);
1284  } else {
1285  return false;
1286  }

    constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
    if (NewOpc == Opcode)
      return false;

    I.setDesc(TII.get(NewOpc));
    constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    return true;
  }

  case TargetOpcode::G_INTTOPTR:
    // The importer is currently unable to import pointer types since they
    // didn't exist in SelectionDAG.
    return selectCopy(I, TII, MRI, TRI, RBI);

  case TargetOpcode::G_BITCAST:
    // Imported SelectionDAG rules can handle every bitcast except those that
    // bitcast from a type to the same type. Ideally, these shouldn't occur
    // but we might not run an optimizer that deletes them.
    if (MRI.getType(I.getOperand(0).getReg()) ==
        MRI.getType(I.getOperand(1).getReg()))
      return selectCopy(I, TII, MRI, TRI, RBI);
    return false;

  case TargetOpcode::G_SELECT: {
    if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
      LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
                        << ", expected: " << LLT::scalar(1) << '\n');
      return false;
    }

    const unsigned CondReg = I.getOperand(1).getReg();
    const unsigned TReg = I.getOperand(2).getReg();
    const unsigned FReg = I.getOperand(3).getReg();

    unsigned CSelOpc = 0;

    if (Ty == LLT::scalar(32)) {
      CSelOpc = AArch64::CSELWr;
    } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
      CSelOpc = AArch64::CSELXr;
    } else {
      return false;
    }

    MachineInstr &TstMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
             .addDef(AArch64::WZR)
             .addUse(CondReg)
             .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));

    MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
                                .addDef(I.getOperand(0).getReg())
                                .addUse(TReg)
                                .addUse(FReg)
                                .addImm(AArch64CC::NE);

    constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ICMP: {
    if (Ty != LLT::scalar(32)) {
      LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty
                        << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    unsigned ZReg = 0;

    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::SUBSWrr;
      ZReg = AArch64::WZR;
    } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
      CmpOpc = AArch64::SUBSXrr;
      ZReg = AArch64::XZR;
    } else {
      return false;
    }

    // CSINC increments the result by one when the condition code is false.
    // Therefore, we have to invert the predicate to get an increment by 1 when
    // the predicate is true.
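    // For example, for ICMP_EQ we emit CSINC %dst, wzr, wzr, NE, which yields
    // wzr + 1 == 1 exactly when NE is false, i.e. when the operands compared
    // equal.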
    const AArch64CC::CondCode invCC =
        changeICMPPredToAArch64CC(CmpInst::getInversePredicate(
            (CmpInst::Predicate)I.getOperand(1).getPredicate()));

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addDef(ZReg)
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(I.getOperand(0).getReg())
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(invCC);

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_FCMP: {
    if (Ty != LLT::scalar(32)) {
      LLVM_DEBUG(dbgs() << "G_FCMP result has type: " << Ty
                        << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::FCMPSrr;
    } else if (CmpTy == LLT::scalar(64)) {
      CmpOpc = AArch64::FCMPDrr;
    } else {
      return false;
    }

    // FIXME: regbank

    AArch64CC::CondCode CC1, CC2;
    changeFCMPPredToAArch64CC(
        (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    const unsigned DefReg = I.getOperand(0).getReg();
    unsigned Def1Reg = DefReg;
    if (CC2 != AArch64CC::AL)
      Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(Def1Reg)
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(getInvertedCondCode(CC1));

    if (CC2 != AArch64CC::AL) {
      unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
      MachineInstr &CSet2MI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
               .addDef(Def2Reg)
               .addUse(AArch64::WZR)
               .addUse(AArch64::WZR)
               .addImm(getInvertedCondCode(CC2));
      MachineInstr &OrMI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
               .addDef(DefReg)
               .addUse(Def1Reg)
               .addUse(Def2Reg);
      constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
      constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
    }

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_VASTART:
    return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
                                : selectVaStartAAPCS(I, MF, MRI);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    if (!I.getOperand(0).isIntrinsicID())
      return false;
    if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
      return false;
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::BRK)).addImm(1);
    I.eraseFromParent();
    return true;
  case TargetOpcode::G_IMPLICIT_DEF: {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const unsigned DstReg = I.getOperand(0).getReg();
    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const TargetRegisterClass *DstRC =
        getRegClassForTypeOnBank(DstTy, DstRB, RBI);
    RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
    return true;
  }
  }

  return false;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
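/// For example, 0x123000 is encodable as imm = 0x123 with LSL 12, whereas
/// 0x123001 is not encodable at all and falls back to other selection rules.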
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
  MachineInstr &MI = *Root.getParent();
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  uint64_t Immed;
  if (Root.isImm())
    Immed = Root.getImm();
  else if (Root.isCImm())
    Immed = Root.getCImm()->getZExtValue();
  else if (Root.isReg()) {
    MachineInstr *Def = MRI.getVRegDef(Root.getReg());
    if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
      return None;
    MachineOperand &Op1 = Def->getOperand(1);
    if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
      return None;
    Immed = Op1.getCImm()->getZExtValue();
  } else
    return None;

  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return None;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
  }};
}

/// Select a "register plus unscaled signed 9-bit immediate" address. This
/// should only match when there is an offset that is not valid for a scaled
/// immediate addressing mode. The "Size" argument is the size in bytes of the
/// memory reference, which is needed here to know what is valid for a scaled
/// immediate.
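/// For example, an offset of -8 from a base register cannot be encoded by the
/// scaled LDRXui form (which only takes non-negative multiples of the access
/// size), so it matches here and selects the LDUR/STUR family instead.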
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
                                                   unsigned Size) const {
1554  unsigned Size) const {
1555  MachineRegisterInfo &MRI =
1556  Root.getParent()->getParent()->getParent()->getRegInfo();
1557 
1558  if (!Root.isReg())
1559  return None;
1560 
1561  if (!isBaseWithConstantOffset(Root, MRI))
1562  return None;
1563 
1564  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
1565  if (!RootDef)
1566  return None;
1567 
1568  MachineOperand &OffImm = RootDef->getOperand(2);
1569  if (!OffImm.isReg())
1570  return None;
1571  MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
1572  if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
1573  return None;
1574  int64_t RHSC;
1575  MachineOperand &RHSOp1 = RHS->getOperand(1);
1576  if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
1577  return None;
1578  RHSC = RHSOp1.getCImm()->getSExtValue();
1579 
1580  // If the offset is valid as a scaled immediate, don't match here.
1581  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
1582  return None;
1583  if (RHSC >= -256 && RHSC < 256) {
1584  MachineOperand &Base = RootDef->getOperand(1);
1585  return {{
1586  [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
1587  [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
1588  }};
1589  }
1590  return None;
1591 }

/// Select a "register plus scaled unsigned 12-bit immediate" address. The
/// "Size" argument is the size in bytes of the memory reference, which
/// determines the scale.
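/// For example, for an 8-byte access (Size == 8) a constant offset of 16
/// yields base + imm 2, since the immediate is encoded in units of Size;
/// unaligned or out-of-range offsets fall through to the other address modes.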
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
                                                  unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
    if (LHSDef && RHSDef) {
      int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
          return {{
              [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
              [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
          }};

        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
        }};
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (selectAddrModeUnscaled(Root, Size).hasValue())
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
  }};
}

void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
  Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI);
  assert(CstVal && "Expected constant value");
  MIB.addImm(CstVal.getValue());
}

namespace llvm {
InstructionSelector *
createAArch64InstructionSelector(const AArch64TargetMachine &TM,
                                 AArch64Subtarget &Subtarget,
                                 AArch64RegisterBankInfo &RBI) {
  return new AArch64InstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm
We don&#39;t rely on operator== working on double values, as it returns true for things that are clearly ...
Definition: APFloat.h:1130
bool isValid() const
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:885
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
void setImm(int64_t immVal)
self_iterator getIterator()
Definition: ilist_node.h:82
const MachineInstrBuilder & addFrameIndex(int Idx) const
T dyn_cast() const
Returns the current pointer if it is of the specified pointer type, otherwises returns null...
Definition: PointerUnion.h:142
bool isCopy() const
Definition: MachineInstr.h:892
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
StringRef getName(unsigned Opcode) const
Returns the name for the instructions with the given opcode.
Definition: MCInstrInfo.h:51
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
#define GET_GLOBALISEL_TEMPORARIES_INIT
const APFloat & getValueAPF() const
Definition: Constants.h:299
unsigned createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:416
RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
MachineOperand class - Representation of each machine instruction operand.
Intrinsic::ID getIntrinsicID() const
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Definition: PPCPredicates.h:27
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition: Utils.cpp:87
This class implements the register bank concept.
Definition: RegisterBank.h:29
int64_t getImm() const
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:133
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:531
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:924
Optional< int64_t > getConstantVRegVal(unsigned VReg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:184
bool isPointer() const
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:156
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Provides the logic to select generic machine instructions.
Representation of each machine instruction.
Definition: MachineInstr.h:60
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
This class provides the information for the target register banks.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P)
InstructionSelector * createAArch64InstructionSelector(const AArch64TargetMachine &, AArch64Subtarget &, AArch64RegisterBankInfo &)
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
int64_t getOffset() const
Return the offset from the symbol in this operand.
void setReg(unsigned Reg)
Change the register this operand corresponds to.
#define I(x, y, z)
Definition: MD5.cpp:58
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page...
void setSubReg(unsigned subReg)
static const TargetRegisterClass * constrainGenericRegister(unsigned Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
#define GET_GLOBALISEL_PREDICATES_INIT
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
T get() const
Returns the value of the specified pointer type.
Definition: PointerUnion.h:135
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static void changeFCMPPredToAArch64CC(CmpInst::Predicate P, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel...
Definition: TargetOpcodes.h:31
unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
constexpr char Size[]
Key for Kernel::Arg::Metadata::mSize.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E&#39;s largest value.
Definition: BitmaskEnum.h:81
void ChangeToFrameIndex(int Idx)
Replace this operand with a frame index.
IRTranslator LLVM IR MI
static bool unsupportedBinOp(const MachineInstr &I, const AArch64RegisterBankInfo &RBI, const MachineRegisterInfo &MRI, const AArch64RegisterInfo &TRI)
Check whether I is a currently unsupported binary operation:
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow...
APInt bitcastToAPInt() const
Definition: APFloat.h:1094
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition: Constants.h:157
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
#define LLVM_DEBUG(X)
Definition: Debug.h:119
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:316
const ConstantInt * getCImm() const
#define DEBUG_TYPE
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:48
A discriminated union of two pointer types, with the discriminator in the low bit of the pointer...
Definition: PointerUnion.h:87
unsigned getPredicate() const