//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "aarch64-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  ComplexRendererFns selectArithImmed(MachineOperand &Root) const;

  ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                            unsigned Size) const;

  ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
                                           unsigned Size) const;
  template <int Width>
  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }

  void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI) const;

  // Materialize a GlobalValue or BlockAddress using a movz+movk sequence.
  void materializeLargeCMVal(MachineInstr &I, const Value *V,
                             unsigned char OpFlags) const;

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI,
                         bool GetAllRegSet = false) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() <= 16)
      return &AArch64::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - the operands are not all in the same bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the aarch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical register operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    }
    break;
  }
  return GenericOpc;
}

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}

static bool selectFP16CopyFromGPR32(MachineInstr &I, const TargetInstrInfo &TII,
                                    MachineRegisterInfo &MRI, unsigned SrcReg) {
  // Copies from gpr32 to fpr16 need to use a sub-register copy.
  unsigned CopyReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::COPY))
      .addDef(CopyReg)
      .addUse(SrcReg);
  unsigned SubRegCopy = MRI.createVirtualRegister(&AArch64::FPR16RegClass);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
      .addDef(SubRegCopy)
      .addUse(CopyReg, 0, AArch64::hsub);

  MachineOperand &RegOp = I.getOperand(1);
  RegOp.setReg(SubRegCopy);
  return true;
}

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();

  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    if (TRI.getRegClass(AArch64::FPR16RegClassID)->contains(DstReg) &&
        !TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
      const RegisterBank &RegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
      const TargetRegisterClass *SrcRC = getRegClassForTypeOnBank(
          MRI.getType(SrcReg), RegBank, RBI, /* GetAllRegSet */ true);
      if (SrcRC == &AArch64::GPR32allRegClass)
        return selectFP16CopyFromGPR32(I, TII, MRI, SrcReg);
    }
    assert(I.isCopy() && "Generic operators do not allow physical registers");
    return true;
  }

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
  (void)DstSize;
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  (void)SrcSize;
  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert(
      (DstSize == SrcSize ||
       // Copies are a means to set up initial types, the number of
       // bits may not exactly match.
       (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
        DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI)) ||
       // Copies are a means to copy bits around, as long as we are
       // on the same register class, that's fine. Otherwise, that
       // means we need some SUBREG_TO_REG or AND & co.
       (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
      "Copy with different width?!");
  assert((DstSize <= 64 || RegBank.getID() == AArch64::FPRRegBankID) &&
         "GPRs cannot get more than 64-bit width values");

  const TargetRegisterClass *RC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), RegBank, RBI, /* GetAllRegSet */ true);
  if (!RC) {
    LLVM_DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
    return false;
  }

  if (!TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(SrcReg);
    const TargetRegisterClass *SrcRC =
        RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
    const RegisterBank *RB = nullptr;
    if (!SrcRC) {
      RB = RegClassOrBank.get<const RegisterBank *>();
      SrcRC = getRegClassForTypeOnBank(MRI.getType(SrcReg), *RB, RBI, true);
    }
    // Copies from fpr16 to gpr32 need to use SUBREG_TO_REG.
    if (RC == &AArch64::GPR32allRegClass && SrcRC == &AArch64::FPR16RegClass) {
      unsigned PromoteReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(AArch64::SUBREG_TO_REG))
          .addDef(PromoteReg)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::hsub);
      MachineOperand &RegOp = I.getOperand(1);
      RegOp.setReg(PromoteReg);
    } else if (RC == &AArch64::FPR16RegClass &&
               SrcRC == &AArch64::GPR32allRegClass) {
      selectFP16CopyFromGPR32(I, TII, MRI, SrcReg);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return true;
}

static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  }
  return GenericOpc;
}

static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}

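// Some FP predicates have no single AArch64 condition code equivalent, so
// they are tested as a disjunction of two codes; e.g. FCMP_ONE (ordered and
// not equal) holds when either MI or GT is set. In those cases CondCode2
// carries the second code; otherwise it is left as AL.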
static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

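// Try to fold an integer compare against zero that feeds a conditional
// branch into a single compare-and-branch instruction. Roughly:
//   %c = G_ICMP intpred(eq), %x, 0
//   G_BRCOND %c, %bb
// becomes CBZW/CBZX %x, %bb (CBNZW/CBNZX for ne). Illustrative MIR; the
// exact printed syntax may differ.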
bool AArch64InstructionSelector::selectCompareBranch(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
  MachineInstr *CCMI = MRI.getVRegDef(CondReg);
  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
    CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
  if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
    return false;

  unsigned LHS = CCMI->getOperand(2).getReg();
  unsigned RHS = CCMI->getOperand(3).getReg();
  if (!getConstantVRegVal(RHS, MRI))
    std::swap(RHS, LHS);

  const auto RHSImm = getConstantVRegVal(RHS, MRI);
  if (!RHSImm || *RHSImm != 0)
    return false;

  const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
  if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
    return false;

  const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
  unsigned CBOpc = 0;
  if (CmpWidth <= 32)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
  else if (CmpWidth == 64)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
  else
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
      .addUse(LHS)
      .addMBB(DestMBB)
      .constrainAllUses(TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectVaStartAAPCS(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  return false;
}

bool AArch64InstructionSelector::selectVaStartDarwin(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  unsigned ListReg = I.getOperand(0).getReg();

  unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
          .addDef(ArgsAddrReg)
          .addFrameIndex(FuncInfo->getVarArgsStackIndex())
          .addImm(0)
          .addImm(0);

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);

  MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
            .addUse(ArgsAddrReg)
            .addUse(ListReg)
            .addImm(0)
            .addMemOperand(*I.memoperands_begin());

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

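// Emit a MOVZ for bits 0-15 of the address followed by MOVKs for granules
// 1-3, each carrying a relocation against V. For a global @g this is
// roughly the sequence:
//   movz x0, #:abs_g0_nc:g
//   movk x0, #:abs_g1_nc:g
//   movk x0, #:abs_g2_nc:g
//   movk x0, #:abs_g3:g
// (Illustrative assembly; the operands are built up below.)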
void AArch64InstructionSelector::materializeLargeCMVal(
    MachineInstr &I, const Value *V, unsigned char OpFlags) const {
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineIRBuilder MIB(I);

  auto MovZ = MIB.buildInstr(AArch64::MOVZXi, &AArch64::GPR64RegClass);
  MovZ->addOperand(MF, I.getOperand(1));
  MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
                                     AArch64II::MO_NC);
  MovZ->addOperand(MF, MachineOperand::CreateImm(0));
  constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);

  auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags, unsigned Offset,
                       unsigned ForceDstReg) {
    unsigned DstReg = ForceDstReg
                          ? ForceDstReg
                          : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg);
    if (auto *GV = dyn_cast<GlobalValue>(V)) {
      MovI->addOperand(MF, MachineOperand::CreateGA(
                               GV, MovZ->getOperand(1).getOffset(), Flags));
    } else {
      MovI->addOperand(
          MF, MachineOperand::CreateBA(cast<BlockAddress>(V),
                                       MovZ->getOperand(1).getOffset(), Flags));
    }
    MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
    constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
    return DstReg;
  };
  unsigned DstReg = BuildMovK(MovZ->getOperand(0).getReg(),
                              AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
  DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
  BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
  return;
}

bool AArch64InstructionSelector::select(MachineInstr &I,
                                        CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  // G_PHI requires same handling as PHI
  if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
      const unsigned DefReg = I.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const TargetRegisterClass *DefRC = nullptr;
      if (TargetRegisterInfo::isPhysicalRegister(DefReg)) {
        DefRC = TRI.getRegClass(DefReg);
      } else {
        const RegClassOrRegBank &RegClassOrBank =
            MRI.getRegClassOrRegBank(DefReg);

        DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
        if (!DefRC) {
          if (!DefTy.isValid()) {
            LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
            return false;
          }
          const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
          DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
          if (!DefRC) {
            LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
            return false;
          }
        }
      }
      I.setDesc(TII.get(TargetOpcode::PHI));

      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(
        dbgs() << "Generic instruction has unexpected implicit operands\n");
    return false;
  }

  if (selectImpl(I, CoverageInfo))
    return true;

  LLT Ty =
      I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};

  switch (Opcode) {
  case TargetOpcode::G_BRCOND: {
    if (Ty.getSizeInBits() > 32) {
      // We shouldn't need this on AArch64, but it would be implemented as an
      // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
      // bit being tested is < 32.
      LLVM_DEBUG(dbgs() << "G_BRCOND has type: " << Ty
                        << ", expected at most 32-bits");
      return false;
    }

    const unsigned CondReg = I.getOperand(0).getReg();
    MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

    if (selectCompareBranch(I, MF, MRI))
      return true;

    auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
                   .addUse(CondReg)
                   .addImm(/*bit offset=*/0)
                   .addMBB(DestMBB);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
  }

  case TargetOpcode::G_BRINDIRECT: {
    I.setDesc(TII.get(AArch64::BR));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_CONSTANT: {
    const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;

    const LLT s32 = LLT::scalar(32);
    const LLT s64 = LLT::scalar(64);
    const LLT p0 = LLT::pointer(0, 64);

    const unsigned DefReg = I.getOperand(0).getReg();
    const LLT DefTy = MRI.getType(DefReg);
    const unsigned DefSize = DefTy.getSizeInBits();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    // FIXME: Redundant check, but even less readable when factored out.
    if (isFP) {
      if (Ty != s32 && Ty != s64) {
        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
                          << " constant, expected: " << s32 << " or " << s64
                          << '\n');
        return false;
      }

      if (RB.getID() != AArch64::FPRRegBankID) {
        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
                          << " constant on bank: " << RB
                          << ", expected: FPR\n");
        return false;
      }

      // The case when we have 0.0 is covered by tablegen. Reject it here so we
      // can be sure tablegen works correctly and isn't rescued by this code.
      if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0))
        return false;
    } else {
      // s32 and s64 are covered by tablegen.
      if (Ty != p0) {
        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
                          << " constant, expected: " << s32 << ", " << s64
                          << ", or " << p0 << '\n');
        return false;
      }

      if (RB.getID() != AArch64::GPRRegBankID) {
        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
                          << " constant on bank: " << RB
                          << ", expected: GPR\n");
        return false;
      }
    }

    const unsigned MovOpc =
        DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;

    I.setDesc(TII.get(MovOpc));

    if (isFP) {
      const TargetRegisterClass &GPRRC =
          DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
      const TargetRegisterClass &FPRRC =
          DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;

      const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
      MachineOperand &RegOp = I.getOperand(0);
      RegOp.setReg(DefGPRReg);

      BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
              TII.get(AArch64::COPY))
          .addDef(DefReg)
          .addUse(DefGPRReg);

      if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
        return false;
      }

      MachineOperand &ImmOp = I.getOperand(1);
      // FIXME: Is going through int64_t always correct?
      ImmOp.ChangeToImmediate(
          ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
    } else if (I.getOperand(1).isCImm()) {
      uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
      I.getOperand(1).ChangeToImmediate(Val);
    } else if (I.getOperand(1).isImm()) {
      uint64_t Val = I.getOperand(1).getImm();
      I.getOperand(1).ChangeToImmediate(Val);
    }

    constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_EXTRACT: {
    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    (void)DstTy;
    unsigned SrcSize = SrcTy.getSizeInBits();
    // Larger extracts are vectors, same-size extracts should be something else
    // by now (either split up or simplified to a COPY).
    if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
      return false;

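    // Scalar extracts become an unsigned bitfield move: the two immediates
    // are the extract offset and the index of the last extracted bit, so
    // e.g. extracting 16 bits at offset 32 from a 64-bit value is roughly
    // UBFMXri with immr = 32 and imms = 32 + 16 - 1 = 47.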
    I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri));
    MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
                                      Ty.getSizeInBits() - 1);

    if (SrcSize < 64) {
      assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 &&
             "unexpected G_EXTRACT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
            TII.get(AArch64::COPY))
        .addDef(I.getOperand(0).getReg())
        .addUse(DstReg, 0, AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(0).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(0).setReg(DstReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_INSERT: {
    LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    unsigned DstSize = DstTy.getSizeInBits();
    // Larger inserts are vectors, same-size ones should be something else by
    // now (split up or turned into COPYs).
    if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
      return false;

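    // Scalar inserts become a bitfield move: BFM rotates the source right by
    // immr, so inserting Width bits at bit LSB uses immr = (DstSize - LSB)
    // mod DstSize and imms = Width - 1; e.g. a 16-bit insert at bit 16 of a
    // 32-bit value is roughly BFMWri with immr = 16 and imms = 15.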
    I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri));
    unsigned LSB = I.getOperand(3).getImm();
    unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    I.getOperand(3).setImm((DstSize - LSB) % DstSize);
    MachineInstrBuilder(MF, I).addImm(Width - 1);

    if (DstSize < 64) {
      assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 &&
             "unexpected G_INSERT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
            TII.get(AArch64::SUBREG_TO_REG))
        .addDef(SrcReg)
        .addImm(0)
        .addUse(I.getOperand(2).getReg())
        .addImm(AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(2).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(2).setReg(SrcReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FRAME_INDEX: {
    // allocas and G_FRAME_INDEX are only supported in addrspace(0).
    if (Ty != LLT::pointer(0, 64)) {
      LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
                        << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }
    I.setDesc(TII.get(AArch64::ADDXri));

    // MOs for a #0 shifted immediate.
    I.addOperand(MachineOperand::CreateImm(0));
    I.addOperand(MachineOperand::CreateImm(0));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_GLOBAL_VALUE: {
    auto GV = I.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // FIXME: we don't support TLS yet.
      return false;
    }
    unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
    if (OpFlags & AArch64II::MO_GOT) {
      I.setDesc(TII.get(AArch64::LOADgot));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else if (TM.getCodeModel() == CodeModel::Large) {
      // Materialize the global using movz/movk instructions.
      materializeLargeCMVal(I, GV, OpFlags);
      I.eraseFromParent();
      return true;
    } else if (TM.getCodeModel() == CodeModel::Tiny) {
      I.setDesc(TII.get(AArch64::ADR));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else {
      I.setDesc(TII.get(AArch64::MOVaddr));
      I.getOperand(1).setTargetFlags(AArch64II::MO_PAGE);
      MachineInstrBuilder MIB(MF, I);
      MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
                           OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    LLT PtrTy = MRI.getType(I.getOperand(1).getReg());

    if (PtrTy != LLT::pointer(0, 64)) {
      LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
                        << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }

    auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }
    unsigned MemSizeInBits = MemOp.getSize() * 8;

    // FIXME: PR36018: Volatile loads in some cases are incorrectly selected by
    // folding with an extend. Until we have a G_SEXTLOAD solution bail out if
    // we hit one.
    if (Opcode == TargetOpcode::G_LOAD && MemOp.isVolatile())
      return false;

    const unsigned PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
    // Sanity-check the pointer register.
    assert(PtrRB.getID() == AArch64::GPRRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(MRI.getType(PtrReg).isPointer() &&
           "Load/Store pointer operand isn't a pointer");
#endif

    const unsigned ValReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);

    const unsigned NewOpc =
        selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemSizeInBits);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));

    uint64_t Offset = 0;
    auto *PtrMI = MRI.getVRegDef(PtrReg);

    // Try to fold a GEP into our unsigned immediate addressing mode.
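    // E.g. a 64-bit load from (G_GEP %base, 24) can fold to LDRXui with
    // immediate 3, since the unsigned-immediate forms count the offset in
    // units of the access size (an illustrative sketch; the folding is
    // subject to the alignment and range checks below).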
    if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
      if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
        int64_t Imm = *COff;
        const unsigned Size = MemSizeInBits / 8;
        const unsigned Scale = Log2_32(Size);
        if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
          unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
          I.getOperand(1).setReg(Ptr2Reg);
          PtrMI = MRI.getVRegDef(Ptr2Reg);
          Offset = Imm / Size;
        }
      }
    }

    // If we haven't folded anything into our addressing mode yet, try to fold
    // a frame index into the base+offset.
    if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
      I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());

    I.addOperand(MachineOperand::CreateImm(Offset));

    // If we're storing a 0, use WZR/XZR.
    if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
      if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
        if (I.getOpcode() == AArch64::STRWui)
          I.getOperand(0).setReg(AArch64::WZR);
        else if (I.getOpcode() == AArch64::STRXui)
          I.getOperand(0).setReg(AArch64::XZR);
      }
    }

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
      return false;
    }

    if (Ty != LLT::scalar(64)) {
      LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
                        << ", expected: " << LLT::scalar(64) << '\n');
      return false;
    }

    unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
                                                             : AArch64::UMULHrr;
    I.setDesc(TII.get(NewOpc));

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:

  case TargetOpcode::G_OR:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_GEP: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned OpSize = Ty.getSizeInBits();

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));
    // FIXME: Should the type be always reset in setDesc?

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_PTR_MASK: {
    uint64_t Align = I.getOperand(2).getImm();
    if (Align >= 64 || Align == 0)
      return false;

    uint64_t Mask = ~((1ULL << Align) - 1);
    I.setDesc(TII.get(AArch64::ANDXri));
    I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());

    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

    if (DstRB.getID() != SrcRB.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
      return false;
    }

    if (DstRB.getID() == AArch64::GPRRegBankID) {
      const TargetRegisterClass *DstRC =
          getRegClassForTypeOnBank(DstTy, DstRB, RBI);
      if (!DstRC)
        return false;

      const TargetRegisterClass *SrcRC =
          getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
      if (!SrcRC)
        return false;

      if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
          !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      if (DstRC == SrcRC) {
        // Nothing to be done
      } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
                 SrcTy == LLT::scalar(64)) {
        llvm_unreachable("TableGen can import this case");
        return false;
      } else if (DstRC == &AArch64::GPR32RegClass &&
                 SrcRC == &AArch64::GPR64RegClass) {
        I.getOperand(1).setSubReg(AArch64::sub_32);
      } else {
        LLVM_DEBUG(
            dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      I.setDesc(TII.get(TargetOpcode::COPY));
      return true;
    } else if (DstRB.getID() == AArch64::FPRRegBankID) {
      if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
        I.setDesc(TII.get(AArch64::XTNv4i16));
        constrainSelectedInstRegOperands(I, TII, TRI, RBI);
        return true;
      }
    }

    return false;
  }

  case TargetOpcode::G_ANYEXT: {
    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
    if (RBDst.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst
                        << ", expected: GPR\n");
      return false;
    }

    const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
    if (RBSrc.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc
                        << ", expected: GPR\n");
      return false;
    }

    const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

    if (DstSize == 0) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
      return false;
    }

    if (DstSize != 64 && DstSize > 32) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
                        << ", expected: 32 or 64\n");
      return false;
    }
    // At this point G_ANYEXT is just like a plain COPY, but we need
    // to explicitly form the 64-bit value if any.
    if (DstSize > 32) {
      unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(ExtSrc)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);
      I.getOperand(1).setReg(ExtSrc);
    }
    return selectCopy(I, TII, MRI, TRI, RBI);
  }

  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    unsigned Opcode = I.getOpcode();
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const bool isSigned = Opcode == TargetOpcode::G_SEXT;
    const unsigned DefReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
                        << ", expected: GPR\n");
      return false;
    }

    MachineInstr *ExtI;
    if (DstTy == LLT::scalar(64)) {
      // FIXME: Can we avoid manually doing this?
      if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
                          << " operand\n");
        return false;
      }

      const unsigned SrcXReg =
          MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(SrcXReg)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);

      const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcXReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
      const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else {
      return false;
    }

    constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
    if (NewOpc == Opcode)
      return false;

    I.setDesc(TII.get(NewOpc));
    constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    return true;
  }

  case TargetOpcode::G_INTTOPTR:
    // The importer is currently unable to import pointer types since they
    // didn't exist in SelectionDAG.
    return selectCopy(I, TII, MRI, TRI, RBI);

  case TargetOpcode::G_BITCAST:
    // Imported SelectionDAG rules can handle every bitcast except those that
    // bitcast from a type to the same type. Ideally, these shouldn't occur
    // but we might not run an optimizer that deletes them.
    if (MRI.getType(I.getOperand(0).getReg()) ==
        MRI.getType(I.getOperand(1).getReg()))
      return selectCopy(I, TII, MRI, TRI, RBI);
    return false;

  case TargetOpcode::G_SELECT: {
    if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
      LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
                        << ", expected: " << LLT::scalar(1) << '\n');
      return false;
    }

    const unsigned CondReg = I.getOperand(1).getReg();
    const unsigned TReg = I.getOperand(2).getReg();
    const unsigned FReg = I.getOperand(3).getReg();

    unsigned CSelOpc = 0;

    if (Ty == LLT::scalar(32)) {
      CSelOpc = AArch64::CSELWr;
    } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
      CSelOpc = AArch64::CSELXr;
    } else {
      return false;
    }

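    // The select is lowered as a test of bit 0 of the condition followed by
    // a conditional select; roughly:
    //   ANDSWri %wzr, %cond, <logical imm 1>   (sets NZCV)
    //   CSELWr/CSELXr %dst, %true, %false, ne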
    MachineInstr &TstMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
             .addDef(AArch64::WZR)
             .addUse(CondReg)
             .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));

    MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
                                .addDef(I.getOperand(0).getReg())
                                .addUse(TReg)
                                .addUse(FReg)
                                .addImm(AArch64CC::NE);

    constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ICMP: {
    if (Ty != LLT::scalar(32)) {
      LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty
                        << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    unsigned ZReg = 0;

    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::SUBSWrr;
      ZReg = AArch64::WZR;
    } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
      CmpOpc = AArch64::SUBSXrr;
      ZReg = AArch64::XZR;
    } else {
      return false;
    }

    // CSINC increments the result by one when the condition code is false.
    // Therefore, we have to invert the predicate to get an increment by 1 when
    // the predicate is true.
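    // E.g. for "%res = G_ICMP intpred(sgt), %a, %b" this emits a SUBS whose
    // result is discarded, then roughly "CSINCWr %res, wzr, wzr, le": when
    // le (the inverse of sgt) is false, the result is wzr + 1 = 1.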
    const AArch64CC::CondCode invCC =
        changeICMPPredToAArch64CC(CmpInst::getInversePredicate(
            (CmpInst::Predicate)I.getOperand(1).getPredicate()));

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addDef(ZReg)
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(I.getOperand(0).getReg())
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(invCC);

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_FCMP: {
    if (Ty != LLT::scalar(32)) {
      LLVM_DEBUG(dbgs() << "G_FCMP result has type: " << Ty
                        << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::FCMPSrr;
    } else if (CmpTy == LLT::scalar(64)) {
      CmpOpc = AArch64::FCMPDrr;
    } else {
      return false;
    }

    // FIXME: regbank

    AArch64CC::CondCode CC1, CC2;
    changeFCMPPredToAArch64CC(
        (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    const unsigned DefReg = I.getOperand(0).getReg();
    unsigned Def1Reg = DefReg;
    if (CC2 != AArch64CC::AL)
      Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(Def1Reg)
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(getInvertedCondCode(CC1));

    if (CC2 != AArch64CC::AL) {
      unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
      MachineInstr &CSet2MI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
               .addDef(Def2Reg)
               .addUse(AArch64::WZR)
               .addUse(AArch64::WZR)
               .addImm(getInvertedCondCode(CC2));
      MachineInstr &OrMI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
               .addDef(DefReg)
               .addUse(Def1Reg)
               .addUse(Def2Reg);
      constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
      constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
    }

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_VASTART:
    return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
                                : selectVaStartAAPCS(I, MF, MRI);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    if (!I.getOperand(0).isIntrinsicID())
      return false;
    if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
      return false;
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::BRK))
        .addImm(1);
    I.eraseFromParent();
    return true;
  case TargetOpcode::G_IMPLICIT_DEF: {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const unsigned DstReg = I.getOperand(0).getReg();
    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const TargetRegisterClass *DstRC =
        getRegClassForTypeOnBank(DstTy, DstRB, RBI);
    RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
    return true;
  }
  case TargetOpcode::G_BLOCK_ADDR: {
    if (TM.getCodeModel() == CodeModel::Large) {
      materializeLargeCMVal(I, I.getOperand(1).getBlockAddress(), 0);
      I.eraseFromParent();
      return true;
    } else {
      I.setDesc(TII.get(AArch64::MOVaddrBA));
      auto MovMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::MOVaddrBA),
                           I.getOperand(0).getReg())
                       .addBlockAddress(I.getOperand(1).getBlockAddress(),
                                        /* Offset */ 0, AArch64II::MO_PAGE)
                       .addBlockAddress(
                           I.getOperand(1).getBlockAddress(), /* Offset */ 0,
                           AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI);
    }
  }
  }

  return false;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
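/// For example, 0xfff000 can be rendered as immediate 0xfff with an LSL #12
/// shifter operand, while 0xfff001 cannot and fails to match (illustrative
/// values).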
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
  MachineInstr &MI = *Root.getParent();
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in, however
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  uint64_t Immed;
  if (Root.isImm())
    Immed = Root.getImm();
  else if (Root.isCImm())
    Immed = Root.getCImm()->getZExtValue();
  else if (Root.isReg()) {
    MachineInstr *Def = MRI.getVRegDef(Root.getReg());
    if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
      return None;
    MachineOperand &Op1 = Def->getOperand(1);
    if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
      return None;
    Immed = Op1.getCImm()->getZExtValue();
  } else
    return None;

  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return None;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
  }};
}

/// Select a "register plus unscaled signed 9-bit immediate" address. This
/// should only match when there is an offset that is not valid for a scaled
/// immediate addressing mode. The "Size" argument is the size in bytes of the
/// memory reference, which is needed here to know what is valid for a scaled
/// immediate.
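/// For example, with a 4-byte access an offset of -16 is not a valid scaled
/// immediate, so it matches here and renders as the base register plus an
/// unscaled offset of -16 (e.g. for an LDUR-form load).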
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
                                                   unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  if (!isBaseWithConstantOffset(Root, MRI))
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  MachineOperand &OffImm = RootDef->getOperand(2);
  if (!OffImm.isReg())
    return None;
  MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
  if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
    return None;
  int64_t RHSC;
  MachineOperand &RHSOp1 = RHS->getOperand(1);
  if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
    return None;
  RHSC = RHSOp1.getCImm()->getSExtValue();

  // If the offset is valid as a scaled immediate, don't match here.
  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
    return None;
  if (RHSC >= -256 && RHSC < 256) {
    MachineOperand &Base = RootDef->getOperand(1);
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
    }};
  }
  return None;
}

/// Select a "register plus scaled unsigned 12-bit immediate" address. The
/// "Size" argument is the size in bytes of the memory reference, which
/// determines the scale.
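/// For example, with a 4-byte access (G_GEP %base, 8) renders as base %base
/// and scaled immediate 2, i.e. 8 bytes divided by the access size
/// (illustrative values).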
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
                                                  unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
    if (LHSDef && RHSDef) {
      int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
          return {{
              [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
              [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
          }};

        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
        }};
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (selectAddrModeUnscaled(Root, Size).hasValue())
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
  }};
}

void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
  Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI);
  assert(CstVal && "Expected constant value");
  MIB.addImm(CstVal.getValue());
}
1695 
1696 namespace llvm {
1699  AArch64Subtarget &Subtarget,
1700  AArch64RegisterBankInfo &RBI) {
1701  return new AArch64InstructionSelector(TM, Subtarget, RBI);
1702 }
1703 }
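For context, a hedged usage sketch of this factory: the subtarget typically constructs the selector once, and the GlobalISel InstructionSelect pass then drives it over every function. Simplified from the actual AArch64Subtarget setup (member and accessor names may differ across LLVM versions):

// Inside AArch64 subtarget initialization (sketch, not verbatim source):
//   InstSelector.reset(createAArch64InstructionSelector(
//       *static_cast<const AArch64TargetMachine *>(&TM), *this, *RBI));
//
// The InstructionSelect pass later retrieves this instance through the
// subtarget and calls select() on each generic MachineInstr.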