//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "aarch64-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  ComplexRendererFns selectArithImmed(MachineOperand &Root) const;

  ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                            unsigned Size) const;

  ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
                                           unsigned Size) const;
  template <int Width>
  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }

  void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI) const;

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI,
                         bool GetAllRegSet = false) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() <= 16)
      return &AArch64::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}
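
// For example, getRegClassForTypeOnBank() maps an s32 value on the GPR bank
// to GPR32 (or GPR32all when GetAllRegSet is true), and a 64-bit value such
// as s64 or v2s32 on the FPR bank to FPR64; unsupported sizes (e.g. a
// 128-bit value on the GPR bank) yield nullptr, and callers must bail out.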

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - its operands are not all in the same bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the AArch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical register operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    }
    break;
  }
  return GenericOpc;
}
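
// For example, selectBinaryOp() turns a 64-bit G_SHL on the GPR bank into
// LSLVXr and a 32-bit G_FADD on the FPR bank into FADDSrr. A 64-bit G_OR on
// the FPR bank becomes ORRv8i8, a vector ORR that operates on the D register.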

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}
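
// For example, selectLoadStoreUIOp() maps a 32-bit G_LOAD whose value lives
// on the GPR bank to LDRWui, and the same load on the FPR bank to LDRSui
// (loading into an S register). The 8-bit GPR variants (LDRBBui/STRBBui)
// transfer a byte to or from the low bits of a W register.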

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {

  unsigned DstReg = I.getOperand(0).getReg();
  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");
    return true;
  }

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
  (void)DstSize;
  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  (void)SrcSize;
  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert(
      (DstSize == SrcSize ||
       // Copies are a means to set up initial types, the number of
       // bits may not exactly match.
       (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
        DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI)) ||
       // Copies are a means to copy bits around, as long as we are
       // on the same register class, that's fine. Otherwise, that
       // means we need some SUBREG_TO_REG or AND & co.
       (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
      "Copy with different width?!");
  assert((DstSize <= 64 || RegBank.getID() == AArch64::FPRRegBankID) &&
         "GPRs cannot get more than 64-bit width values");

  const TargetRegisterClass *RC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), RegBank, RBI, /* GetAllRegSet */ true);
  if (!RC) {
    DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
    return false;
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return true;
}

static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  }
  return GenericOpc;
}

static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}

static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}
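
// Some FP predicates need two conditions after an FCMP: "ordered and not
// equal" (FCMP_ONE) holds when the result is less-than (MI) or greater-than
// (GT), and "unordered or equal" (FCMP_UEQ) is EQ or VS. The G_FCMP handling
// below materializes such predicates as two CSINCs ORed together.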

bool AArch64InstructionSelector::selectCompareBranch(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
  MachineInstr *CCMI = MRI.getVRegDef(CondReg);
  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
    CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
  if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
    return false;

  unsigned LHS = CCMI->getOperand(2).getReg();
  unsigned RHS = CCMI->getOperand(3).getReg();
  if (!getConstantVRegVal(RHS, MRI))
    std::swap(RHS, LHS);

  const auto RHSImm = getConstantVRegVal(RHS, MRI);
  if (!RHSImm || *RHSImm != 0)
    return false;

  const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
  if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
    return false;

  const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
  unsigned CBOpc = 0;
  if (CmpWidth <= 32)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
  else if (CmpWidth == 64)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
  else
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
      .addUse(LHS)
      .addMBB(DestMBB)
      .constrainAllUses(TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
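
// Illustrative example: a G_ICMP intpred(eq) of a gpr(s64) value %x against
// zero, feeding G_BRCOND (possibly through a G_TRUNC), folds to a single
//   CBZX %x, %bb.target
// instead of materializing the boolean and testing it with TBNZW in the
// generic G_BRCOND path below.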

bool AArch64InstructionSelector::selectVaStartAAPCS(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  return false;
}

bool AArch64InstructionSelector::selectVaStartDarwin(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  unsigned ListReg = I.getOperand(0).getReg();

  unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
          .addDef(ArgsAddrReg)
          .addFrameIndex(FuncInfo->getVarArgsStackIndex())
          .addImm(0)
          .addImm(0);

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);

  MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
            .addUse(ArgsAddrReg)
            .addUse(ListReg)
            .addImm(0)
            .addMemOperand(*I.memoperands_begin());

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
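
// Note: on Darwin, va_list is a single pointer, so va_start reduces to
// computing the address of the first stack-passed variadic argument (the
// ADDXri off the va-args frame index) and storing it through the list
// pointer (the STRXui). The AAPCS va_list is a larger structure with
// separate GPR/FPR save areas, which is why selectVaStartAAPCS above is
// still unimplemented.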

bool AArch64InstructionSelector::select(MachineInstr &I,
                                        CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  // G_PHI requires the same handling as PHI.
  if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
      const unsigned DefReg = I.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const TargetRegisterClass *DefRC = nullptr;
      if (TargetRegisterInfo::isPhysicalRegister(DefReg)) {
        DefRC = TRI.getRegClass(DefReg);
      } else {
        const RegClassOrRegBank &RegClassOrBank =
            MRI.getRegClassOrRegBank(DefReg);

        DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
        if (!DefRC) {
          if (!DefTy.isValid()) {
            DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
            return false;
          }
          const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
          DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
          if (!DefRC) {
            DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
            return false;
          }
        }
      }
      I.setDesc(TII.get(TargetOpcode::PHI));

      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    DEBUG(dbgs() << "Generic instruction has unexpected implicit operands\n");
    return false;
  }

  if (selectImpl(I, CoverageInfo))
    return true;

  LLT Ty =
      I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};

  switch (Opcode) {
  case TargetOpcode::G_BRCOND: {
    if (Ty.getSizeInBits() > 32) {
      // We shouldn't need this on AArch64, but it would be implemented as an
      // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
      // bit being tested is < 32.
      DEBUG(dbgs() << "G_BRCOND has type: " << Ty
                   << ", expected at most 32-bits");
      return false;
    }

    const unsigned CondReg = I.getOperand(0).getReg();
    MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

    if (selectCompareBranch(I, MF, MRI))
      return true;

    auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
                   .addUse(CondReg)
                   .addImm(/*bit offset=*/0)
                   .addMBB(DestMBB);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
  }

  case TargetOpcode::G_BRINDIRECT: {
    I.setDesc(TII.get(AArch64::BR));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_CONSTANT: {
    const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;

    const LLT s32 = LLT::scalar(32);
    const LLT s64 = LLT::scalar(64);
    const LLT p0 = LLT::pointer(0, 64);

    const unsigned DefReg = I.getOperand(0).getReg();
    const LLT DefTy = MRI.getType(DefReg);
    const unsigned DefSize = DefTy.getSizeInBits();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    // FIXME: Redundant check, but even less readable when factored out.
    if (isFP) {
      if (Ty != s32 && Ty != s64) {
        DEBUG(dbgs() << "Unable to materialize FP " << Ty
                     << " constant, expected: " << s32 << " or " << s64
                     << '\n');
        return false;
      }

      if (RB.getID() != AArch64::FPRRegBankID) {
        DEBUG(dbgs() << "Unable to materialize FP " << Ty
                     << " constant on bank: " << RB << ", expected: FPR\n");
        return false;
      }

      // The case when we have 0.0 is covered by tablegen. Reject it here so we
      // can be sure tablegen works correctly and isn't rescued by this code.
      if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0))
        return false;
    } else {
      // s32 and s64 are covered by tablegen.
      if (Ty != p0) {
        DEBUG(dbgs() << "Unable to materialize integer " << Ty
                     << " constant, expected: " << s32 << ", " << s64 << ", or "
                     << p0 << '\n');
        return false;
      }

      if (RB.getID() != AArch64::GPRRegBankID) {
        DEBUG(dbgs() << "Unable to materialize integer " << Ty
                     << " constant on bank: " << RB << ", expected: GPR\n");
        return false;
      }
    }

    const unsigned MovOpc =
        DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;

    I.setDesc(TII.get(MovOpc));

    if (isFP) {
      const TargetRegisterClass &GPRRC =
          DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
      const TargetRegisterClass &FPRRC =
          DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;

      const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
      MachineOperand &RegOp = I.getOperand(0);
      RegOp.setReg(DefGPRReg);

      BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
              TII.get(AArch64::COPY))
          .addDef(DefReg)
          .addUse(DefGPRReg);

      if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
        DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
        return false;
      }

      MachineOperand &ImmOp = I.getOperand(1);
      // FIXME: Is going through int64_t always correct?
      ImmOp.ChangeToImmediate(
          ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
    } else if (I.getOperand(1).isCImm()) {
      uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
      I.getOperand(1).ChangeToImmediate(Val);
    } else if (I.getOperand(1).isImm()) {
      uint64_t Val = I.getOperand(1).getImm();
      I.getOperand(1).ChangeToImmediate(Val);
    }

    constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_EXTRACT: {
    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    // Larger extracts are vectors, same-size extracts should be something else
    // by now (either split up or simplified to a COPY).
    if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(AArch64::UBFMXri));
    MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
                                      Ty.getSizeInBits() - 1);

    unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
            TII.get(AArch64::COPY))
        .addDef(I.getOperand(0).getReg())
        .addUse(DstReg, 0, AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(0).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(0).setReg(DstReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
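
  // Example for the G_EXTRACT case above: extracting a 32-bit field at bit
  // offset 16 from an s64 becomes
  //   UBFMXri %tmp, %src, 16, 47
  // an unsigned bitfield move of bits [16, 47] into the low bits, followed
  // by a COPY of the sub_32 subregister into the 32-bit destination.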

  case TargetOpcode::G_INSERT: {
    LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
    // Larger inserts are vectors, same-size ones should be something else by
    // now (split up or turned into COPYs).
    if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(AArch64::BFMXri));
    unsigned LSB = I.getOperand(3).getImm();
    unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    I.getOperand(3).setImm((64 - LSB) % 64);
    MachineInstrBuilder(MF, I).addImm(Width - 1);

    unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
            TII.get(AArch64::SUBREG_TO_REG))
        .addDef(SrcReg)
        .addImm(0)
        .addUse(I.getOperand(2).getReg())
        .addImm(AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(2).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(2).setReg(SrcReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
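
  // Example for the G_INSERT case above: inserting a 16-bit value at bit 8
  // of an s64 uses immr = (64 - 8) % 64 = 56 and imms = 16 - 1 = 15, i.e.
  // "BFMXri %dst, %old, %src64, 56, 15", the BFM encoding of
  // "BFI %dst, %src, #8, #16".
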
  case TargetOpcode::G_FRAME_INDEX: {
    // allocas and G_FRAME_INDEX are only supported in addrspace(0).
    if (Ty != LLT::pointer(0, 64)) {
      DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
                   << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }
    I.setDesc(TII.get(AArch64::ADDXri));

    // MOs for a #0 shifted immediate.
    I.addOperand(MachineOperand::CreateImm(0));
    I.addOperand(MachineOperand::CreateImm(0));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_GLOBAL_VALUE: {
    auto GV = I.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // FIXME: we don't support TLS yet.
      return false;
    }
    unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
    if (OpFlags & AArch64II::MO_GOT) {
      I.setDesc(TII.get(AArch64::LOADgot));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else if (TM.getCodeModel() == CodeModel::Large) {
      // Materialize the global using movz/movk instructions.
      unsigned MovZDstReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      auto InsertPt = std::next(I.getIterator());
      auto MovZ =
          BuildMI(MBB, InsertPt, I.getDebugLoc(), TII.get(AArch64::MOVZXi))
              .addDef(MovZDstReg);
      MovZ->addOperand(MF, I.getOperand(1));
      MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
                                         AArch64II::MO_NC);
      MovZ->addOperand(MF, MachineOperand::CreateImm(0));
      constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);

      auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags,
                           unsigned Offset, unsigned ForceDstReg) {
        unsigned DstReg =
            ForceDstReg ? ForceDstReg
                        : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
        auto MovI = BuildMI(MBB, InsertPt, MovZ->getDebugLoc(),
                            TII.get(AArch64::MOVKXi))
                        .addDef(DstReg)
                        .addReg(SrcReg);
        MovI->addOperand(MF, MachineOperand::CreateGA(
                                 GV, MovZ->getOperand(1).getOffset(), Flags));
        MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
        constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
        return DstReg;
      };
      unsigned DstReg = BuildMovK(MovZ->getOperand(0).getReg(),
                                  AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
      DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
      BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
      I.eraseFromParent();
      return true;
    } else {
      I.setDesc(TII.get(AArch64::MOVaddr));
      I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
      MachineInstrBuilder MIB(MF, I);
      MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
                           OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    LLT MemTy = Ty;
    LLT PtrTy = MRI.getType(I.getOperand(1).getReg());

    if (PtrTy != LLT::pointer(0, 64)) {
      DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
                   << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }

    auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    // FIXME: PR36018: Volatile loads in some cases are incorrectly selected by
    // folding with an extend. Until we have a G_SEXTLOAD solution bail out if
    // we hit one.
    if (Opcode == TargetOpcode::G_LOAD && MemOp.isVolatile())
      return false;

    const unsigned PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
    // Sanity-check the pointer register.
    assert(PtrRB.getID() == AArch64::GPRRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(MRI.getType(PtrReg).isPointer() &&
           "Load/Store pointer operand isn't a pointer");
#endif

    const unsigned ValReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);

    const unsigned NewOpc =
        selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemTy.getSizeInBits());
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));

    uint64_t Offset = 0;
    auto *PtrMI = MRI.getVRegDef(PtrReg);

    // Try to fold a GEP into our unsigned immediate addressing mode.
    if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
      if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
        int64_t Imm = *COff;
        const unsigned Size = MemTy.getSizeInBits() / 8;
        const unsigned Scale = Log2_32(Size);
        if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
          unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
          I.getOperand(1).setReg(Ptr2Reg);
          PtrMI = MRI.getVRegDef(Ptr2Reg);
          Offset = Imm / Size;
        }
      }
    }

    // If we haven't folded anything into our addressing mode yet, try to fold
    // a frame index into the base+offset.
    if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
      I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());

    I.addOperand(MachineOperand::CreateImm(Offset));

    // If we're storing a 0, use WZR/XZR.
    if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
      if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
        if (I.getOpcode() == AArch64::STRWui)
          I.getOperand(0).setReg(AArch64::WZR);
        else if (I.getOpcode() == AArch64::STRXui)
          I.getOperand(0).setReg(AArch64::XZR);
      }
    }

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
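
  // Example of the G_GEP fold above: for "%p = G_GEP %base, 16" feeding an
  // 8-byte G_LOAD, the constant 16 is suitably aligned and in range, so the
  // load is selected as "LDRXui %base, 2" (the ui-form immediate is scaled
  // by the access size: 16 / 8 = 2).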

  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
      return false;
    }

    if (Ty != LLT::scalar(64)) {
      DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
                   << ", expected: " << LLT::scalar(64) << '\n');
      return false;
    }

    unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
                                                             : AArch64::UMULHrr;
    I.setDesc(TII.get(NewOpc));

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:

  case TargetOpcode::G_OR:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_GEP: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned OpSize = Ty.getSizeInBits();

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));
    // FIXME: Should the type be always reset in setDesc?

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_PTR_MASK: {
    uint64_t Align = I.getOperand(2).getImm();
    if (Align >= 64 || Align == 0)
      return false;

    uint64_t Mask = ~((1ULL << Align) - 1);
    I.setDesc(TII.get(AArch64::ANDXri));
    I.getOperand(2).ChangeToImmediate(
        AArch64_AM::encodeLogicalImmediate(Mask, 64));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
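
  // Example for the G_PTR_MASK case above: "G_PTR_MASK %dst, %ptr, 4"
  // clears the low 4 bits (16-byte alignment), so Mask is
  // 0xFFFFFFFFFFFFFFF0 and the instruction becomes an ANDXri with that mask
  // encoded as a logical immediate.
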
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());

    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

    if (DstRB.getID() != SrcRB.getID()) {
      DEBUG(dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
      return false;
    }

    if (DstRB.getID() == AArch64::GPRRegBankID) {
      const TargetRegisterClass *DstRC =
          getRegClassForTypeOnBank(DstTy, DstRB, RBI);
      if (!DstRC)
        return false;

      const TargetRegisterClass *SrcRC =
          getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
      if (!SrcRC)
        return false;

      if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
          !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
        DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      if (DstRC == SrcRC) {
        // Nothing to be done.
      } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
                 SrcTy == LLT::scalar(64)) {
        llvm_unreachable("TableGen can import this case");
        return false;
      } else if (DstRC == &AArch64::GPR32RegClass &&
                 SrcRC == &AArch64::GPR64RegClass) {
        I.getOperand(1).setSubReg(AArch64::sub_32);
      } else {
        DEBUG(dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      I.setDesc(TII.get(TargetOpcode::COPY));
      return true;
    } else if (DstRB.getID() == AArch64::FPRRegBankID) {
      if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
        I.setDesc(TII.get(AArch64::XTNv4i16));
        constrainSelectedInstRegOperands(I, TII, TRI, RBI);
        return true;
      }
    }

    return false;
  }

  case TargetOpcode::G_ANYEXT: {
    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
    if (RBDst.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst << ", expected: GPR\n");
      return false;
    }

    const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
    if (RBSrc.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc << ", expected: GPR\n");
      return false;
    }

    const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

    if (DstSize == 0) {
      DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
      return false;
    }

    if (DstSize != 64 && DstSize > 32) {
      DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
                   << ", expected: 32 or 64\n");
      return false;
    }
    // At this point G_ANYEXT is just like a plain COPY, but we need
    // to explicitly form the 64-bit value if any.
    if (DstSize > 32) {
      unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(ExtSrc)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);
      I.getOperand(1).setReg(ExtSrc);
    }
    return selectCopy(I, TII, MRI, TRI, RBI);
  }

  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    unsigned Opcode = I.getOpcode();
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const bool isSigned = Opcode == TargetOpcode::G_SEXT;
    const unsigned DefReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
                   << ", expected: GPR\n");
      return false;
    }

    MachineInstr *ExtI;
    if (DstTy == LLT::scalar(64)) {
      // FIXME: Can we avoid manually doing this?
      if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
        DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
                     << " operand\n");
        return false;
      }

      const unsigned SrcXReg =
          MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(SrcXReg)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);

      const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcXReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
      const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else {
      return false;
    }

    constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
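
  // Example: a G_ZEXT from s16 to s32 becomes "UBFMWri %dst, %src, 0, 15",
  // zero-extending bits [0, 15]; a G_SEXT from s32 to s64 first widens the
  // source with SUBREG_TO_REG and then emits "SBFMXri %dst, %src64, 0, 31",
  // the canonical SXTW encoding.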

  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
    if (NewOpc == Opcode)
      return false;

    I.setDesc(TII.get(NewOpc));
    constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    return true;
  }

  case TargetOpcode::G_INTTOPTR:
    // The importer is currently unable to import pointer types since they
    // didn't exist in SelectionDAG.
    return selectCopy(I, TII, MRI, TRI, RBI);

  case TargetOpcode::G_BITCAST:
    // Imported SelectionDAG rules can handle every bitcast except those that
    // bitcast from a type to the same type. Ideally, these shouldn't occur
    // but we might not run an optimizer that deletes them.
    if (MRI.getType(I.getOperand(0).getReg()) ==
        MRI.getType(I.getOperand(1).getReg()))
      return selectCopy(I, TII, MRI, TRI, RBI);
    return false;

  case TargetOpcode::G_SELECT: {
    if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
      DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
                   << ", expected: " << LLT::scalar(1) << '\n');
      return false;
    }

    const unsigned CondReg = I.getOperand(1).getReg();
    const unsigned TReg = I.getOperand(2).getReg();
    const unsigned FReg = I.getOperand(3).getReg();

    unsigned CSelOpc = 0;

    if (Ty == LLT::scalar(32)) {
      CSelOpc = AArch64::CSELWr;
    } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
      CSelOpc = AArch64::CSELXr;
    } else {
      return false;
    }

    MachineInstr &TstMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
             .addDef(AArch64::WZR)
             .addUse(CondReg)
             .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));

    MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
                                .addDef(I.getOperand(0).getReg())
                                .addUse(TReg)
                                .addUse(FReg)
                                .addImm(AArch64CC::NE);

    constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ICMP: {
    if (Ty != LLT::scalar(32)) {
      DEBUG(dbgs() << "G_ICMP result has type: " << Ty
                   << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    unsigned ZReg = 0;

    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::SUBSWrr;
      ZReg = AArch64::WZR;
    } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
      CmpOpc = AArch64::SUBSXrr;
      ZReg = AArch64::XZR;
    } else {
      return false;
    }

    // CSINC increments the result by one when the condition code is false.
    // Therefore, we have to invert the predicate to get an increment by 1 when
    // the predicate is true.
    const AArch64CC::CondCode invCC =
        changeICMPPredToAArch64CC(CmpInst::getInversePredicate(
            (CmpInst::Predicate)I.getOperand(1).getPredicate()));

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addDef(ZReg)
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(I.getOperand(0).getReg())
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(invCC);

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
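
  // Example: "G_ICMP intpred(slt), %a, %b" on s32 is selected as
  //   SUBSWrr %wzr, %a, %b        ; set NZCV
  //   CSINCWr %dst, %wzr, %wzr, GE
  // GE is the inverse of LT, so CSINC produces WZR + 1 = 1 exactly when the
  // inverted condition fails, i.e. when %a < %b.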

  case TargetOpcode::G_FCMP: {
    if (Ty != LLT::scalar(32)) {
      DEBUG(dbgs() << "G_FCMP result has type: " << Ty
                   << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::FCMPSrr;
    } else if (CmpTy == LLT::scalar(64)) {
      CmpOpc = AArch64::FCMPDrr;
    } else {
      return false;
    }

    // FIXME: regbank

    AArch64CC::CondCode CC1, CC2;
    changeFCMPPredToAArch64CC(
        (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    const unsigned DefReg = I.getOperand(0).getReg();
    unsigned Def1Reg = DefReg;
    if (CC2 != AArch64CC::AL)
      Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(Def1Reg)
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(getInvertedCondCode(CC1));

    if (CC2 != AArch64CC::AL) {
      unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
      MachineInstr &CSet2MI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
               .addDef(Def2Reg)
               .addUse(AArch64::WZR)
               .addUse(AArch64::WZR)
               .addImm(getInvertedCondCode(CC2));
      MachineInstr &OrMI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
               .addDef(DefReg)
               .addUse(Def1Reg)
               .addUse(Def2Reg);
      constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
      constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
    }

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_VASTART:
    return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
                                : selectVaStartAAPCS(I, MF, MRI);
  case TargetOpcode::G_IMPLICIT_DEF:
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const unsigned DstReg = I.getOperand(0).getReg();
    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const TargetRegisterClass *DstRC =
        getRegClassForTypeOnBank(DstTy, DstRB, RBI);
    RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
    return true;
  }

  return false;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
  MachineInstr &MI = *Root.getParent();
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in, however
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  uint64_t Immed;
  if (Root.isImm())
    Immed = Root.getImm();
  else if (Root.isCImm())
    Immed = Root.getCImm()->getZExtValue();
  else if (Root.isReg()) {
    MachineInstr *Def = MRI.getVRegDef(Root.getReg());
    if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
      return None;
    MachineOperand &Op1 = Def->getOperand(1);
    if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
      return None;
    Immed = Op1.getCImm()->getZExtValue();
  } else
    return None;

  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return None;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
  }};
}
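
// Example: 0x456000 has its low 12 bits clear and fits in bits [12, 23], so
// it renders as imm = 0x456 with an LSL #12 shifter (usable as, e.g.,
// "ADD Xd, Xn, #0x456, lsl #12"); a value like 0x456001 fits neither form
// and returns None.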

/// Select a "register plus unscaled signed 9-bit immediate" address. This
/// should only match when there is an offset that is not valid for a scaled
/// immediate addressing mode. The "Size" argument is the size in bytes of the
/// memory reference, which is needed here to know what is valid for a scaled
/// immediate.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
                                                   unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  if (!isBaseWithConstantOffset(Root, MRI))
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  MachineOperand &OffImm = RootDef->getOperand(2);
  if (!OffImm.isReg())
    return None;
  MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
  if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
    return None;
  int64_t RHSC;
  MachineOperand &RHSOp1 = RHS->getOperand(1);
  if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
    return None;
  RHSC = RHSOp1.getCImm()->getSExtValue();

  // If the offset is valid as a scaled immediate, don't match here.
  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
    return None;
  if (RHSC >= -256 && RHSC < 256) {
    MachineOperand &Base = RootDef->getOperand(1);
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
    }};
  }
  return None;
}
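
// Example: a 4-byte access at base + 3 is not valid for the scaled form
// (3 & 3 != 0) but fits the signed 9-bit range, so it matches here and ends
// up as an unscaled LDUR/STUR with offset 3. Negative offsets such as
// base - 16 also land here, since the scaled immediate form is unsigned.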

/// Select a "register plus scaled unsigned 12-bit immediate" address. The
/// "Size" argument is the size in bytes of the memory reference, which
/// determines the scale.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
                                                  unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
    if (LHSDef && RHSDef) {
      int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
          return {{
              [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
              [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
          }};

        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
        }};
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (selectAddrModeUnscaled(Root, Size).hasValue())
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
  }};
}

void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
  Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI);
  assert(CstVal && "Expected constant value");
  MIB.addImm(CstVal.getValue());
}

namespace llvm {
InstructionSelector *
createAArch64InstructionSelector(const AArch64TargetMachine &TM,
                                 AArch64Subtarget &Subtarget,
                                 AArch64RegisterBankInfo &RBI) {
  return new AArch64InstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm
const NoneType None
Definition: None.h:24
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
MO_G3 - A symbol operand with this flag (granule 3) represents the high 16-bits of a 64-bit address...
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1542
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
MachineBasicBlock * getMBB() const
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
void setTargetFlags(unsigned F)
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:271
bool isScalar() const
static CondCode getInvertedCondCode(CondCode Code)
unsigned getReg() const
getReg - Returns the register number.
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address...
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned char TargetFlags=0) const
unsigned createGenericVirtualRegister(LLT Ty)
Create and return a new generic virtual register with low-level type Ty.
iterator_range< mop_iterator > operands()
Definition: MachineInstr.h:335
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
unsigned getBitWidth() const
getBitWidth - Return the bitwidth of this constant.
Definition: Constants.h:143
MO_G2 - A symbol operand with this flag (granule 2) represents the bits 32-47 of a 64-bit address...
This file declares the targeting of the RegisterBankInfo class for AArch64.
Holds all the information related to register banks.
const HexagonInstrInfo * TII
const ConstantFP * getFPImm() const
unsigned getNumOperands() const
Access to explicit operands of the instruction.
Definition: MachineInstr.h:296
const MachineInstrBuilder & addUse(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
static StringRef getName(Value *V)
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:293
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
const RegClassOrRegBank & getRegClassOrRegBank(unsigned Reg) const
Return the register bank or register class of Reg.
static bool isStore(int Opcode)
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:918
#define EQ(a, b)
Definition: regexec.c:112
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
void ChangeToImmediate(int64_t ImmVal)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value...
TargetInstrInfo - Interface to description of machine instruction set.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID, unsigned OpSize)
Select the AArch64 opcode for the basic binary operation GenericOpc (such as G_OR or G_SDIV)...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
#define P(N)
Control flow instructions. These all have token chains.
Definition: ISDOpcodes.h:597
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:149
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address...
unsigned const MachineRegisterInfo * MRI
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID, unsigned OpSize)
Select the AArch64 opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the (value)...
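Usage mirrors selectBinaryOp above; a sketch:
  // Sketch: a 32-bit G_LOAD on the GPR bank maps to the unsigned-immediate
  // form LDRWui; the G_STORE counterpart would be STRWui.
  unsigned LoadOpc =
      selectLoadStoreUIOp(TargetOpcode::G_LOAD, AArch64::GPRRegBankID, 32);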
static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy)
const GlobalValue * getGlobal() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
Definition: APFloat.h:1130
bool isValid() const
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:853
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
void setImm(int64_t immVal)
self_iterator getIterator()
Definition: ilist_node.h:82
const MachineInstrBuilder & addFrameIndex(int Idx) const
T dyn_cast() const
Returns the current pointer if it is of the specified pointer type, otherwise returns null...
Definition: PointerUnion.h:142
bool isCopy() const
Definition: MachineInstr.h:860
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
StringRef getName(unsigned Opcode) const
Returns the name for the instructions with the given opcode.
Definition: MCInstrInfo.h:51
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
#define GET_GLOBALISEL_TEMPORARIES_INIT
const APFloat & getValueAPF() const
Definition: Constants.h:299
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
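Only certain rotated bit patterns are representable, so callers normally test first with the companion AArch64_AM::isLogicalImmediate. A sketch (the constant is hypothetical):
  // Sketch: fold a constant into the immediate form of AND/ORR/EOR.
  uint64_t Imm = 0xFF;
  if (AArch64_AM::isLogicalImmediate(Imm, 64)) {
    uint64_t Enc = AArch64_AM::encodeLogicalImmediate(Imm, 64);
    // Enc is now suitable as the immediate operand of e.g. ANDXri.
  }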
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:392
RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
MachineOperand class - Representation of each machine instruction operand.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Definition: PPCPredicates.h:27
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition: Utils.cpp:77
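This pairs with setDesc above: the selector rewrites a generic opcode in place, then constrains the (still generic) operands to concrete register classes. The recurring pattern in this file:
  // Pattern used throughout this file after picking a concrete opcode:
  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);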
This class implements the register bank concept.
Definition: RegisterBank.h:29
int64_t getImm() const
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:531
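Handy for turning a power-of-two access size into a scale shift when forming scaled addressing modes; a sketch:
  // Sketch: an 8-byte access scales its index by 1 << 3.
  unsigned Scale = Log2_32(8); // == 3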
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:923
Optional< int64_t > getConstantVRegVal(unsigned VReg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:174
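Typical use when folding a G_CONSTANT-defined operand into an immediate form; a sketch (Reg is hypothetical, MRI assumed in scope):
  // Sketch: fold a known-constant vreg into an immediate operand.
  if (auto Cst = getConstantVRegVal(Reg, MRI)) {
    int64_t Val = *Cst;
    // ... select an immediate-form instruction using Val ...
  }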
bool isPointer() const
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:142
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Provides the logic to select generic machine instructions.
Representation of each machine instruction.
Definition: MachineInstr.h:60
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
This class provides the information for the target register banks.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P)
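A hedged example of the mapping this file-local helper performs:
  // Sketch: signed greater-than maps to the GT condition code.
  AArch64CC::CondCode CC = changeICMPPredToAArch64CC(CmpInst::ICMP_SGT);
  // Expected: CC == AArch64CC::GT.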
InstructionSelector * createAArch64InstructionSelector(const AArch64TargetMachine &, AArch64Subtarget &, AArch64RegisterBankInfo &)
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
int64_t getOffset() const
Return the offset from the symbol in this operand.
void setReg(unsigned Reg)
Change the register this operand corresponds to.
#define I(x, y, z)
Definition: MD5.cpp:58
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page...
void setSubReg(unsigned subReg)
LLT getType(unsigned VReg) const
Get the low-level type of VReg or LLT{} if VReg is not a generic (target independent) virtual registe...
static const TargetRegisterClass * constrainGenericRegister(unsigned Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
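Typical call in this file, pinning a generic vreg to a concrete class; a sketch (DstReg is hypothetical, MRI assumed in scope):
  // Sketch: force DstReg into the 64-bit GPR class; returns null on failure.
  if (!RBI.constrainGenericRegister(DstReg, AArch64::GPR64RegClass, MRI)) {
    // Bail out: the register could not be constrained.
  }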
#define GET_GLOBALISEL_PREDICATES_INIT
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
T get() const
Returns the value of the specified pointer type.
Definition: PointerUnion.h:135
assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef())
static void changeFCMPPredToAArch64CC(CmpInst::Predicate P, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel...
Definition: TargetOpcodes.h:31
unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
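The selector uses this when the width decides between 32- and 64-bit opcodes; a sketch (DstReg is hypothetical, MRI and TRI assumed in scope):
  // Sketch: query the width of a (possibly generic) vreg, e.g. to pick
  // between a W-form and an X-form instruction.
  unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);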
constexpr char Size[]
Key for Kernel::Arg::Metadata::mSize.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:81
void ChangeToFrameIndex(int Idx)
Replace this operand with a frame index.
#define DEBUG(X)
Definition: Debug.h:118
IRTranslator LLVM IR MI
static bool unsupportedBinOp(const MachineInstr &I, const AArch64RegisterBankInfo &RBI, const MachineRegisterInfo &MRI, const AArch64RegisterInfo &TRI)
Check whether I is a currently unsupported binary operation:
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow...
APInt bitcastToAPInt() const
Definition: APFloat.h:1094
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition: Constants.h:157
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:298
const ConstantInt * getCImm() const
#define DEBUG_TYPE
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:48
A discriminated union of two pointer types, with the discriminator in the low bit of the pointer...
Definition: PointerUnion.h:87
unsigned getPredicate() const