LLVM  7.0.0svn
X86InstructionSelector.cpp
Go to the documentation of this file.
1 //===- X86InstructionSelector.cpp -----------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file implements the targeting of the InstructionSelector class for
11 /// X86.
12 /// \todo This should be generated by TableGen.
13 //===----------------------------------------------------------------------===//
14 
16 #include "X86InstrBuilder.h"
17 #include "X86InstrInfo.h"
18 #include "X86RegisterBankInfo.h"
19 #include "X86RegisterInfo.h"
20 #include "X86Subtarget.h"
21 #include "X86TargetMachine.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/InstrTypes.h"
39 #include "llvm/Support/CodeGen.h"
40 #include "llvm/Support/Debug.h"
45 #include <cassert>
46 #include <cstdint>
47 #include <tuple>
48 
49 #define DEBUG_TYPE "X86-isel"
50 
51 using namespace llvm;
52 
53 namespace {
54 
55 #define GET_GLOBALISEL_PREDICATE_BITSET
56 #include "X86GenGlobalISel.inc"
57 #undef GET_GLOBALISEL_PREDICATE_BITSET
58 
59 class X86InstructionSelector : public InstructionSelector {
60 public:
61  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
62  const X86RegisterBankInfo &RBI);
63 
64  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
65  static const char *getName() { return DEBUG_TYPE; }
66 
67 private:
68  /// tblgen-erated 'select' implementation, used as the initial selector for
69  /// the patterns that don't require complex C++.
70  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
71 
72  // TODO: remove after supported by Tablegen-erated instruction selection.
73  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
74  uint64_t Alignment) const;
75 
76  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
77  MachineFunction &MF) const;
78  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
79  MachineFunction &MF) const;
80  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
81  MachineFunction &MF) const;
82  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
83  MachineFunction &MF) const;
84  bool selectTrunc(MachineInstr &I, MachineRegisterInfo &MRI,
85  MachineFunction &MF) const;
86  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
87  MachineFunction &MF) const;
88  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
89  MachineFunction &MF) const;
90  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
91  MachineFunction &MF) const;
92  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
93  MachineFunction &MF) const;
94  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
96  MachineFunction &MF,
97  CodeGenCoverage &CoverageInfo) const;
99  MachineFunction &MF,
100  CodeGenCoverage &CoverageInfo) const;
101  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
102  MachineFunction &MF) const;
103  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
104  MachineFunction &MF) const;
105  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
106  MachineFunction &MF) const;
107  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
108  const unsigned DstReg,
109  const TargetRegisterClass *DstRC,
110  const unsigned SrcReg,
111  const TargetRegisterClass *SrcRC) const;
112  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
113  MachineFunction &MF) const;
114  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
115 
116  // emit insert subreg instruction and insert it before MachineInstr &I
117  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
118  MachineRegisterInfo &MRI, MachineFunction &MF) const;
119  // emit extract subreg instruction and insert it before MachineInstr &I
120  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
121  MachineRegisterInfo &MRI, MachineFunction &MF) const;
122 
123  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
124  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
125  MachineRegisterInfo &MRI) const;
126 
127  const X86TargetMachine &TM;
128  const X86Subtarget &STI;
129  const X86InstrInfo &TII;
130  const X86RegisterInfo &TRI;
131  const X86RegisterBankInfo &RBI;
132 
133 #define GET_GLOBALISEL_PREDICATES_DECL
134 #include "X86GenGlobalISel.inc"
135 #undef GET_GLOBALISEL_PREDICATES_DECL
136 
137 #define GET_GLOBALISEL_TEMPORARIES_DECL
138 #include "X86GenGlobalISel.inc"
139 #undef GET_GLOBALISEL_TEMPORARIES_DECL
140 };
141 
142 } // end anonymous namespace
143 
144 #define GET_GLOBALISEL_IMPL
145 #include "X86GenGlobalISel.inc"
146 #undef GET_GLOBALISEL_IMPL
147 
148 X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
149  const X86Subtarget &STI,
150  const X86RegisterBankInfo &RBI)
151  : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
152  TRI(*STI.getRegisterInfo()), RBI(RBI),
154 #include "X86GenGlobalISel.inc"
157 #include "X86GenGlobalISel.inc"
159 {
160 }
161 
162 // FIXME: This should be target-independent, inferred from the types declared
163 // for each class in the bank.
164 const TargetRegisterClass *
166  if (RB.getID() == X86::GPRRegBankID) {
167  if (Ty.getSizeInBits() <= 8)
168  return &X86::GR8RegClass;
169  if (Ty.getSizeInBits() == 16)
170  return &X86::GR16RegClass;
171  if (Ty.getSizeInBits() == 32)
172  return &X86::GR32RegClass;
173  if (Ty.getSizeInBits() == 64)
174  return &X86::GR64RegClass;
175  }
176  if (RB.getID() == X86::VECRRegBankID) {
177  if (Ty.getSizeInBits() == 32)
178  return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
179  if (Ty.getSizeInBits() == 64)
180  return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
181  if (Ty.getSizeInBits() == 128)
182  return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
183  if (Ty.getSizeInBits() == 256)
184  return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
185  if (Ty.getSizeInBits() == 512)
186  return &X86::VR512RegClass;
187  }
188 
189  llvm_unreachable("Unknown RegBank!");
190 }
191 
192 const TargetRegisterClass *
194  MachineRegisterInfo &MRI) const {
195  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
196  return getRegClass(Ty, RegBank);
197 }
198 
199 static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
200  unsigned SubIdx = X86::NoSubRegister;
201  if (RC == &X86::GR32RegClass) {
202  SubIdx = X86::sub_32bit;
203  } else if (RC == &X86::GR16RegClass) {
204  SubIdx = X86::sub_16bit;
205  } else if (RC == &X86::GR8RegClass) {
206  SubIdx = X86::sub_8bit;
207  }
208 
209  return SubIdx;
210 }
211 
212 static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
213  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
214  if (X86::GR64RegClass.contains(Reg))
215  return &X86::GR64RegClass;
216  if (X86::GR32RegClass.contains(Reg))
217  return &X86::GR32RegClass;
218  if (X86::GR16RegClass.contains(Reg))
219  return &X86::GR16RegClass;
220  if (X86::GR8RegClass.contains(Reg))
221  return &X86::GR8RegClass;
222 
223  llvm_unreachable("Unknown RegClass for PhysReg!");
224 }
225 
226 // Set X86 Opcode and constrain DestReg.
228  MachineRegisterInfo &MRI) const {
229  unsigned DstReg = I.getOperand(0).getReg();
230  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
231  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
232 
233  unsigned SrcReg = I.getOperand(1).getReg();
234  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
235  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
236 
237  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
238  assert(I.isCopy() && "Generic operators do not allow physical registers");
239 
240  if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
241  DstRegBank.getID() == X86::GPRRegBankID) {
242 
243  const TargetRegisterClass *SrcRC =
244  getRegClass(MRI.getType(SrcReg), SrcRegBank);
245  const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);
246 
247  if (SrcRC != DstRC) {
248  // This case can be generated by ABI lowering, performe anyext
249  unsigned ExtSrc = MRI.createVirtualRegister(DstRC);
250  BuildMI(*I.getParent(), I, I.getDebugLoc(),
251  TII.get(TargetOpcode::SUBREG_TO_REG))
252  .addDef(ExtSrc)
253  .addImm(0)
254  .addReg(SrcReg)
255  .addImm(getSubRegIndex(SrcRC));
256 
257  I.getOperand(1).setReg(ExtSrc);
258  }
259  }
260 
261  return true;
262  }
263 
264  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
265  "No phys reg on generic operators");
266  assert((DstSize == SrcSize ||
267  // Copies are a mean to setup initial types, the number of
268  // bits may not exactly match.
269  (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
270  DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
271  "Copy with different width?!");
272 
273  const TargetRegisterClass *DstRC =
274  getRegClass(MRI.getType(DstReg), DstRegBank);
275 
276  if (SrcRegBank.getID() == X86::GPRRegBankID &&
277  DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
278  TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
279  // Change the physical register to performe truncate.
280 
281  const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);
282 
283  if (DstRC != SrcRC) {
284  I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
285  I.getOperand(1).substPhysReg(SrcReg, TRI);
286  }
287  }
288 
289  // No need to constrain SrcReg. It will get constrained when
290  // we hit another of its use or its defs.
291  // Copies do not have constraints.
292  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
293  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
294  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
295  DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
296  << " operand\n");
297  return false;
298  }
299  }
300  I.setDesc(TII.get(X86::COPY));
301  return true;
302 }
303 
304 bool X86InstructionSelector::select(MachineInstr &I,
305  CodeGenCoverage &CoverageInfo) const {
306  assert(I.getParent() && "Instruction should be in a basic block!");
307  assert(I.getParent()->getParent() && "Instruction should be in a function!");
308 
309  MachineBasicBlock &MBB = *I.getParent();
310  MachineFunction &MF = *MBB.getParent();
311  MachineRegisterInfo &MRI = MF.getRegInfo();
312 
313  unsigned Opcode = I.getOpcode();
314  if (!isPreISelGenericOpcode(Opcode)) {
315  // Certain non-generic instructions also need some special handling.
316 
317  if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
318  return false;
319 
320  if (I.isCopy())
321  return selectCopy(I, MRI);
322 
323  return true;
324  }
325 
327  "Generic instruction has unexpected implicit operands\n");
328 
329  if (selectImpl(I, CoverageInfo))
330  return true;
331 
332  DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));
333 
334  // TODO: This should be implemented by tblgen.
335  switch (I.getOpcode()) {
336  default:
337  return false;
338  case TargetOpcode::G_STORE:
339  case TargetOpcode::G_LOAD:
340  return selectLoadStoreOp(I, MRI, MF);
341  case TargetOpcode::G_GEP:
342  case TargetOpcode::G_FRAME_INDEX:
343  return selectFrameIndexOrGep(I, MRI, MF);
344  case TargetOpcode::G_GLOBAL_VALUE:
345  return selectGlobalValue(I, MRI, MF);
346  case TargetOpcode::G_CONSTANT:
347  return selectConstant(I, MRI, MF);
348  case TargetOpcode::G_FCONSTANT:
349  return materializeFP(I, MRI, MF);
350  case TargetOpcode::G_TRUNC:
351  return selectTrunc(I, MRI, MF);
352  case TargetOpcode::G_ZEXT:
353  return selectZext(I, MRI, MF);
354  case TargetOpcode::G_ANYEXT:
355  return selectAnyext(I, MRI, MF);
356  case TargetOpcode::G_ICMP:
357  return selectCmp(I, MRI, MF);
358  case TargetOpcode::G_UADDE:
359  return selectUadde(I, MRI, MF);
360  case TargetOpcode::G_UNMERGE_VALUES:
361  return selectUnmergeValues(I, MRI, MF, CoverageInfo);
362  case TargetOpcode::G_MERGE_VALUES:
363  return selectMergeValues(I, MRI, MF, CoverageInfo);
364  case TargetOpcode::G_EXTRACT:
365  return selectExtract(I, MRI, MF);
366  case TargetOpcode::G_INSERT:
367  return selectInsert(I, MRI, MF);
368  case TargetOpcode::G_BRCOND:
369  return selectCondBranch(I, MRI, MF);
370  case TargetOpcode::G_IMPLICIT_DEF:
371  case TargetOpcode::G_PHI:
372  return selectImplicitDefOrPHI(I, MRI);
373  }
374 
375  return false;
376 }
377 
378 unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
379  const RegisterBank &RB,
380  unsigned Opc,
381  uint64_t Alignment) const {
382  bool Isload = (Opc == TargetOpcode::G_LOAD);
383  bool HasAVX = STI.hasAVX();
384  bool HasAVX512 = STI.hasAVX512();
385  bool HasVLX = STI.hasVLX();
386 
387  if (Ty == LLT::scalar(8)) {
388  if (X86::GPRRegBankID == RB.getID())
389  return Isload ? X86::MOV8rm : X86::MOV8mr;
390  } else if (Ty == LLT::scalar(16)) {
391  if (X86::GPRRegBankID == RB.getID())
392  return Isload ? X86::MOV16rm : X86::MOV16mr;
393  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
394  if (X86::GPRRegBankID == RB.getID())
395  return Isload ? X86::MOV32rm : X86::MOV32mr;
396  if (X86::VECRRegBankID == RB.getID())
397  return Isload ? (HasAVX512 ? X86::VMOVSSZrm
398  : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm)
399  : (HasAVX512 ? X86::VMOVSSZmr
400  : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
401  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
402  if (X86::GPRRegBankID == RB.getID())
403  return Isload ? X86::MOV64rm : X86::MOV64mr;
404  if (X86::VECRRegBankID == RB.getID())
405  return Isload ? (HasAVX512 ? X86::VMOVSDZrm
406  : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm)
407  : (HasAVX512 ? X86::VMOVSDZmr
408  : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
409  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
410  if (Alignment >= 16)
411  return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
412  : HasAVX512
413  ? X86::VMOVAPSZ128rm_NOVLX
414  : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
415  : (HasVLX ? X86::VMOVAPSZ128mr
416  : HasAVX512
417  ? X86::VMOVAPSZ128mr_NOVLX
418  : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
419  else
420  return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
421  : HasAVX512
422  ? X86::VMOVUPSZ128rm_NOVLX
423  : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
424  : (HasVLX ? X86::VMOVUPSZ128mr
425  : HasAVX512
426  ? X86::VMOVUPSZ128mr_NOVLX
427  : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
428  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
429  if (Alignment >= 32)
430  return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
431  : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
432  : X86::VMOVAPSYrm)
433  : (HasVLX ? X86::VMOVAPSZ256mr
434  : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
435  : X86::VMOVAPSYmr);
436  else
437  return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
438  : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
439  : X86::VMOVUPSYrm)
440  : (HasVLX ? X86::VMOVUPSZ256mr
441  : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
442  : X86::VMOVUPSYmr);
443  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
444  if (Alignment >= 64)
445  return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
446  else
447  return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
448  }
449  return Opc;
450 }
451 
452 // Fill in an address from the given instruction.
453 static void X86SelectAddress(const MachineInstr &I,
454  const MachineRegisterInfo &MRI,
455  X86AddressMode &AM) {
456  assert(I.getOperand(0).isReg() && "unsupported opperand.");
457  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
458  "unsupported type.");
459 
460  if (I.getOpcode() == TargetOpcode::G_GEP) {
461  if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
462  int64_t Imm = *COff;
463  if (isInt<32>(Imm)) { // Check for displacement overflow.
464  AM.Disp = static_cast<int32_t>(Imm);
465  AM.Base.Reg = I.getOperand(1).getReg();
466  return;
467  }
468  }
469  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
470  AM.Base.FrameIndex = I.getOperand(1).getIndex();
471  AM.BaseType = X86AddressMode::FrameIndexBase;
472  return;
473  }
474 
475  // Default behavior.
476  AM.Base.Reg = I.getOperand(0).getReg();
477 }
478 
479 bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
480  MachineRegisterInfo &MRI,
481  MachineFunction &MF) const {
482  unsigned Opc = I.getOpcode();
483 
484  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
485  "unexpected instruction");
486 
487  const unsigned DefReg = I.getOperand(0).getReg();
488  LLT Ty = MRI.getType(DefReg);
489  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
490 
491  auto &MemOp = **I.memoperands_begin();
492  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
493  DEBUG(dbgs() << "Atomic load/store not supported yet\n");
494  return false;
495  }
496 
497  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
498  if (NewOpc == Opc)
499  return false;
500 
501  X86AddressMode AM;
502  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);
503 
504  I.setDesc(TII.get(NewOpc));
505  MachineInstrBuilder MIB(MF, I);
506  if (Opc == TargetOpcode::G_LOAD) {
507  I.RemoveOperand(1);
508  addFullAddress(MIB, AM);
509  } else {
510  // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
511  I.RemoveOperand(1);
512  I.RemoveOperand(0);
513  addFullAddress(MIB, AM).addUse(DefReg);
514  }
515  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
516 }
517 
518 static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
519  if (Ty == LLT::pointer(0, 64))
520  return X86::LEA64r;
521  else if (Ty == LLT::pointer(0, 32))
522  return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
523  else
524  llvm_unreachable("Can't get LEA opcode. Unsupported type.");
525 }
526 
527 bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
528  MachineRegisterInfo &MRI,
529  MachineFunction &MF) const {
530  unsigned Opc = I.getOpcode();
531 
532  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
533  "unexpected instruction");
534 
535  const unsigned DefReg = I.getOperand(0).getReg();
536  LLT Ty = MRI.getType(DefReg);
537 
538  // Use LEA to calculate frame index and GEP
539  unsigned NewOpc = getLeaOP(Ty, STI);
540  I.setDesc(TII.get(NewOpc));
541  MachineInstrBuilder MIB(MF, I);
542 
543  if (Opc == TargetOpcode::G_FRAME_INDEX) {
544  addOffset(MIB, 0);
545  } else {
546  MachineOperand &InxOp = I.getOperand(2);
547  I.addOperand(InxOp); // set IndexReg
548  InxOp.ChangeToImmediate(1); // set Scale
549  MIB.addImm(0).addReg(0);
550  }
551 
552  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
553 }
554 
555 bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
556  MachineRegisterInfo &MRI,
557  MachineFunction &MF) const {
558  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
559  "unexpected instruction");
560 
561  auto GV = I.getOperand(1).getGlobal();
562  if (GV->isThreadLocal()) {
563  return false; // TODO: we don't support TLS yet.
564  }
565 
566  // Can't handle alternate code models yet.
567  if (TM.getCodeModel() != CodeModel::Small)
568  return false;
569 
570  X86AddressMode AM;
571  AM.GV = GV;
572  AM.GVOpFlags = STI.classifyGlobalReference(GV);
573 
574  // TODO: The ABI requires an extra load. not supported yet.
576  return false;
577 
578  // TODO: This reference is relative to the pic base. not supported yet.
580  return false;
581 
582  if (STI.isPICStyleRIPRel()) {
583  // Use rip-relative addressing.
584  assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
585  AM.Base.Reg = X86::RIP;
586  }
587 
588  const unsigned DefReg = I.getOperand(0).getReg();
589  LLT Ty = MRI.getType(DefReg);
590  unsigned NewOpc = getLeaOP(Ty, STI);
591 
592  I.setDesc(TII.get(NewOpc));
593  MachineInstrBuilder MIB(MF, I);
594 
595  I.RemoveOperand(1);
596  addFullAddress(MIB, AM);
597 
598  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
599 }
600 
601 bool X86InstructionSelector::selectConstant(MachineInstr &I,
602  MachineRegisterInfo &MRI,
603  MachineFunction &MF) const {
604  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
605  "unexpected instruction");
606 
607  const unsigned DefReg = I.getOperand(0).getReg();
608  LLT Ty = MRI.getType(DefReg);
609 
610  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
611  return false;
612 
613  uint64_t Val = 0;
614  if (I.getOperand(1).isCImm()) {
615  Val = I.getOperand(1).getCImm()->getZExtValue();
616  I.getOperand(1).ChangeToImmediate(Val);
617  } else if (I.getOperand(1).isImm()) {
618  Val = I.getOperand(1).getImm();
619  } else
620  llvm_unreachable("Unsupported operand type.");
621 
622  unsigned NewOpc;
623  switch (Ty.getSizeInBits()) {
624  case 8:
625  NewOpc = X86::MOV8ri;
626  break;
627  case 16:
628  NewOpc = X86::MOV16ri;
629  break;
630  case 32:
631  NewOpc = X86::MOV32ri;
632  break;
633  case 64:
634  // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
635  if (isInt<32>(Val))
636  NewOpc = X86::MOV64ri32;
637  else
638  NewOpc = X86::MOV64ri;
639  break;
640  default:
641  llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
642  }
643 
644  I.setDesc(TII.get(NewOpc));
645  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
646 }
647 
648 // Helper function for selectTrunc and selectAnyext.
649 // Returns true if DstRC lives on a floating register class and
650 // SrcRC lives on a 128-bit vector class.
651 static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
652  const TargetRegisterClass *SrcRC) {
653  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
654  DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
655  (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
656 }
657 
658 bool X86InstructionSelector::selectTurnIntoCOPY(
659  MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
660  const TargetRegisterClass *DstRC, const unsigned SrcReg,
661  const TargetRegisterClass *SrcRC) const {
662 
663  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
664  !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
665  DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
666  << " operand\n");
667  return false;
668  }
669  I.setDesc(TII.get(X86::COPY));
670  return true;
671 }
672 
673 bool X86InstructionSelector::selectTrunc(MachineInstr &I,
674  MachineRegisterInfo &MRI,
675  MachineFunction &MF) const {
676  assert((I.getOpcode() == TargetOpcode::G_TRUNC) && "unexpected instruction");
677 
678  const unsigned DstReg = I.getOperand(0).getReg();
679  const unsigned SrcReg = I.getOperand(1).getReg();
680 
681  const LLT DstTy = MRI.getType(DstReg);
682  const LLT SrcTy = MRI.getType(SrcReg);
683 
684  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
685  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
686 
687  if (DstRB.getID() != SrcRB.getID()) {
688  DEBUG(dbgs() << "G_TRUNC input/output on different banks\n");
689  return false;
690  }
691 
692  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
693  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
694 
695  if (!DstRC || !SrcRC)
696  return false;
697 
698  // If that's truncation of the value that lives on the vector class and goes
699  // into the floating class, just replace it with copy, as we are able to
700  // select it as a regular move.
701  if (canTurnIntoCOPY(DstRC, SrcRC))
702  return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);
703 
704  if (DstRB.getID() != X86::GPRRegBankID)
705  return false;
706 
707  unsigned SubIdx;
708  if (DstRC == SrcRC) {
709  // Nothing to be done
710  SubIdx = X86::NoSubRegister;
711  } else if (DstRC == &X86::GR32RegClass) {
712  SubIdx = X86::sub_32bit;
713  } else if (DstRC == &X86::GR16RegClass) {
714  SubIdx = X86::sub_16bit;
715  } else if (DstRC == &X86::GR8RegClass) {
716  SubIdx = X86::sub_8bit;
717  } else {
718  return false;
719  }
720 
721  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);
722 
723  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
724  !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
725  DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
726  return false;
727  }
728 
729  I.getOperand(1).setSubReg(SubIdx);
730 
731  I.setDesc(TII.get(X86::COPY));
732  return true;
733 }
734 
735 bool X86InstructionSelector::selectZext(MachineInstr &I,
736  MachineRegisterInfo &MRI,
737  MachineFunction &MF) const {
738  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");
739 
740  const unsigned DstReg = I.getOperand(0).getReg();
741  const unsigned SrcReg = I.getOperand(1).getReg();
742 
743  const LLT DstTy = MRI.getType(DstReg);
744  const LLT SrcTy = MRI.getType(SrcReg);
745 
746  if (SrcTy != LLT::scalar(1))
747  return false;
748 
749  unsigned AndOpc;
750  if (DstTy == LLT::scalar(8))
751  AndOpc = X86::AND8ri;
752  else if (DstTy == LLT::scalar(16))
753  AndOpc = X86::AND16ri8;
754  else if (DstTy == LLT::scalar(32))
755  AndOpc = X86::AND32ri8;
756  else if (DstTy == LLT::scalar(64))
757  AndOpc = X86::AND64ri8;
758  else
759  return false;
760 
761  unsigned DefReg = SrcReg;
762  if (DstTy != LLT::scalar(8)) {
763  DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
764  BuildMI(*I.getParent(), I, I.getDebugLoc(),
765  TII.get(TargetOpcode::SUBREG_TO_REG), DefReg)
766  .addImm(0)
767  .addReg(SrcReg)
768  .addImm(X86::sub_8bit);
769  }
770 
771  MachineInstr &AndInst =
772  *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
773  .addReg(DefReg)
774  .addImm(1);
775 
776  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);
777 
778  I.eraseFromParent();
779  return true;
780 }
781 
782 bool X86InstructionSelector::selectAnyext(MachineInstr &I,
783  MachineRegisterInfo &MRI,
784  MachineFunction &MF) const {
785  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");
786 
787  const unsigned DstReg = I.getOperand(0).getReg();
788  const unsigned SrcReg = I.getOperand(1).getReg();
789 
790  const LLT DstTy = MRI.getType(DstReg);
791  const LLT SrcTy = MRI.getType(SrcReg);
792 
793  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
794  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
795 
796  assert(DstRB.getID() == SrcRB.getID() &&
797  "G_ANYEXT input/output on different banks\n");
798 
799  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
800  "G_ANYEXT incorrect operand size");
801 
802  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
803  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
804 
805  // If that's ANY_EXT of the value that lives on the floating class and goes
806  // into the vector class, just replace it with copy, as we are able to select
807  // it as a regular move.
808  if (canTurnIntoCOPY(SrcRC, DstRC))
809  return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);
810 
811  if (DstRB.getID() != X86::GPRRegBankID)
812  return false;
813 
814  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
815  !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
816  DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
817  << " operand\n");
818  return false;
819  }
820 
821  if (SrcRC == DstRC) {
822  I.setDesc(TII.get(X86::COPY));
823  return true;
824  }
825 
826  BuildMI(*I.getParent(), I, I.getDebugLoc(),
827  TII.get(TargetOpcode::SUBREG_TO_REG))
828  .addDef(DstReg)
829  .addImm(0)
830  .addReg(SrcReg)
831  .addImm(getSubRegIndex(SrcRC));
832 
833  I.eraseFromParent();
834  return true;
835 }
836 
837 bool X86InstructionSelector::selectCmp(MachineInstr &I,
838  MachineRegisterInfo &MRI,
839  MachineFunction &MF) const {
840  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");
841 
842  X86::CondCode CC;
843  bool SwapArgs;
844  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
846  unsigned OpSet = X86::getSETFromCond(CC);
847 
848  unsigned LHS = I.getOperand(2).getReg();
849  unsigned RHS = I.getOperand(3).getReg();
850 
851  if (SwapArgs)
852  std::swap(LHS, RHS);
853 
854  unsigned OpCmp;
855  LLT Ty = MRI.getType(LHS);
856 
857  switch (Ty.getSizeInBits()) {
858  default:
859  return false;
860  case 8:
861  OpCmp = X86::CMP8rr;
862  break;
863  case 16:
864  OpCmp = X86::CMP16rr;
865  break;
866  case 32:
867  OpCmp = X86::CMP32rr;
868  break;
869  case 64:
870  OpCmp = X86::CMP64rr;
871  break;
872  }
873 
875  *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
876  .addReg(LHS)
877  .addReg(RHS);
878 
879  MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
880  TII.get(OpSet), I.getOperand(0).getReg());
881 
882  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
883  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);
884 
885  I.eraseFromParent();
886  return true;
887 }
888 
889 bool X86InstructionSelector::selectUadde(MachineInstr &I,
890  MachineRegisterInfo &MRI,
891  MachineFunction &MF) const {
892  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");
893 
894  const unsigned DstReg = I.getOperand(0).getReg();
895  const unsigned CarryOutReg = I.getOperand(1).getReg();
896  const unsigned Op0Reg = I.getOperand(2).getReg();
897  const unsigned Op1Reg = I.getOperand(3).getReg();
898  unsigned CarryInReg = I.getOperand(4).getReg();
899 
900  const LLT DstTy = MRI.getType(DstReg);
901 
902  if (DstTy != LLT::scalar(32))
903  return false;
904 
905  // find CarryIn def instruction.
906  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
907  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
908  CarryInReg = Def->getOperand(1).getReg();
909  Def = MRI.getVRegDef(CarryInReg);
910  }
911 
912  unsigned Opcode;
913  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
914  // carry set by prev ADD.
915 
916  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
917  .addReg(CarryInReg);
918 
919  if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
920  return false;
921 
922  Opcode = X86::ADC32rr;
923  } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) {
924  // carry is constant, support only 0.
925  if (*val != 0)
926  return false;
927 
928  Opcode = X86::ADD32rr;
929  } else
930  return false;
931 
932  MachineInstr &AddInst =
933  *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
934  .addReg(Op0Reg)
935  .addReg(Op1Reg);
936 
937  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
938  .addReg(X86::EFLAGS);
939 
940  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
941  !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
942  return false;
943 
944  I.eraseFromParent();
945  return true;
946 }
947 
// Select G_EXTRACT: extract a subvector from a wider vector register.
// An extract at bit offset 0 is lowered to a plain subregister copy; other
// subvector-aligned offsets become VEXTRACT* instructions when the subtarget
// supports them. Returns false (no change to I) when the pattern is not
// handled here.
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // Meanwhile handle vector type only.
  if (!DstTy.isVector())
    return false;

  // The bit offset must fall on a subvector boundary (a multiple of the
  // destination width).
  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not extract subvector.

  if (Index == 0) {
    // Replace by extract subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  // Choose the extract opcode from the source/destination widths and the
  // available ISA extensions.
  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VEXTRACT immediate: the instruction encodes a subvector
  // index, not a bit offset.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
1004 
1005 bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
1006  MachineInstr &I,
1007  MachineRegisterInfo &MRI,
1008  MachineFunction &MF) const {
1009  const LLT DstTy = MRI.getType(DstReg);
1010  const LLT SrcTy = MRI.getType(SrcReg);
1011  unsigned SubIdx = X86::NoSubRegister;
1012 
1013  if (!DstTy.isVector() || !SrcTy.isVector())
1014  return false;
1015 
1016  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
1017  "Incorrect Src/Dst register size");
1018 
1019  if (DstTy.getSizeInBits() == 128)
1020  SubIdx = X86::sub_xmm;
1021  else if (DstTy.getSizeInBits() == 256)
1022  SubIdx = X86::sub_ymm;
1023  else
1024  return false;
1025 
1026  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
1027  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
1028 
1029  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);
1030 
1031  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
1032  !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1033  DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1034  return false;
1035  }
1036 
1037  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
1038  .addReg(SrcReg, 0, SubIdx);
1039 
1040  return true;
1041 }
1042 
1043 bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
1044  MachineInstr &I,
1045  MachineRegisterInfo &MRI,
1046  MachineFunction &MF) const {
1047  const LLT DstTy = MRI.getType(DstReg);
1048  const LLT SrcTy = MRI.getType(SrcReg);
1049  unsigned SubIdx = X86::NoSubRegister;
1050 
1051  // TODO: support scalar types
1052  if (!DstTy.isVector() || !SrcTy.isVector())
1053  return false;
1054 
1055  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
1056  "Incorrect Src/Dst register size");
1057 
1058  if (SrcTy.getSizeInBits() == 128)
1059  SubIdx = X86::sub_xmm;
1060  else if (SrcTy.getSizeInBits() == 256)
1061  SubIdx = X86::sub_ymm;
1062  else
1063  return false;
1064 
1065  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
1066  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
1067 
1068  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
1069  !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1070  DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
1071  return false;
1072  }
1073 
1074  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
1075  .addReg(DstReg, RegState::DefineNoRead, SubIdx)
1076  .addReg(SrcReg);
1077 
1078  return true;
1079 }
1080 
// Select G_INSERT: insert a subvector into a wider vector register.
// Inserting at offset 0 over an undef source becomes a plain subregister
// copy; other subvector-aligned offsets are lowered to VINSERT*
// instructions when the subtarget supports them.
bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // Meanwhile handle vector type only.
  if (!DstTy.isVector())
    return false;

  // The bit offset must fall on a boundary of the inserted value's width.
  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not insert subvector.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  // Choose the insert opcode from the destination/inserted widths and the
  // available ISA extensions.
  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VINSERT immediate: the instruction encodes a subvector
  // index, not a bit offset.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
1138 
1141  CodeGenCoverage &CoverageInfo) const {
1142  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
1143  "unexpected instruction");
1144 
1145  // Split to extracts.
1146  unsigned NumDefs = I.getNumOperands() - 1;
1147  unsigned SrcReg = I.getOperand(NumDefs).getReg();
1148  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
1149 
1150  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
1151  MachineInstr &ExtrInst =
1152  *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1153  TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
1154  .addReg(SrcReg)
1155  .addImm(Idx * DefSize);
1156 
1157  if (!select(ExtrInst, CoverageInfo))
1158  return false;
1159  }
1160 
1161  I.eraseFromParent();
1162  return true;
1163 }
1164 
1167  CodeGenCoverage &CoverageInfo) const {
1168  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
1169  "unexpected instruction");
1170 
1171  // Split to inserts.
1172  unsigned DstReg = I.getOperand(0).getReg();
1173  unsigned SrcReg0 = I.getOperand(1).getReg();
1174 
1175  const LLT DstTy = MRI.getType(DstReg);
1176  const LLT SrcTy = MRI.getType(SrcReg0);
1177  unsigned SrcSize = SrcTy.getSizeInBits();
1178 
1179  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
1180 
1181  // For the first src use insertSubReg.
1182  unsigned DefReg = MRI.createGenericVirtualRegister(DstTy);
1183  MRI.setRegBank(DefReg, RegBank);
1184  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
1185  return false;
1186 
1187  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
1188  unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
1189  MRI.setRegBank(Tmp, RegBank);
1190 
1191  MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1192  TII.get(TargetOpcode::G_INSERT), Tmp)
1193  .addReg(DefReg)
1194  .addReg(I.getOperand(Idx).getReg())
1195  .addImm((Idx - 1) * SrcSize);
1196 
1197  DefReg = Tmp;
1198 
1199  if (!select(InsertInst, CoverageInfo))
1200  return false;
1201  }
1202 
1203  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1204  TII.get(TargetOpcode::COPY), DstReg)
1205  .addReg(DefReg);
1206 
1207  if (!select(CopyInst, CoverageInfo))
1208  return false;
1209 
1210  I.eraseFromParent();
1211  return true;
1212 }
1213 
1214 bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
1215  MachineRegisterInfo &MRI,
1216  MachineFunction &MF) const {
1217  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");
1218 
1219  const unsigned CondReg = I.getOperand(0).getReg();
1220  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1221 
1222  MachineInstr &TestInst =
1223  *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
1224  .addReg(CondReg)
1225  .addImm(1);
1226  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JNE_1))
1227  .addMBB(DestMBB);
1228 
1229  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);
1230 
1231  I.eraseFromParent();
1232  return true;
1233 }
1234 
1235 bool X86InstructionSelector::materializeFP(MachineInstr &I,
1236  MachineRegisterInfo &MRI,
1237  MachineFunction &MF) const {
1238  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
1239  "unexpected instruction");
1240 
1241  // Can't handle alternate code models yet.
1242  CodeModel::Model CM = TM.getCodeModel();
1243  if (CM != CodeModel::Small && CM != CodeModel::Large)
1244  return false;
1245 
1246  const unsigned DstReg = I.getOperand(0).getReg();
1247  const LLT DstTy = MRI.getType(DstReg);
1248  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
1249  unsigned Align = DstTy.getSizeInBits();
1250  const DebugLoc &DbgLoc = I.getDebugLoc();
1251 
1252  unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);
1253 
1254  // Create the load from the constant pool.
1255  const ConstantFP *CFP = I.getOperand(1).getFPImm();
1256  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);
1257  MachineInstr *LoadInst = nullptr;
1258  unsigned char OpFlag = STI.classifyLocalReference(nullptr);
1259 
1260  if (CM == CodeModel::Large && STI.is64Bit()) {
1261  // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
1262  // they cannot be folded into immediate fields.
1263 
1264  unsigned AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
1265  BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
1266  .addConstantPoolIndex(CPI, 0, OpFlag);
1267 
1269  MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
1271 
1272  LoadInst =
1273  addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
1274  AddrReg)
1275  .addMemOperand(MMO);
1276 
1277  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
1278  // Handle the case when globals fit in our immediate field.
1279  // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.
1280 
1281  // x86-32 PIC requires a PIC base register for constant pools.
1282  unsigned PICBase = 0;
1283  if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
1284  // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
1285  // In DAGISEL the code that initialize it generated by the CGBR pass.
1286  return false; // TODO support the mode.
1287  } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
1288  PICBase = X86::RIP;
1289 
1290  LoadInst = addConstantPoolReference(
1291  BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
1292  OpFlag);
1293  } else
1294  return false;
1295 
1296  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
1297  I.eraseFromParent();
1298  return true;
1299 }
1300 
1301 bool X86InstructionSelector::selectImplicitDefOrPHI(
1302  MachineInstr &I, MachineRegisterInfo &MRI) const {
1303  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
1304  I.getOpcode() == TargetOpcode::G_PHI) &&
1305  "unexpected instruction");
1306 
1307  unsigned DstReg = I.getOperand(0).getReg();
1308 
1309  if (!MRI.getRegClassOrNull(DstReg)) {
1310  const LLT DstTy = MRI.getType(DstReg);
1311  const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);
1312 
1313  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
1314  DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
1315  << " operand\n");
1316  return false;
1317  }
1318  }
1319 
1320  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1321  I.setDesc(TII.get(X86::IMPLICIT_DEF));
1322  else
1323  I.setDesc(TII.get(X86::PHI));
1324 
1325  return true;
1326 }
1327 
1330  X86Subtarget &Subtarget,
1331  X86RegisterBankInfo &RBI) {
1332  return new X86InstructionSelector(TM, Subtarget, RBI);
1333 }
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:843
MachineBasicBlock * getMBB() const
Atomic ordering constants.
static bool selectMergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
static const MachineInstrBuilder & addConstantPoolReference(const MachineInstrBuilder &MIB, unsigned CPI, unsigned GlobalBaseReg, unsigned char OpFlags)
addConstantPoolReference - This function is used to add a reference to the base of a constant value s...
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:271
unsigned getReg() const
getReg - Returns the register number.
A debug info location.
Definition: DebugLoc.h:34
const GlobalValue * GV
An instruction for reading from memory.
Definition: Instructions.h:164
void setRegBank(unsigned Reg, const RegisterBank &RegBank)
Set the register bank to RegBank for Reg.
unsigned createGenericVirtualRegister(LLT Ty)
Create and return a new generic virtual register with low-level type Ty.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
static const TargetRegisterClass * getRegClassFromGRPhysReg(unsigned Reg)
return AArch64::GPR64RegClass contains(Reg)
#define DEBUG_TYPE
bool isVector() const
A description of a memory reference used in the backend.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
const HexagonInstrInfo * TII
const ConstantFP * getFPImm() const
unsigned getNumOperands() const
Access to explicit operands of the instruction.
Definition: MachineInstr.h:296
union llvm::X86AddressMode::@463 Base
This class provides the information for the target register banks.
const MachineInstrBuilder & addUse(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
static bool isGlobalStubReference(unsigned char TargetFlag)
isGlobalStubReference - Return true if the specified TargetFlag operand is a reference to a stub for ...
Definition: X86InstrInfo.h:90
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
Reg
All possible values of the reg field in the ModR/M byte.
static StringRef getName(Value *V)
static int getRegClass(RegisterKind Is, unsigned RegWidth)
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:293
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
static bool isGlobalRelativeToPICBase(unsigned char TargetFlag)
isGlobalRelativeToPICBase - Return true if the specified global value reference is relative to a 32-b...
Definition: X86InstrInfo.h:106
void RemoveOperand(unsigned i)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC, const TargetRegisterClass *SrcRC)
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
void ChangeToImmediate(int64_t ImmVal)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
std::pair< CondCode, bool > getX86ConditionCode(CmpInst::Predicate Predicate)
Return a pair of condition code for the given predicate and whether the instruction operands should b...
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:149
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
unsigned getPointerSize(unsigned AS=0) const
Layout pointer size FIXME: The defaults need to be removed once all of the backends/clients are updat...
Definition: DataLayout.cpp:620
const GlobalValue * getGlobal() const
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:264
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:853
int64_t addOffset(int64_t LHS, int64_t RHS)
void substPhysReg(unsigned Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
void setImm(int64_t immVal)
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static void X86SelectAddress(const MachineInstr &I, const MachineRegisterInfo &MRI, X86AddressMode &AM)
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand=false)
Return a set opcode for the given condition and whether it has a memory operand.
bool isCopy() const
Definition: MachineInstr.h:860
bool isImplicitDef() const
Definition: MachineInstr.h:834
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
enum llvm::X86AddressMode::@462 BaseType
This file declares the targeting of the RegisterBankInfo class for X86.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
constexpr bool isInt< 32 >(int64_t x)
Definition: MathExtras.h:301
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:392
MachineOperand class - Representation of each machine instruction operand.
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition: Utils.cpp:77
InstructionSelector * createX86InstructionSelector(const X86TargetMachine &TM, X86Subtarget &, X86RegisterBankInfo &)
This class implements the register bank concept.
Definition: RegisterBank.h:29
int64_t getImm() const
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
Definition: X86Subtarget.h:501
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:923
Optional< int64_t > getConstantVRegVal(unsigned VReg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:174
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:142
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Provides the logic to select generic machine instructions.
Representation of each machine instruction.
Definition: MachineInstr.h:60
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MO_GOTOFF - On a symbol operand this indicates that the immediate is the offset to the location of th...
Definition: X86BaseInfo.h:101
static const MachineInstrBuilder & addDirectMem(const MachineInstrBuilder &MIB, unsigned Reg)
addDirectMem - This function is used to add a direct memory reference to the current instruction – t...
static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void setReg(unsigned Reg)
Change the register this operand corresponds to.
#define I(x, y, z)
Definition: MD5.cpp:58
void setSubReg(unsigned subReg)
LLT getType(unsigned VReg) const
Get the low-level type of VReg or LLT{} if VReg is not a generic (target independent) virtual registe...
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
#define GET_GLOBALISEL_TEMPORARIES_INIT
const TargetRegisterClass * getRegClassOrNull(unsigned Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel...
Definition: TargetOpcodes.h:31
unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
#define DEBUG(X)
Definition: Debug.h:118
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of th...
Definition: X86BaseInfo.h:87
X86AddressMode - This struct holds a generalized full x86 address mode.
void print(raw_ostream &OS, bool IsStandalone=true, bool SkipOpers=false, bool SkipDebugLoc=false, const TargetInstrInfo *TII=nullptr) const
Print this MI to OS.
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:298
const ConstantInt * getCImm() const
static const MachineInstrBuilder & addFullAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM)
unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one...
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:48
unsigned getPredicate() const