//===- X86InstructionSelector.cpp -----------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove after supported by Tablegen-erated instruction selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          uint64_t Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTrunc(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF,
                           CodeGenCoverage &CoverageInfo) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF,
                         CodeGenCoverage &CoverageInfo) const;
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;

  // Emit an insert-subreg instruction and insert it before MachineInstr &I.
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // Emit an extract-subreg instruction and insert it before MachineInstr &I.
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}

// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  unsigned DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        unsigned ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    // Change the physical register to perform a truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                   << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
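
// Example (illustrative commentary, not in the original source; assumed MIR
// syntax): an ABI-lowering copy such as
//   %0:gpr(s8) = COPY $edi
// takes the SrcSize > DstSize path above; the physical source operand is
// rewritten to the matching subregister, yielding roughly
//   %0:gr8 = COPY $dil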

bool X86InstructionSelector::select(MachineInstr &I,
                                    CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, CoverageInfo))
    return true;

  DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_GEP:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(I, MRI, MF);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
    return selectUadde(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  }

  return false;
}

unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                uint64_t Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm
                                 : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm)
                    : (HasAVX512 ? X86::VMOVSSZmr
                                 : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm
                                 : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm)
                    : (HasAVX512 ? X86::VMOVSDZmr
                                 : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= 16)
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= 32)
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= 64)
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}
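
// Illustrative mapping (added commentary, read directly off the code above):
// a G_LOAD of LLT::scalar(32) on the GPR bank returns MOV32rm; a 128-bit
// vector load with 16-byte alignment returns MOVAPSrm, VMOVAPSrm, or the
// VMOVAPSZ128 forms depending on SSE/AVX/AVX-512(VLX) availability, and
// falls back to the unaligned MOVUPS family when Alignment < 16.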

// Fill in an address from the given instruction.
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_GEP) {
    if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}
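
// Example (illustrative commentary; assumed MIR): given
//   %2:_(p0) = G_GEP %1, %3   ; where %3 = G_CONSTANT i64 8
// the fold above produces AM.Base.Reg = %1 and AM.Disp = 8; a G_FRAME_INDEX
// instead sets AM.Base.FrameIndex, so the consumer can emit a single
// base+displacement memory operand.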

bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  auto &MemOp = **I.memoperands_begin();
  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
    DEBUG(dbgs() << "Atomic load/store not supported yet\n");
    return false;
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.RemoveOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    I.RemoveOperand(1);
    I.RemoveOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
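
// Example (illustrative commentary; assumed MIR): a scalar load such as
//   %1:gpr(s32) = G_LOAD %0(p0) :: (load 4)
// is rewritten in place to roughly
//   %1:gr32 = MOV32rm %0, 1, $noreg, 0, $noreg
// i.e. the pointer operand is replaced by a full X86 address-mode tuple
// (base, scale, index, displacement, segment).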

static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}

bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
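
// Example (illustrative commentary; assumed MIR): a frame index
//   %0:gpr(p0) = G_FRAME_INDEX %stack.0
// becomes roughly
//   %0:gr64 = LEA64r %stack.0, 1, $noreg, 0, $noreg
// while a register-offset G_GEP folds its offset register in as the index
// operand of the LEA (scale 1, displacement 0).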

bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load, not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the PIC base, not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.RemoveOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
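
// Example (illustrative commentary; assumed MIR): %0:gpr(s64) = G_CONSTANT
// i64 42 selects to MOV64ri32 because 42 fits in a sign-extended 32-bit
// immediate; a value outside that range takes the longer MOV64ri encoding.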

bool X86InstructionSelector::selectTrunc(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    DEBUG(dbgs() << "G_TRUNC input/output on different banks\n");
    return false;
  }

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  if (!DstRC)
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
  if (!SrcRC)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}
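
// Example (illustrative commentary; assumed MIR): a GPR truncate
//   %1:gpr(s32) = G_TRUNC %0(s64)
// becomes a subregister copy, with no instruction emitted for the
// truncation itself:
//   %1:gr32 = COPY %0.sub_32bit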

bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri8;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri8;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri8;
  else
    return false;

  unsigned DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG), DefReg)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
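
// Example (illustrative commentary; assumed MIR): for
//   %1:gpr(s32) = G_ZEXT %0(s1)
// the code above first widens the 8-bit register holding the i1, then masks
// it down to a single bit:
//   %2:gr32 = SUBREG_TO_REG 0, %0, %subreg.sub_8bit
//   %1:gr32 = AND32ri8 %2, 1, implicit-def $eflags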

bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}
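
// Example (illustrative commentary; assumed MIR): for
//   %1:gpr(s64) = G_ANYEXT %0(s32)
// the register classes differ, so the instruction is replaced by
//   %1:gr64 = SUBREG_TO_REG 0, %0, %subreg.sub_32bit
// leaving the upper bits undefined, as anyext permits.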

bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());
  unsigned OpSet = X86::getSETFromCond(CC);

  unsigned LHS = I.getOperand(2).getReg();
  unsigned RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                   TII.get(OpSet), I.getOperand(0).getReg());

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
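
// Example (illustrative commentary; assumed MIR): an integer compare
//   %2:gpr(s8) = G_ICMP intpred(slt), %0(s32), %1
// expands to a flag-producing compare plus a flag-consuming set:
//   CMP32rr %0, %1, implicit-def $eflags
//   %2:gr8 = SETLr implicit $eflags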

bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned CarryOutReg = I.getOperand(1).getReg();
  const unsigned Op0Reg = I.getOperand(2).getReg();
  const unsigned Op1Reg = I.getOperand(3).getReg();
  unsigned CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // Find the def instruction for the carry-in operand, looking through any
  // intervening truncates.
  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = Def->getOperand(1).getReg();
    Def = MRI.getVRegDef(CarryInReg);
  }

  unsigned Opcode;
  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
    // The carry was set by a previous ADD.

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY),
            X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    Opcode = X86::ADC32rr;
  } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) {
    // The carry is a constant; only 0 is supported.
    if (*val != 0)
      return false;

    Opcode = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}
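
// Example (illustrative commentary; assumed MIR): for a carry chain
//   %3:gpr(s32), %4:gpr(s1) = G_UADDE %0, %1, %2
// where %2 is defined by a previous G_UADDE, the selection is roughly:
//   $eflags = COPY %2
//   %3:gr32 = ADC32rr %0, %1, implicit-def $eflags, implicit $eflags
//   %4:gr32 = COPY $eflags
// with ADD32rr used instead when the incoming carry is the constant 0.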

bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not an extract of a subvector.

  if (Index == 0) {
    // Replace by an extract-subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}

bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not an insert of a subvector.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by a subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split to extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  unsigned SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst, CoverageInfo))
      return false;
  }

  I.eraseFromParent();
  return true;
}
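
// Example (illustrative commentary; assumed MIR): an unmerge such as
//   %1:vecr(<4 x s32>), %2:vecr(<4 x s32>) = G_UNMERGE_VALUES %0(<8 x s32>)
// is rewritten as two G_EXTRACTs at bit offsets 0 and 128, each of which is
// then re-submitted to select() and lowered via selectExtract().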

bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
         "unexpected instruction");

  // Split to inserts.
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first src use insertSubReg.
  unsigned DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst, CoverageInfo))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst, CoverageInfo))
    return false;

  I.eraseFromParent();
  return true;
}
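
// Example (illustrative commentary; assumed MIR): merging two 128-bit vectors
//   %2:vecr(<8 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %1(<4 x s32>)
// becomes an insert-subreg copy for the first source followed by a G_INSERT
// at bit offset 128, each selected recursively through select().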

bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JNE_1))
      .addMBB(DestMBB);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
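
// Example (illustrative commentary; assumed MIR): a conditional branch
//   G_BRCOND %0(s1), %bb.1
// is selected to a test of the low bit plus a jump-if-not-equal:
//   TEST8ri %0, 1, implicit-def $eflags
//   JNE_1 %bb.1, implicit $eflags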

bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const unsigned DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  unsigned Align = DstTy.getSizeInBits();
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
    // they cannot be folded into immediate fields.

    unsigned AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        MF.getDataLayout().getPointerSize(), Align);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAGISel the code that initializes it is generated by the CGBR pass.
      return false; // TODO: support this mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
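
// Example (illustrative commentary; assumed MIR): under the small code model
// on x86-64,
//   %0:vecr(s32) = G_FCONSTANT float 5.5
// becomes a RIP-relative constant-pool load, roughly
//   %0:fr32 = MOVSSrm $rip, 1, $noreg, %const.0, $noreg
// (VMOVSSrm / VMOVSSZrm when AVX / AVX-512 are available).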

bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  unsigned DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                   << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}

InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   X86Subtarget &Subtarget,
                                   X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}