//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized DAG to an X86 DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

static cl::opt<bool> AndImmShrink("x86-and-imm-shrink", cl::init(true),
    cl::desc("Enable setting constant bits to reduce size of mask immediates"),
    cl::Hidden);

static cl::opt<bool> EnablePromoteAnyextLoad(
    "x86-promote-anyext-load", cl::init(true),
    cl::desc("Enable promoting aligned anyext load to wider load"), cl::Hidden);

extern cl::opt<bool> IndirectBranchTracking;

//===----------------------------------------------------------------------===//
// Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// This corresponds to X86AddressMode, but uses SDValue's instead of register
  /// numbers for the leaves of the matched tree.
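  ///
  /// The fields below encode the general x86 address form
  ///   Base + Index * Scale + Disp (+ at most one symbolic displacement),
  /// with an optional segment override.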
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType = RegBase;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex = 0;

    unsigned Scale = 1;
    SDValue IndexReg;
    int32_t Disp = 0;
    SDValue Segment;
    const GlobalValue *GV = nullptr;
    const Constant *CP = nullptr;
    const BlockAddress *BlockAddr = nullptr;
    const char *ES = nullptr;
    MCSymbol *MCSym = nullptr;
    int JT = -1;
    Align Alignment;            // CP alignment.
    unsigned char SymbolFlags = X86II::MO_NO_FLAG; // X86II::MO_*
    bool NegateIndex = false;

    X86ISelAddressMode() = default;

    bool hasSymbolicDisplacement() const {
      return GV != nullptr || CP != nullptr || ES != nullptr ||
             MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
    }

    bool hasBaseOrIndexReg() const {
      return BaseType == FrameIndexBase ||
             IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
    }

    /// Return true if this addressing mode is already RIP-relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump(SelectionDAG *DAG = nullptr) {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode())
        Base_Reg.getNode()->dump(DAG);
      else
        dbgs() << "nul\n";
      if (BaseType == FrameIndexBase)
        dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
      dbgs() << " Scale " << Scale << '\n'
             << "IndexReg ";
      if (NegateIndex)
        dbgs() << "negate ";
      if (IndexReg.getNode())
        IndexReg.getNode()->dump(DAG);
      else
        dbgs() << "nul\n";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " MCSym ";
      if (MCSym)
        dbgs() << MCSym;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Alignment.value() << '\n';
    }
#endif
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86-specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel final : public SelectionDAGISel {
    /// Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// If true, selector should try to optimize for minimum code size.
    bool OptForMinSize;

    /// Disable direct TLS access through segment registers.
    bool IndirectTlsSegRefs;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
        : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr),
          OptForMinSize(false), IndirectTlsSegRefs(false) {}

    StringRef getPassName() const override {
      return "X86 DAG->DAG Instruction Selection";
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Reset the subtarget each time through.
      Subtarget = &MF.getSubtarget<X86Subtarget>();
      IndirectTlsSegRefs = MF.getFunction().hasFnAttribute(
                             "indirect-tls-seg-refs");

      // OptFor[Min]Size are used in pattern predicates that isel is matching.
      OptForMinSize = MF.getFunction().hasMinSize();
      assert((!OptForMinSize || MF.getFunction().hasOptSize()) &&
             "OptForMinSize implies OptForSize");

      SelectionDAGISel::runOnMachineFunction(MF);
      return true;
    }

    void emitFunctionEntryCode() override;

    bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

    void PreprocessISelDAG() override;
    void PostprocessISelDAG() override;

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    void Select(SDNode *N) override;

    bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM,
                            bool AllowSegmentRegForX32 = false);
    bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool matchAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchAdd(SDValue &N, X86ISelAddressMode &AM, unsigned Depth);
    bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool matchVectorAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                       unsigned Depth);
    bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool selectVectorAddr(MemSDNode *Parent, SDValue BasePtr, SDValue IndexOp,
                          SDValue ScaleOp, SDValue &Base, SDValue &Scale,
                          SDValue &Index, SDValue &Disp, SDValue &Segment);
    bool selectMOV64Imm32(SDValue N, SDValue &Imm);
    bool selectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool selectLEA64_32Addr(SDValue N, SDValue &Base,
                            SDValue &Scale, SDValue &Index, SDValue &Disp,
                            SDValue &Segment);
    bool selectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool selectRelocImm(SDValue N, SDValue &Op);

    bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    // Convenience method where P is also root.
    bool tryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment) {
      return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
    }

    bool tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
                          SDValue &Base, SDValue &Scale,
                          SDValue &Index, SDValue &Disp,
                          SDValue &Segment);

    bool isProfitableToFormMaskedOp(SDNode *N) const;

    /// Implement addressing mode selection for inline asm expressions.
    bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                      unsigned ConstraintID,
                                      std::vector<SDValue> &OutOps) override;

    void emitSpecialCodeForMain();

    inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
                                   MVT VT, SDValue &Base, SDValue &Scale,
                                   SDValue &Index, SDValue &Disp,
                                   SDValue &Segment) {
      if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
        Base = CurDAG->getTargetFrameIndex(
            AM.Base_FrameIndex, TLI->getPointerTy(CurDAG->getDataLayout()));
      else if (AM.Base_Reg.getNode())
        Base = AM.Base_Reg;
      else
        Base = CurDAG->getRegister(0, VT);

      Scale = getI8Imm(AM.Scale, DL);

      // Negate the index if needed.
      if (AM.NegateIndex) {
        unsigned NegOpc = VT == MVT::i64 ? X86::NEG64r : X86::NEG32r;
        SDValue Neg = SDValue(CurDAG->getMachineNode(NegOpc, DL, VT, MVT::i32,
                                                     AM.IndexReg), 0);
        AM.IndexReg = Neg;
      }

      if (AM.IndexReg.getNode())
        Index = AM.IndexReg;
      else
        Index = CurDAG->getRegister(0, VT);

      // These are 32-bit even in 64-bit mode since RIP-relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Alignment,
                                             AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.MCSym) {
        assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
        assert(AM.SymbolFlags == 0 && "oo");
        Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i16);
    }

    // Utility function to determine whether we should avoid selecting
    // immediate forms of instructions for better code size or not.
    // At a high level, we'd like to avoid such instructions when
    // we have similar constants used within the same basic block
    // that can be kept in a register.
    //
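    // As an illustrative sketch (not from a specific test case): under
    // optsize, two spilled-immediate instructions such as
    //   movl $0x12345678, (%eax)
    //   movl $0x12345678, (%ecx)
    // encode the 4-byte constant twice; materializing it once in a register
    // and storing that register twice is smaller, which is the situation this
    // predicate tries to detect.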
    bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
      uint32_t UseCount = 0;

      // Do not want to hoist if we're not optimizing for size.
      // TODO: We'd like to remove this restriction.
      // See the comment in X86InstrInfo.td for more info.
      if (!CurDAG->shouldOptForSize())
        return false;

      // Walk all the users of the immediate.
      for (const SDNode *User : N->uses()) {
        if (UseCount >= 2)
          break;

        // This user is already selected. Count it as a legitimate use and
        // move on.
        if (User->isMachineOpcode()) {
          UseCount++;
          continue;
        }

        // We want to count stores of immediates as real uses.
        if (User->getOpcode() == ISD::STORE &&
            User->getOperand(1).getNode() == N) {
          UseCount++;
          continue;
        }

        // We don't currently match users that have > 2 operands (except
        // for stores, which are handled above). Those instructions won't
        // match in ISel, for now, and would be counted incorrectly.
        // This may change in the future as we add additional instruction
        // types.
        if (User->getNumOperands() != 2)
          continue;

        // If this is a sign-extended 8-bit integer immediate used in an ALU
        // instruction, there is probably an opcode encoding to save space.
        auto *C = dyn_cast<ConstantSDNode>(N);
        if (C && isInt<8>(C->getSExtValue()))
          continue;

        // Immediates that are used for offsets as part of stack
        // manipulation should be left alone. These are typically
        // used to indicate SP offsets for argument passing and
        // will get pulled into stores/pushes (implicitly).
        if (User->getOpcode() == X86ISD::ADD ||
            User->getOpcode() == ISD::ADD ||
            User->getOpcode() == X86ISD::SUB ||
            User->getOpcode() == ISD::SUB) {

          // Find the other operand of the add/sub.
          SDValue OtherOp = User->getOperand(0);
          if (OtherOp.getNode() == N)
            OtherOp = User->getOperand(1);

          // Don't count if the other operand is SP.
          RegisterSDNode *RegNode;
          if (OtherOp->getOpcode() == ISD::CopyFromReg &&
              (RegNode = dyn_cast_or_null<RegisterSDNode>(
                   OtherOp->getOperand(1).getNode())))
            if ((RegNode->getReg() == X86::ESP) ||
                (RegNode->getReg() == X86::RSP))
              continue;
        }

        // ... otherwise, count this and move on.
        UseCount++;
      }

      // If we have more than 1 use, then recommend for hoisting.
      return (UseCount > 1);
    }

    /// Return a target constant with the specified value of type i8.
    inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
    }

    /// Return a target constant with the specified value, of type i32.
    inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
    }

    /// Return a target constant with the specified value, of type i64.
    inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
    }

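    // The helpers below compute the lane-select immediates for
    // VEXTRACTF128/VINSERTF128-style nodes. For example (illustrative),
    // extracting from a v16i32 starting at element 8 with VecWidth == 256
    // yields (8 * 32) / 256 == 1, i.e. the upper 256-bit half.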
    SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
                                        const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(1);
      MVT VecVT = N->getOperand(0).getSimpleValueType();
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }

    SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
                                      const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(2);
      MVT VecVT = N->getSimpleValueType(0);
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }

    SDValue getPermuteVINSERTCommutedImmediate(SDNode *N, unsigned VecWidth,
                                               const SDLoc &DL) {
      assert(VecWidth == 128 && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(2);
      MVT VecVT = N->getSimpleValueType(0);
      uint64_t InsertIdx = (Index * VecVT.getScalarSizeInBits()) / VecWidth;
      assert((InsertIdx == 0 || InsertIdx == 1) && "Bad insertf128 index");
      // vinsert(0,sub,vec) -> [sub0][vec1] -> vperm2x128(0x30,vec,sub)
      // vinsert(1,sub,vec) -> [vec0][sub0] -> vperm2x128(0x02,vec,sub)
      return getI8Imm(InsertIdx ? 0x02 : 0x30, DL);
    }

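    // Materialize 0 - CF: SBB of a zeroed register against itself yields
    // all-ones when the incoming carry flag is set and zero otherwise. The
    // flag operand is first copied into EFLAGS so it can be glued to the SBB.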
    SDValue getSBBZero(SDNode *N) {
      SDLoc dl(N);
      MVT VT = N->getSimpleValueType(0);

      // Create zero.
      SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i32);
      SDValue Zero =
          SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, None), 0);
      if (VT == MVT::i64) {
        Zero = SDValue(
            CurDAG->getMachineNode(
                TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                CurDAG->getTargetConstant(0, dl, MVT::i64), Zero,
                CurDAG->getTargetConstant(X86::sub_32bit, dl, MVT::i32)),
            0);
      }

      // Copy flags to the EFLAGS register and glue it to next node.
      unsigned Opcode = N->getOpcode();
      assert((Opcode == X86ISD::SBB || Opcode == X86ISD::SETCC_CARRY) &&
             "Unexpected opcode for SBB materialization");
      unsigned FlagOpIndex = Opcode == X86ISD::SBB ? 2 : 1;
      SDValue EFLAGS =
          CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EFLAGS,
                               N->getOperand(FlagOpIndex), SDValue());

      // Create a 64-bit instruction if the result is 64-bits otherwise use the
      // 32-bit version.
      unsigned Opc = VT == MVT::i64 ? X86::SBB64rr : X86::SBB32rr;
      MVT SBBVT = VT == MVT::i64 ? MVT::i64 : MVT::i32;
      VTs = CurDAG->getVTList(SBBVT, MVT::i32);
      return SDValue(
          CurDAG->getMachineNode(Opc, dl, VTs,
                                 {Zero, Zero, EFLAGS, EFLAGS.getValue(1)}),
          0);
    }

    // Helper to detect unneeded and instructions on shift amounts. Called
    // from PatFrags in tablegen.
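    // For example, in (shl X, (and Y, 31)) the hardware only reads the low
    // five bits of a 32-bit shift amount, so with Width == 5 the AND keeps
    // every bit the shift consumes and can be dropped.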
    bool isUnneededShiftMask(SDNode *N, unsigned Width) const {
      assert(N->getOpcode() == ISD::AND && "Unexpected opcode");
      const APInt &Val = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();

      if (Val.countTrailingOnes() >= Width)
        return true;

      APInt Mask = Val | CurDAG->computeKnownBits(N->getOperand(0)).Zero;
      return Mask.countTrailingOnes() >= Width;
    }

    /// Return an SDNode that returns the value of the global base register.
    /// Output instructions required to initialize the global base register,
    /// if necessary.
    SDNode *getGlobalBaseReg();

    /// Return a reference to the TargetMachine, casted to the target-specific
    /// type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// Return a reference to the TargetInstrInfo, casted to the target-specific
    /// type.
    const X86InstrInfo *getInstrInfo() const {
      return Subtarget->getInstrInfo();
    }

    /// Return a condition code of the given SDNode.
    X86::CondCode getCondFromNode(SDNode *N) const;

    /// Address-mode matching performs shift-of-and to and-of-shift
    /// reassociation in order to expose more scaled addressing
    /// opportunities.
    bool ComplexPatternFuncMutatesDAG() const override {
      return true;
    }

    bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;

    // Indicates we should prefer to use a non-temporal load for this load.
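    // Non-temporal vector loads select to (V)MOVNTDQA, which requires an
    // aligned full-width memory operand and SSE4.1/AVX2/AVX512 for the
    // 16/32/64-byte widths handled below.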
    bool useNonTemporalLoad(LoadSDNode *N) const {
      if (!N->isNonTemporal())
        return false;

      unsigned StoreSize = N->getMemoryVT().getStoreSize();

      if (N->getAlign().value() < StoreSize)
        return false;

      switch (StoreSize) {
      default: llvm_unreachable("Unsupported store size");
      case 4:
      case 8:
        return false;
      case 16:
        return Subtarget->hasSSE41();
      case 32:
        return Subtarget->hasAVX2();
      case 64:
        return Subtarget->hasAVX512();
      }
    }

    bool foldLoadStoreIntoMemOperand(SDNode *Node);
    MachineSDNode *matchBEXTRFromAndImm(SDNode *Node);
    bool matchBitExtract(SDNode *Node);
    bool shrinkAndImmediate(SDNode *N);
    bool isMaskZeroExtended(SDNode *N) const;
    bool tryShiftAmountMod(SDNode *N);
    bool tryShrinkShlLogicImm(SDNode *N);
    bool tryVPTERNLOG(SDNode *N);
    bool matchVPTERNLOG(SDNode *Root, SDNode *ParentA, SDNode *ParentB,
                        SDNode *ParentC, SDValue A, SDValue B, SDValue C,
                        uint8_t Imm);
    bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);
    bool tryMatchBitSelect(SDNode *N);

    MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node);
    MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node,
                                SDValue &InFlag);

    bool tryOptimizeRem8Extend(SDNode *N);

    bool onlyUsesZeroFlag(SDValue Flags) const;
    bool hasNoSignFlagUses(SDValue Flags) const;
    bool hasNoCarryFlagUses(SDValue Flags) const;
  };
}


// Returns true if this masked compare can be implemented legally with this
// type.
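// For example, X86ISD::CMPM producing v8i1 from v8i32 operands needs AVX512VL;
// without VLX the compare runs on 512-bit types and the upper bits of the
// result mask are not guaranteed to be zero.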
static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == X86ISD::CMPM || Opcode == X86ISD::CMPMM ||
      Opcode == X86ISD::STRICT_CMPM || Opcode == ISD::SETCC ||
      Opcode == X86ISD::CMPMM_SAE || Opcode == X86ISD::VFPCLASS) {
    // We can get 256-bit 8 element types here without VLX being enabled. When
    // this happens we will use 512-bit operations and the mask will not be
    // zero extended.
    EVT OpVT = N->getOperand(0).getValueType();
    // The first operand of X86ISD::STRICT_CMPM is chain, so we need to get the
    // second operand.
    if (Opcode == X86ISD::STRICT_CMPM)
      OpVT = N->getOperand(1).getValueType();
    if (OpVT.is256BitVector() || OpVT.is128BitVector())
      return Subtarget->hasVLX();

    return true;
  }
  // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
  if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
      Opcode == X86ISD::FSETCCM_SAE)
    return true;

  return false;
}

// Returns true if we can assume the writer of the mask has zero extended it
// for us.
bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
  // If this is an AND, check if we have a compare on either side. As long as
  // one side guarantees the mask is zero extended, the AND will preserve those
  // zeros.
  if (N->getOpcode() == ISD::AND)
    return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
           isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);

  return isLegalMaskCompare(N, Subtarget);
}

bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // Don't fold non-temporal loads if we have an instruction for them.
  if (useNonTemporalLoad(cast<LoadSDNode>(N)))
    return false;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::ADC:
    case X86ISD::SUB:
    case X86ISD::SBB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDCARRY:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In case where the increment is 1, then
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

        // If this is a 64-bit AND with an immediate that fits in 32-bits,
        // prefer using the smaller and over folding the load. This is needed
        // to make sure immediates created by shrinkAndImmediate are always
        // folded. Ideally we would narrow the load during DAG combine and get
        // the best of both worlds.
        if (U->getOpcode() == ISD::AND &&
            Imm->getAPIntValue().getBitWidth() == 64 &&
            Imm->getAPIntValue().isIntN(32))
          return false;

        // If this is really a zext_inreg that can be represented with a movzx
        // instruction, prefer that.
        // TODO: We could shrink the load and fold if it is non-volatile.
        if (U->getOpcode() == ISD::AND &&
            (Imm->getAPIntValue() == UINT8_MAX ||
             Imm->getAPIntValue() == UINT16_MAX ||
             Imm->getAPIntValue() == UINT32_MAX))
          return false;

        // ADD/SUB can negate the immediate and use the opposite operation
        // to fit 128 into a sign extended 8 bit immediate.
        if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB) &&
            (-Imm->getAPIntValue()).isSignedIntN(8))
          return false;

        if ((U->getOpcode() == X86ISD::ADD || U->getOpcode() == X86ISD::SUB) &&
            (-Imm->getAPIntValue()).isSignedIntN(8) &&
            hasNoCarryFlagUses(SDValue(U, 1)))
          return false;
      }

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl %gs:0, %eax
      // leal i@NTPOFF(%eax), %eax
      // instead of
      // movl $i@NTPOFF, %eax
      // addl %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }

      // Don't fold load if this matches the BTS/BTR/BTC patterns.
      // BTS: (or X, (shl 1, n))
      // BTR: (and X, (rotl -2, n))
      // BTC: (xor X, (shl 1, n))
      if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
        if (U->getOperand(0).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(0).getOperand(0)))
          return false;

        if (U->getOperand(1).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(1).getOperand(0)))
          return false;
      }
      if (U->getOpcode() == ISD::AND) {
        SDValue U0 = U->getOperand(0);
        SDValue U1 = U->getOperand(1);
        if (U0.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }

        if (U1.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }
      }

      break;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      // Don't fold a load into a shift by immediate. The BMI2 instructions
      // support folding a load, but not an immediate. The legacy instructions
      // support folding an immediate, but can't fold a load. Folding an
      // immediate is preferable to folding a load.
      if (isa<ConstantSDNode>(U->getOperand(1)))
        return false;

      break;
    }
  }

  // Prevent folding a load if this can be implemented with an insert_subreg or
  // a move that implicitly zeroes.
  if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
      isNullConstant(Root->getOperand(2)) &&
      (Root->getOperand(0).isUndef() ||
       ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
    return false;

  return true;
}

// Indicates it is profitable to form an AVX512 masked operation. Returning
// false will favor a register-register masked move or vblendm and the
// operation will be selected separately.
bool X86DAGToDAGISel::isProfitableToFormMaskedOp(SDNode *N) const {
  assert(
      (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::SELECTS) &&
      "Unexpected opcode!");

  // If the operation has additional users, the operation will be duplicated.
  // Check the use count to prevent that.
  // FIXME: Are there cheap opcodes we might want to duplicate?
  return N->getOperand(1).hasOneUse();
}

/// Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  Ops.append(Call->op_begin() + 1, Call->op_end());
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}

/// Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain, this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      !LD->isSimple() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

static bool isEndbrImm64(uint64_t Imm) {
  // There may be some other prefix bytes between 0xF3 and 0x0F1EFA.
  // e.g.: 0xF3660F1EFA, 0xF3670F1EFA
  if ((Imm & 0x00FFFFFF) != 0x0F1EFA)
    return false;

  uint8_t OptionalPrefixBytes[] = {0x26, 0x2e, 0x36, 0x3e, 0x64,
                                   0x65, 0x66, 0x67, 0xf0, 0xf2};
  int i = 24; // The low 24 bits (0x0F1EFA) have already matched.
  while (i < 64) {
    uint8_t Byte = (Imm >> i) & 0xFF;
    if (Byte == 0xF3)
      return true;
    if (!llvm::is_contained(OptionalPrefixBytes, Byte))
      return false;
    i += 8;
  }

  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  bool MadeChange = false;
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // This is for CET enhancement.
    //
    // ENDBR32 and ENDBR64 have specific opcodes:
    // ENDBR32: F3 0F 1E FB
    // ENDBR64: F3 0F 1E FA
    // We do not want attackers to find unintended ENDBR32/64 opcode matches
    // in the binary. Here's an example:
    // If the compiler had to generate asm for the following code:
    // a = 0xF30F1EFA
    // it could, for example, generate:
    // mov 0xF30F1EFA, dword ptr[a]
    // In such a case, the binary would include a gadget that starts with a
    // fake ENDBR64 opcode. Therefore, we split such generation into multiple
    // operations so that the pattern does not show up in the binary.
    if (N->getOpcode() == ISD::Constant) {
      MVT VT = N->getSimpleValueType(0);
      int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
      int32_t EndbrImm = Subtarget->is64Bit() ? 0xF30F1EFA : 0xF30F1EFB;
      if (Imm == EndbrImm || isEndbrImm64(Imm)) {
        // Check that the cf-protection-branch is enabled.
        Metadata *CFProtectionBranch =
            MF->getMMI().getModule()->getModuleFlag("cf-protection-branch");
        if (CFProtectionBranch || IndirectBranchTracking) {
          SDLoc dl(N);
          SDValue Complement = CurDAG->getConstant(~Imm, dl, VT, false, true);
          Complement = CurDAG->getNOT(dl, Complement, VT);
          --I;
          CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Complement);
          ++I;
          MadeChange = true;
          continue;
        }
      }
    }

    // If this is a target specific AND node with no flag usages, turn it back
    // into ISD::AND to enable test instruction matching.
    if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
      SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }

    // Convert vector increment or decrement to sub/add with an all-ones
    // constant:
    // add X, <1, 1...> --> sub X, <-1, -1...>
    // sub X, <1, 1...> --> add X, <-1, -1...>
    // The all-ones vector constant can be materialized using a pcmpeq
    // instruction that is commonly recognized as an idiom (has no register
    // dependency), so that's better/smaller than loading a splat 1 constant.
    //
    // But don't do this if it would inhibit a potentially profitable load
    // folding opportunity for the other operand. That only occurs with the
    // intersection of:
    // (1) The other operand (op0) is load foldable.
    // (2) The op is an add (otherwise, we are *creating* an add and can still
    //     load fold the other op).
    // (3) The target has AVX (otherwise, we have a destructive add and can't
    //     load fold the other op without killing the constant op).
    // (4) The constant 1 vector has multiple uses (so it is profitable to load
    //     into a register anyway).
    auto mayPreventLoadFold = [&]() {
      return X86::mayFoldLoad(N->getOperand(0), *Subtarget) &&
             N->getOpcode() == ISD::ADD && Subtarget->hasAVX() &&
             !N->getOperand(1).hasOneUse();
    };
    if ((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
        N->getSimpleValueType(0).isVector() && !mayPreventLoadFold()) {
      APInt SplatVal;
      if (X86::isConstantSplat(N->getOperand(1), SplatVal) &&
          SplatVal.isOne()) {
        SDLoc DL(N);

        MVT VT = N->getSimpleValueType(0);
        unsigned NumElts = VT.getSizeInBits() / 32;
        SDValue AllOnes =
            CurDAG->getAllOnesConstant(DL, MVT::getVectorVT(MVT::i32, NumElts));
        AllOnes = CurDAG->getBitcast(VT, AllOnes);

        unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
        SDValue Res =
            CurDAG->getNode(NewOpcode, DL, VT, N->getOperand(0), AllOnes);
        --I;
        CurDAG->ReplaceAllUsesWith(N, Res.getNode());
        ++I;
        MadeChange = true;
        continue;
      }
    }

    switch (N->getOpcode()) {
    case X86ISD::VBROADCAST: {
      MVT VT = N->getSimpleValueType(0);
      // Emulate v32i16/v64i8 broadcast without BWI.
      if (!Subtarget->hasBWI() && (VT == MVT::v32i16 || VT == MVT::v64i8)) {
        MVT NarrowVT = VT == MVT::v32i16 ? MVT::v16i16 : MVT::v32i8;
        SDLoc dl(N);
        SDValue NarrowBCast =
            CurDAG->getNode(X86ISD::VBROADCAST, dl, NarrowVT, N->getOperand(0));
        SDValue Res =
            CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
                            NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
        unsigned Index = VT == MVT::v32i16 ? 16 : 32;
        Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
                              CurDAG->getIntPtrConstant(Index, dl));

        --I;
        CurDAG->ReplaceAllUsesWith(N, Res.getNode());
        ++I;
        MadeChange = true;
        continue;
      }

      break;
    }
    case X86ISD::VBROADCAST_LOAD: {
      MVT VT = N->getSimpleValueType(0);
      // Emulate v32i16/v64i8 broadcast without BWI.
      if (!Subtarget->hasBWI() && (VT == MVT::v32i16 || VT == MVT::v64i8)) {
        MVT NarrowVT = VT == MVT::v32i16 ? MVT::v16i16 : MVT::v32i8;
        auto *MemNode = cast<MemSDNode>(N);
        SDLoc dl(N);
        SDVTList VTs = CurDAG->getVTList(NarrowVT, MVT::Other);
        SDValue Ops[] = {MemNode->getChain(), MemNode->getBasePtr()};
        SDValue NarrowBCast = CurDAG->getMemIntrinsicNode(
            X86ISD::VBROADCAST_LOAD, dl, VTs, Ops, MemNode->getMemoryVT(),
            MemNode->getMemOperand());
        SDValue Res =
            CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
                            NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
        unsigned Index = VT == MVT::v32i16 ? 16 : 32;
        Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
                              CurDAG->getIntPtrConstant(Index, dl));

        --I;
        SDValue To[] = {Res, NarrowBCast.getValue(1)};
        CurDAG->ReplaceAllUsesWith(N, To);
        ++I;
        MadeChange = true;
        continue;
      }

      break;
    }
    case ISD::VSELECT: {
      // Replace a VSELECT whose condition is not a mask type with BLENDV.
      if (N->getOperand(0).getValueType().getVectorElementType() == MVT::i1)
        break;

      assert(Subtarget->hasSSE41() && "Expected SSE4.1 support!");
      SDValue Blendv =
          CurDAG->getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0),
                          N->getOperand(0), N->getOperand(1), N->getOperand(2));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Blendv.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::FP_ROUND:
    case ISD::STRICT_FP_ROUND:
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT:
    case ISD::STRICT_FP_TO_SINT:
    case ISD::STRICT_FP_TO_UINT: {
      // Replace vector fp_to_s/uint with their X86 specific equivalent so we
      // don't need 2 sets of patterns.
      if (!N->getSimpleValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::FP_ROUND:          NewOpc = X86ISD::VFPROUND;        break;
      case ISD::STRICT_FP_ROUND:   NewOpc = X86ISD::STRICT_VFPROUND; break;
      case ISD::STRICT_FP_TO_SINT: NewOpc = X86ISD::STRICT_CVTTP2SI; break;
      case ISD::FP_TO_SINT:        NewOpc = X86ISD::CVTTP2SI;        break;
      case ISD::STRICT_FP_TO_UINT: NewOpc = X86ISD::STRICT_CVTTP2UI; break;
      case ISD::FP_TO_UINT:        NewOpc = X86ISD::CVTTP2UI;        break;
      }
      SDValue Res;
      if (N->isStrictFPOpcode())
        Res =
            CurDAG->getNode(NewOpc, SDLoc(N), {N->getValueType(0), MVT::Other},
                            {N->getOperand(0), N->getOperand(1)});
      else
        Res =
            CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                            N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Res.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL: {
      // Replace vector shifts with their X86 specific equivalent so we don't
      // need 2 sets of patterns.
      if (!N->getValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::SHL: NewOpc = X86ISD::VSHLV; break;
      case ISD::SRA: NewOpc = X86ISD::VSRAV; break;
      case ISD::SRL: NewOpc = X86ISD::VSRLV; break;
      }
      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::ANY_EXTEND:
    case ISD::ANY_EXTEND_VECTOR_INREG: {
      // Replace vector any extend with the zero extend equivalents so we don't
      // need 2 sets of patterns. Ignore vXi1 extensions.
      if (!N->getValueType(0).isVector())
        break;

      unsigned NewOpc;
      if (N->getOperand(0).getScalarValueSizeInBits() == 1) {
        assert(N->getOpcode() == ISD::ANY_EXTEND &&
               "Unexpected opcode for mask vector!");
        NewOpc = ISD::SIGN_EXTEND;
      } else {
        NewOpc = N->getOpcode() == ISD::ANY_EXTEND
                     ? ISD::ZERO_EXTEND
                     : ISD::ZERO_EXTEND_VECTOR_INREG;
      }

      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::FCEIL:
    case ISD::STRICT_FCEIL:
    case ISD::FFLOOR:
    case ISD::STRICT_FFLOOR:
    case ISD::FTRUNC:
    case ISD::STRICT_FTRUNC:
    case ISD::FROUNDEVEN:
    case ISD::STRICT_FROUNDEVEN:
    case ISD::FNEARBYINT:
    case ISD::STRICT_FNEARBYINT:
    case ISD::FRINT:
    case ISD::STRICT_FRINT: {
      // Replace fp rounding with their X86 specific equivalent so we don't
      // need 2 sets of patterns.
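      // The (V)ROUNDxx/VRNDSCALE immediate encodes the rounding mode in bits
      // 1:0 (0 = nearest-even, 1 = down, 2 = up, 3 = truncate), bit 2 selects
      // the current MXCSR rounding mode, and bit 3 suppresses the precision
      // exception; e.g. 0xB below is truncate with the exception suppressed.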
      unsigned Imm;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::STRICT_FCEIL:
      case ISD::FCEIL:      Imm = 0xA; break;
      case ISD::STRICT_FFLOOR:
      case ISD::FFLOOR:     Imm = 0x9; break;
      case ISD::STRICT_FTRUNC:
      case ISD::FTRUNC:     Imm = 0xB; break;
      case ISD::STRICT_FROUNDEVEN:
      case ISD::FROUNDEVEN: Imm = 0x8; break;
      case ISD::STRICT_FNEARBYINT:
      case ISD::FNEARBYINT: Imm = 0xC; break;
      case ISD::STRICT_FRINT:
      case ISD::FRINT:      Imm = 0x4; break;
      }
      SDLoc dl(N);
      bool IsStrict = N->isStrictFPOpcode();
      SDValue Res;
      if (IsStrict)
        Res = CurDAG->getNode(X86ISD::STRICT_VRNDSCALE, dl,
                              {N->getValueType(0), MVT::Other},
                              {N->getOperand(0), N->getOperand(1),
                               CurDAG->getTargetConstant(Imm, dl, MVT::i32)});
      else
        Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl, N->getValueType(0),
                              N->getOperand(0),
                              CurDAG->getTargetConstant(Imm, dl, MVT::i32));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Res.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case X86ISD::FANDN:
    case X86ISD::FAND:
    case X86ISD::FOR:
    case X86ISD::FXOR: {
      // Widen scalar fp logic ops to vector to reduce isel patterns.
      // FIXME: Can we do this during lowering/combine?
      MVT VT = N->getSimpleValueType(0);
      if (VT.isVector() || VT == MVT::f128)
        break;

      MVT VecVT = VT == MVT::f64   ? MVT::v2f64
                  : VT == MVT::f32 ? MVT::v4f32
                                   : MVT::v8f16;

      SDLoc dl(N);
      SDValue Op0 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(0));
      SDValue Op1 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(1));

      SDValue Res;
      if (Subtarget->hasSSE2()) {
        EVT IntVT = EVT(VecVT).changeVectorElementTypeToInteger();
        Op0 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op0);
        Op1 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op1);
        unsigned Opc;
        switch (N->getOpcode()) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86ISD::FANDN: Opc = X86ISD::ANDNP; break;
        case X86ISD::FAND:  Opc = ISD::AND;      break;
        case X86ISD::FOR:   Opc = ISD::OR;       break;
        case X86ISD::FXOR:  Opc = ISD::XOR;      break;
        }
        Res = CurDAG->getNode(Opc, dl, IntVT, Op0, Op1);
        Res = CurDAG->getNode(ISD::BITCAST, dl, VecVT, Res);
      } else {
        Res = CurDAG->getNode(N->getOpcode(), dl, VecVT, Op0, Op1);
      }
      Res = CurDAG->getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res,
                            CurDAG->getIntPtrConstant(0, dl));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    }

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target can fold the load into the call or
        // jmp.
        !Subtarget->useIndirectThunkCalls() &&
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          (Subtarget->is64Bit() ||
           !getTargetMachine().isPositionIndependent())))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      MadeChange = true;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to be a store
    // and load to the stack. This is a gross hack. We would like to simply
    // mark these as being illegal, but when we do that, legalize produces
    // these when it expands calls, then expands these in the same legalize
    // pass. We would like dag combine to be able to hack on these between the
    // call expansion and the node legalization. As such this pass basically
    // does "really late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    switch (N->getOpcode()) {
    default: continue;
    case ISD::FP_ROUND:
    case ISD::FP_EXTEND:
    {
      MVT SrcVT = N->getOperand(0).getSimpleValueType();
      MVT DstVT = N->getSimpleValueType(0);

      // If any of the sources are vectors, no fp stack involved.
      if (SrcVT.isVector() || DstVT.isVector())
        continue;

      // If the source and destination are SSE registers, then this is a legal
      // conversion that should not be lowered.
      const X86TargetLowering *X86Lowering =
          static_cast<const X86TargetLowering *>(TLI);
      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
      if (SrcIsSSE && DstIsSSE)
        continue;

      if (!SrcIsSSE && !DstIsSSE) {
        // If this is an FPStack extension, it is a noop.
        if (N->getOpcode() == ISD::FP_EXTEND)
          continue;
        // If this is a value-preserving FPStack truncation, it is a noop.
        if (N->getConstantOperandVal(1))
          continue;
      }

      // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
      // FPStack has extload and truncstore. SSE can fold direct loads into other
      // operations. Based on this, decide what we want to do.
      MVT MemVT = (N->getOpcode() == ISD::FP_ROUND) ? DstVT : SrcVT;
      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
      int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
      MachinePointerInfo MPI =
          MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
      SDLoc dl(N);

      // FIXME: optimize the case where the src/dest is a load or store?

      SDValue Store = CurDAG->getTruncStore(
          CurDAG->getEntryNode(), dl, N->getOperand(0), MemTmp, MPI, MemVT);
      SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store,
                                          MemTmp, MPI, MemVT);

      // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
      // extload we created. This will cause general havoc on the dag because
      // anything below the conversion could be folded into other existing nodes.
      // To avoid invalidating 'I', back it up to the convert node.
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      break;
    }

    // The sequence of events for lowering STRICT_FP versions of these nodes
    // requires dealing with the chain differently, as there is already a
    // preexisting chain.
    case ISD::STRICT_FP_ROUND:
    case ISD::STRICT_FP_EXTEND:
    {
      MVT SrcVT = N->getOperand(1).getSimpleValueType();
      MVT DstVT = N->getSimpleValueType(0);

      // If any of the sources are vectors, no fp stack involved.
      if (SrcVT.isVector() || DstVT.isVector())
        continue;

      // If the source and destination are SSE registers, then this is a legal
      // conversion that should not be lowered.
      const X86TargetLowering *X86Lowering =
          static_cast<const X86TargetLowering *>(TLI);
      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
      if (SrcIsSSE && DstIsSSE)
        continue;

      if (!SrcIsSSE && !DstIsSSE) {
        // If this is an FPStack extension, it is a noop.
        if (N->getOpcode() == ISD::STRICT_FP_EXTEND)
          continue;
        // If this is a value-preserving FPStack truncation, it is a noop.
        if (N->getConstantOperandVal(2))
          continue;
      }
1360  // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
1361  // FPStack has extload and truncstore. SSE can fold direct loads into other
1362  // operations. Based on this, decide what we want to do.
1363  MVT MemVT = (N->getOpcode() == ISD::STRICT_FP_ROUND) ? DstVT : SrcVT;
1364  SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
1365  int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
1366  MachinePointerInfo MPI =
1367  MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
1368  SDLoc dl(N);
1369 
1370  // FIXME: optimize the case where the src/dest is a load or store?
1371 
1372  //Since the operation is StrictFP, use the preexisting chain.
1373  SDValue Store, Result;
1374  if (!SrcIsSSE) {
1375  SDVTList VTs = CurDAG->getVTList(MVT::Other);
1376  SDValue Ops[] = {N->getOperand(0), N->getOperand(1), MemTmp};
1377  Store = CurDAG->getMemIntrinsicNode(X86ISD::FST, dl, VTs, Ops, MemVT,
1378  MPI, /*Align*/ None,
1380  if (N->getFlags().hasNoFPExcept()) {
1381  SDNodeFlags Flags = Store->getFlags();
1382  Flags.setNoFPExcept(true);
1383  Store->setFlags(Flags);
1384  }
1385  } else {
1386  assert(SrcVT == MemVT && "Unexpected VT!");
1387  Store = CurDAG->getStore(N->getOperand(0), dl, N->getOperand(1), MemTmp,
1388  MPI);
1389  }
1390 
1391  if (!DstIsSSE) {
1392  SDVTList VTs = CurDAG->getVTList(DstVT, MVT::Other);
1393  SDValue Ops[] = {Store, MemTmp};
1394  Result = CurDAG->getMemIntrinsicNode(
1395  X86ISD::FLD, dl, VTs, Ops, MemVT, MPI,
1396  /*Align*/ None, MachineMemOperand::MOLoad);
1397  if (N->getFlags().hasNoFPExcept()) {
1398  SDNodeFlags Flags = Result->getFlags();
1399  Flags.setNoFPExcept(true);
1400  Result->setFlags(Flags);
1401  }
1402  } else {
1403  assert(DstVT == MemVT && "Unexpected VT!");
1404  Result = CurDAG->getLoad(DstVT, dl, Store, MemTmp, MPI);
1405  }
1406 
1407  // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
1408  // extload we created. This will cause general havok on the dag because
1409  // anything below the conversion could be folded into other existing nodes.
1410  // To avoid invalidating 'I', back it up to the convert node.
1411  --I;
1412  CurDAG->ReplaceAllUsesWith(N, Result.getNode());
1413  break;
1414  }
1415  }


    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    MadeChange = true;
  }

  // Remove any dead nodes that may have been left behind.
  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}

// Look for a redundant movzx/movsx that can occur after an 8-bit divrem.
bool X86DAGToDAGISel::tryOptimizeRem8Extend(SDNode *N) {
  unsigned Opc = N->getMachineOpcode();
  if (Opc != X86::MOVZX32rr8 && Opc != X86::MOVSX32rr8 &&
      Opc != X86::MOVSX64rr8)
    return false;

  SDValue N0 = N->getOperand(0);

  // We need to be extracting the low byte of an extend.
  if (!N0.isMachineOpcode() ||
      N0.getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG ||
      N0.getConstantOperandVal(1) != X86::sub_8bit)
    return false;

  // We're looking for either a movsx or movzx to match the original opcode.
  unsigned ExpectedOpc = Opc == X86::MOVZX32rr8 ? X86::MOVZX32rr8_NOREX
                                                : X86::MOVSX32rr8_NOREX;
  SDValue N00 = N0.getOperand(0);
  if (!N00.isMachineOpcode() || N00.getMachineOpcode() != ExpectedOpc)
    return false;

  if (Opc == X86::MOVSX64rr8) {
    // If we had a sign extend from 8 to 64 bits, we still need to go from 32
    // to 64.
    MachineSDNode *Extend = CurDAG->getMachineNode(X86::MOVSX64rr32, SDLoc(N),
                                                   MVT::i64, N00);
    ReplaceUses(N, Extend);
  } else {
    // Ok we can drop this extend and just use the original extend.
    ReplaceUses(N, N00.getNode());
  }

  return true;
}

void X86DAGToDAGISel::PostprocessISelDAG() {
  // Skip peepholes at -O0.
  if (TM.getOptLevel() == CodeGenOpt::None)
    return;

  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    if (tryOptimizeRem8Extend(N)) {
      MadeChange = true;
      continue;
    }

    // Look for a TESTrr+ANDrr pattern where both operands of the test are
    // the same. Rewrite to remove the AND.
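    // That is, (TEST32rr t, t) with t = (AND32rr a, b) and no other users of
    // the AND becomes (TEST32rr a, b); the memory form below does the same
    // with a TEST32mr, swapping operands to put the memory reference first.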
    unsigned Opc = N->getMachineOpcode();
    if ((Opc == X86::TEST8rr || Opc == X86::TEST16rr ||
         Opc == X86::TEST32rr || Opc == X86::TEST64rr) &&
        N->getOperand(0) == N->getOperand(1) &&
        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
        N->getOperand(0).isMachineOpcode()) {
      SDValue And = N->getOperand(0);
      unsigned N0Opc = And.getMachineOpcode();
      if (N0Opc == X86::AND8rr || N0Opc == X86::AND16rr ||
          N0Opc == X86::AND32rr || N0Opc == X86::AND64rr) {
        MachineSDNode *Test = CurDAG->getMachineNode(Opc, SDLoc(N),
                                                     MVT::i32,
                                                     And.getOperand(0),
                                                     And.getOperand(1));
        ReplaceUses(N, Test);
        MadeChange = true;
        continue;
      }
      if (N0Opc == X86::AND8rm || N0Opc == X86::AND16rm ||
          N0Opc == X86::AND32rm || N0Opc == X86::AND64rm) {
        unsigned NewOpc;
        switch (N0Opc) {
        case X86::AND8rm:  NewOpc = X86::TEST8mr;  break;
        case X86::AND16rm: NewOpc = X86::TEST16mr; break;
        case X86::AND32rm: NewOpc = X86::TEST32mr; break;
        case X86::AND64rm: NewOpc = X86::TEST64mr; break;
        }

        // Need to swap the memory and register operand.
        SDValue Ops[] = { And.getOperand(1),
                          And.getOperand(2),
                          And.getOperand(3),
                          And.getOperand(4),
                          And.getOperand(5),
                          And.getOperand(0),
                          And.getOperand(6)  /* Chain */ };
        MachineSDNode *Test = CurDAG->getMachineNode(NewOpc, SDLoc(N),
                                                     MVT::i32, MVT::Other, Ops);
        CurDAG->setNodeMemRefs(
            Test, cast<MachineSDNode>(And.getNode())->memoperands());
        ReplaceUses(N, Test);
        MadeChange = true;
        continue;
      }
    }
1531 
1532  // Look for a KAND+KORTEST and turn it into KTEST if only the zero flag is
1533  // used. We're doing this late so we can prefer to fold the AND into masked
1534  // comparisons. Doing that can be better for the live range of the mask
1535  // register.
1536  if ((Opc == X86::KORTESTBrr || Opc == X86::KORTESTWrr ||
1537  Opc == X86::KORTESTDrr || Opc == X86::KORTESTQrr) &&
1538  N->getOperand(0) == N->getOperand(1) &&
1539  N->isOnlyUserOf(N->getOperand(0).getNode()) &&
1540  N->getOperand(0).isMachineOpcode() &&
1541  onlyUsesZeroFlag(SDValue(N, 0))) {
1542  SDValue And = N->getOperand(0);
1543  unsigned N0Opc = And.getMachineOpcode();
1544  // KANDW is legal with AVX512F, but KTESTW requires AVX512DQ. The other
1545  // KAND instructions and KTEST use the same ISA feature.
1546  if (N0Opc == X86::KANDBrr ||
1547  (N0Opc == X86::KANDWrr && Subtarget->hasDQI()) ||
1548  N0Opc == X86::KANDDrr || N0Opc == X86::KANDQrr) {
1549  unsigned NewOpc;
1550  switch (Opc) {
1551  default: llvm_unreachable("Unexpected opcode!");
1552  case X86::KORTESTBrr: NewOpc = X86::KTESTBrr; break;
1553  case X86::KORTESTWrr: NewOpc = X86::KTESTWrr; break;
1554  case X86::KORTESTDrr: NewOpc = X86::KTESTDrr; break;
1555  case X86::KORTESTQrr: NewOpc = X86::KTESTQrr; break;
1556  }
1557  MachineSDNode *KTest = CurDAG->getMachineNode(NewOpc, SDLoc(N),
1558  MVT::i32,
1559  And.getOperand(0),
1560  And.getOperand(1));
1561  ReplaceUses(N, KTest);
1562  MadeChange = true;
1563  continue;
1564  }
1565  }
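// A minimal sketch of the effect, assuming AVX512DQ for the word-size case:
// something like
//   kandw %k2, %k1, %k0
//   kortestw %k0, %k0
//   je <target>
// becomes
//   ktestw %k2, %k1
//   je <target>
// This is only safe because the branch consumes ZF alone; KTEST and KORTEST
// differ in how they set CF.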
1566 
1567  // Attempt to remove vector moves that were inserted to zero upper bits.
1568  if (Opc != TargetOpcode::SUBREG_TO_REG)
1569  continue;
1570 
1571  unsigned SubRegIdx = N->getConstantOperandVal(2);
1572  if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
1573  continue;
1574 
1575  SDValue Move = N->getOperand(1);
1576  if (!Move.isMachineOpcode())
1577  continue;
1578 
1579  // Make sure it's one of the move opcodes we recognize.
1580  switch (Move.getMachineOpcode()) {
1581  default:
1582  continue;
1583  case X86::VMOVAPDrr: case X86::VMOVUPDrr:
1584  case X86::VMOVAPSrr: case X86::VMOVUPSrr:
1585  case X86::VMOVDQArr: case X86::VMOVDQUrr:
1586  case X86::VMOVAPDYrr: case X86::VMOVUPDYrr:
1587  case X86::VMOVAPSYrr: case X86::VMOVUPSYrr:
1588  case X86::VMOVDQAYrr: case X86::VMOVDQUYrr:
1589  case X86::VMOVAPDZ128rr: case X86::VMOVUPDZ128rr:
1590  case X86::VMOVAPSZ128rr: case X86::VMOVUPSZ128rr:
1591  case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
1592  case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
1593  case X86::VMOVAPDZ256rr: case X86::VMOVUPDZ256rr:
1594  case X86::VMOVAPSZ256rr: case X86::VMOVUPSZ256rr:
1595  case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
1596  case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
1597  break;
1598  }
1599 
1600  SDValue In = Move.getOperand(0);
1601  if (!In.isMachineOpcode() ||
1602  In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
1603  continue;
1604 
1605  // Make sure the instruction has a VEX, XOP, or EVEX prefix. This covers
1606  // the SHA instructions which use a legacy encoding.
1607  uint64_t TSFlags = getInstrInfo()->get(In.getMachineOpcode()).TSFlags;
1608  if ((TSFlags & X86II::EncodingMask) != X86II::VEX &&
1609  (TSFlags & X86II::EncodingMask) != X86II::EVEX &&
1610  (TSFlags & X86II::EncodingMask) != X86II::XOP)
1611  continue;
1612 
1613  // The producing instruction is another vector instruction, so we can drop
1614  // the move.
1615  CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));
1616  MadeChange = true;
1617  }
1618 
1619  if (MadeChange)
1620  CurDAG->RemoveDeadNodes();
1621 }
1622 
1623 
1624 /// Emit any code that needs to be executed only in the main function.
1625 void X86DAGToDAGISel::emitSpecialCodeForMain() {
1626  if (Subtarget->isTargetCygMing()) {
1627  TargetLowering::ArgListTy Args;
1628  auto &DL = CurDAG->getDataLayout();
1629 
1630  TargetLowering::CallLoweringInfo CLI(*CurDAG);
1631  CLI.setChain(CurDAG->getRoot())
1632  .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
1633  CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
1634  std::move(Args));
1635  const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
1636  std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
1637  CurDAG->setRoot(Result.second);
1638  }
1639 }
1640 
1641 void X86DAGToDAGISel::emitFunctionEntryCode() {
1642  // If this is main, emit special code for main.
1643  const Function &F = MF->getFunction();
1644  if (F.hasExternalLinkage() && F.getName() == "main")
1645  emitSpecialCodeForMain();
1646 }
1647 
1648 static bool isDispSafeForFrameIndex(int64_t Val) {
1649  // On 64-bit platforms, we can run into an issue where a frame index
1650  // includes a displacement that, when added to the explicit displacement,
1651  // will overflow the displacement field. Assuming that the frame index
1652  // displacement fits into a 31-bit integer (which is only slightly more
1653  // aggressive than the current fundamental assumption that it fits into
1654  // a 32-bit integer), a 31-bit disp should always be safe.
1655  return isInt<31>(Val);
1656 }
1657 
1658 bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
1659  X86ISelAddressMode &AM) {
1660  // We may have already matched a displacement and the caller just added the
1661  // symbolic displacement. So we still need to do the checks even if Offset
1662  // is zero.
1663 
1664  int64_t Val = AM.Disp + Offset;
1665 
1666  // Cannot combine ExternalSymbol displacements with integer offsets.
1667  if (Val != 0 && (AM.ES || AM.MCSym))
1668  return true;
1669 
1670  CodeModel::Model M = TM.getCodeModel();
1671  if (Subtarget->is64Bit()) {
1672  if (Val != 0 &&
1673  !X86::isOffsetSuitableForCodeModel(Val, M,
1674  AM.hasSymbolicDisplacement()))
1675  return true;
1676  // In addition to the checks required for a register base, check that
1677  // we do not try to use an unsafe Disp with a frame index.
1678  if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
1679  !isDispSafeForFrameIndex(Val))
1680  return true;
1681  }
1682  AM.Disp = Val;
1683  return false;
1684 
1685 }
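// Worked example of the check above: with AM.Disp == 0x7fffff00, folding an
// additional Offset of 0x200 would yield 0x80000100, which no longer fits the
// signed 32-bit displacement field (or the 31-bit frame-index bound), so the
// fold is rejected (returns true) and the operands stay separate.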
1686 
1687 bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM,
1688  bool AllowSegmentRegForX32) {
1689  SDValue Address = N->getOperand(1);
1690 
1691  // load gs:0 -> GS segment register.
1692  // load fs:0 -> FS segment register.
1693  //
1694  // This optimization is generally valid because the GNU TLS model defines that
1695  // gs:0 (or fs:0 on X86-64) contains its own address. However, for X86-64 mode
1696  // with 32-bit registers, as we get in ILP32 mode, those registers are first
1697 // zero-extended to 64 bits and then added to the base address, which gives
1698  // unwanted results when the register holds a negative value.
1699  // For more information see http://people.redhat.com/drepper/tls.pdf
1700  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address)) {
1701  if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
1702  !IndirectTlsSegRefs &&
1703  (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
1704  Subtarget->isTargetFuchsia())) {
1705  if (Subtarget->isTarget64BitILP32() && !AllowSegmentRegForX32)
1706  return true;
1707  switch (N->getPointerInfo().getAddrSpace()) {
1708  case X86AS::GS:
1709  AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1710  return false;
1711  case X86AS::FS:
1712  AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1713  return false;
1714  // Address space X86AS::SS is not handled here, because it is not used to
1715  // address TLS areas.
1716  }
1717  }
1718  }
1719 
1720  return true;
1721 }
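// A sketch of what this enables on, e.g., a glibc target: an IR load from a
// null pointer in address space 257 (FS) selects directly to something like
//   movq %fs:0, %rax
// yielding the TLS block's own address without materializing a base register.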
1722 
1723 /// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
1724 /// mode. These wrap things that will resolve down into a symbol reference.
1725 /// If no match is possible, this returns true, otherwise it returns false.
1726 bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
1727  // If the addressing mode already has a symbol as the displacement, we can
1728  // never match another symbol.
1729  if (AM.hasSymbolicDisplacement())
1730  return true;
1731 
1732  bool IsRIPRelTLS = false;
1733  bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
1734  if (IsRIPRel) {
1735  SDValue Val = N.getOperand(0);
1736  if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
1737  IsRIPRelTLS = true;
1738  }
1739 
1740  // We can't use an addressing mode in the 64-bit large code model.
1741  // Global TLS addressing is an exception. In the medium code model,
1742  // we can use a mode when RIP wrappers are present.
1743  // That signifies access to globals that are known to be "near",
1744  // such as the GOT itself.
1745  CodeModel::Model M = TM.getCodeModel();
1746  if (Subtarget->is64Bit() &&
1747  ((M == CodeModel::Large && !IsRIPRelTLS) ||
1748  (M == CodeModel::Medium && !IsRIPRel)))
1749  return true;
1750 
1751  // Base and index reg must be 0 in order to use %rip as base.
1752  if (IsRIPRel && AM.hasBaseOrIndexReg())
1753  return true;
1754 
1755  // Make a local copy in case we can't do this fold.
1756  X86ISelAddressMode Backup = AM;
1757 
1758  int64_t Offset = 0;
1759  SDValue N0 = N.getOperand(0);
1760  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
1761  AM.GV = G->getGlobal();
1762  AM.SymbolFlags = G->getTargetFlags();
1763  Offset = G->getOffset();
1764  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
1765  AM.CP = CP->getConstVal();
1766  AM.Alignment = CP->getAlign();
1767  AM.SymbolFlags = CP->getTargetFlags();
1768  Offset = CP->getOffset();
1769  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
1770  AM.ES = S->getSymbol();
1771  AM.SymbolFlags = S->getTargetFlags();
1772  } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
1773  AM.MCSym = S->getMCSymbol();
1774  } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
1775  AM.JT = J->getIndex();
1776  AM.SymbolFlags = J->getTargetFlags();
1777  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
1778  AM.BlockAddr = BA->getBlockAddress();
1779  AM.SymbolFlags = BA->getTargetFlags();
1780  Offset = BA->getOffset();
1781  } else
1782  llvm_unreachable("Unhandled symbol reference node.");
1783 
1784  if (foldOffsetIntoAddress(Offset, AM)) {
1785  AM = Backup;
1786  return true;
1787  }
1788 
1789  if (IsRIPRel)
1790  AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
1791 
1792  // Commit the changes now that we know this fold is safe.
1793  return false;
1794 }
1795 
1796 /// Add the specified node to the specified addressing mode, returning true if
1797 /// it cannot be done. This just pattern matches for the addressing mode.
1798 bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
1799  if (matchAddressRecursively(N, AM, 0))
1800  return true;
1801 
1802  // Post-processing: Make a second attempt to fold a load, if we now know
1803  // that there will not be any other register. This is only performed for
1804  // 64-bit ILP32 mode since 32-bit mode and 64-bit LP64 mode will have folded
1805  // any foldable load the first time.
1806  if (Subtarget->isTarget64BitILP32() &&
1807  AM.BaseType == X86ISelAddressMode::RegBase &&
1808  AM.Base_Reg.getNode() != nullptr && AM.IndexReg.getNode() == nullptr) {
1809  SDValue Save_Base_Reg = AM.Base_Reg;
1810  if (auto *LoadN = dyn_cast<LoadSDNode>(Save_Base_Reg)) {
1811  AM.Base_Reg = SDValue();
1812  if (matchLoadInAddress(LoadN, AM, /*AllowSegmentRegForX32=*/true))
1813  AM.Base_Reg = Save_Base_Reg;
1814  }
1815  }
1816 
1817  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
1818  // a smaller encoding and avoids a scaled index.
1819  if (AM.Scale == 2 &&
1820  AM.BaseType == X86ISelAddressMode::RegBase &&
1821  AM.Base_Reg.getNode() == nullptr) {
1822  AM.Base_Reg = AM.IndexReg;
1823  AM.Scale = 1;
1824  }
1825 
1826  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
1827  // because it has a smaller encoding.
1828  // TODO: Which other code models can use this?
1829  switch (TM.getCodeModel()) {
1830  default: break;
1831  case CodeModel::Small:
1832  case CodeModel::Kernel:
1833  if (Subtarget->is64Bit() &&
1834  AM.Scale == 1 &&
1835  AM.BaseType == X86ISelAddressMode::RegBase &&
1836  AM.Base_Reg.getNode() == nullptr &&
1837  AM.IndexReg.getNode() == nullptr &&
1838  AM.SymbolFlags == X86II::MO_NO_FLAG &&
1839  AM.hasSymbolicDisplacement())
1840  AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
1841  break;
1842  }
1843 
1844  return false;
1845 }
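// A sketch of the Scale == 2 post-processing above: an address (,%rcx,2) is
// emitted as
//   leaq (%rcx,%rcx), %rax
// instead of
//   leaq (,%rcx,2), %rax
// since the no-base SIB form would also require a 32-bit displacement.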
1846 
1847 bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
1848  unsigned Depth) {
1849  // Add an artificial use to this node so that we can keep track of
1850  // it if it gets CSE'd with a different node.
1851  HandleSDNode Handle(N);
1852 
1853  X86ISelAddressMode Backup = AM;
1854  if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
1855  !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
1856  return false;
1857  AM = Backup;
1858 
1859  // Try again after commuting the operands.
1860  if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM,
1861  Depth + 1) &&
1862  !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1))
1863  return false;
1864  AM = Backup;
1865 
1866  // If we couldn't fold both operands into the address at the same time,
1867  // see if we can just put each operand into a register and fold at least
1868  // the add.
1869  if (AM.BaseType == X86ISelAddressMode::RegBase &&
1870  !AM.Base_Reg.getNode() &&
1871  !AM.IndexReg.getNode()) {
1872  N = Handle.getValue();
1873  AM.Base_Reg = N.getOperand(0);
1874  AM.IndexReg = N.getOperand(1);
1875  AM.Scale = 1;
1876  return false;
1877  }
1878  N = Handle.getValue();
1879  return true;
1880 }
1881 
1882 // Insert a node into the DAG at least before the Pos node's position. This
1883 // will reposition the node as needed, and will assign it a node ID that is <=
1884 // the Pos node's ID. Note that this does *not* preserve the uniqueness of node
1885 // IDs! The selection DAG must no longer depend on their uniqueness when this
1886 // is used.
1887 static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
1888  if (N->getNodeId() == -1 ||
1889  (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
1890  SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
1891  DAG.RepositionNode(Pos->getIterator(), N.getNode());
1892  // Mark Node as invalid for pruning as after this it may be a successor to a
1893  // selected node but otherwise be in the same position of Pos.
1894  // Conservatively mark it with the same -abs(Id) to assure node id
1895  // invariant is preserved.
1896  N->setNodeId(Pos->getNodeId());
1897  SelectionDAGISel::InvalidateNodeId(N.getNode());
1898  }
1899 }
1900 
1901 // Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
1902 // safe. This allows us to convert the shift and AND into an h-register
1903 // extract and a scaled index. Returns false if the simplification is
1904 // performed.
1905 static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
1906  uint64_t Mask,
1907  SDValue Shift, SDValue X,
1908  X86ISelAddressMode &AM) {
1909  if (Shift.getOpcode() != ISD::SRL ||
1910  !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1911  !Shift.hasOneUse())
1912  return true;
1913 
1914  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
1915  if (ScaleLog <= 0 || ScaleLog >= 4 ||
1916  Mask != (0xffu << ScaleLog))
1917  return true;
1918 
1919  MVT VT = N.getSimpleValueType();
1920  SDLoc DL(N);
1921  SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
1922  SDValue NewMask = DAG.getConstant(0xff, DL, VT);
1923  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
1924  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
1925  SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
1926  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);
1927 
1928  // Insert the new nodes into the topological ordering. We must do this in
1929  // a valid topological ordering as nothing is going to go back and re-sort
1930  // these nodes. We continually insert before 'N' in sequence as this is
1931  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1932  // hierarchy left to express.
1933  insertDAGNode(DAG, N, Eight);
1934  insertDAGNode(DAG, N, Srl);
1935  insertDAGNode(DAG, N, NewMask);
1936  insertDAGNode(DAG, N, And);
1937  insertDAGNode(DAG, N, ShlCount);
1938  insertDAGNode(DAG, N, Shl);
1939  DAG.ReplaceAllUsesWith(N, Shl);
1940  DAG.RemoveDeadNode(N.getNode());
1941  AM.IndexReg = And;
1942  AM.Scale = (1 << ScaleLog);
1943  return false;
1944 }
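// Worked example with C1 == 2: the node (X >> 6) & 0x3fc (0x3fc == 0xff << 2,
// so ScaleLog == 8 - 6 == 2) is rebuilt as ((X >> 8) & 0xff) << 2, leaving
// AM.IndexReg == (X >> 8) & 0xff (an h-register extract) and AM.Scale == 4.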
1945 
1946 // Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
1947 // allows us to fold the shift into this addressing mode. Returns false if the
1948 // transform succeeded.
1949 static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
1950  X86ISelAddressMode &AM) {
1951  SDValue Shift = N.getOperand(0);
1952 
1953  // Use a signed mask so that shifting right will insert sign bits. These
1954  // bits will be removed when we shift the result left so it doesn't matter
1955  // what we use. This might allow a smaller immediate encoding.
1956  int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
1957 
1958  // If we have an any_extend feeding the AND, look through it to see if there
1959  // is a shift behind it. But only if the AND doesn't use the extended bits.
1960  // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
1961  bool FoundAnyExtend = false;
1962  if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
1963  Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
1964  isUInt<32>(Mask)) {
1965  FoundAnyExtend = true;
1966  Shift = Shift.getOperand(0);
1967  }
1968 
1969  if (Shift.getOpcode() != ISD::SHL ||
1970  !isa<ConstantSDNode>(Shift.getOperand(1)))
1971  return true;
1972 
1973  SDValue X = Shift.getOperand(0);
1974 
1975  // Not likely to be profitable if either the AND or SHIFT node has more
1976  // than one use (unless all uses are for address computation). Besides,
1977  // isel mechanism requires their node ids to be reused.
1978  if (!N.hasOneUse() || !Shift.hasOneUse())
1979  return true;
1980 
1981  // Verify that the shift amount is something we can fold.
1982  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1983  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
1984  return true;
1985 
1986  MVT VT = N.getSimpleValueType();
1987  SDLoc DL(N);
1988  if (FoundAnyExtend) {
1989  SDValue NewX = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X);
1990  insertDAGNode(DAG, N, NewX);
1991  X = NewX;
1992  }
1993 
1994  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
1995  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
1996  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
1997 
1998  // Insert the new nodes into the topological ordering. We must do this in
1999  // a valid topological ordering as nothing is going to go back and re-sort
2000  // these nodes. We continually insert before 'N' in sequence as this is
2001  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2002  // hierarchy left to express.
2003  insertDAGNode(DAG, N, NewMask);
2004  insertDAGNode(DAG, N, NewAnd);
2005  insertDAGNode(DAG, N, NewShift);
2006  DAG.ReplaceAllUsesWith(N, NewShift);
2007  DAG.RemoveDeadNode(N.getNode());
2008 
2009  AM.Scale = 1 << ShiftAmt;
2010  AM.IndexReg = NewAnd;
2011  return false;
2012 }
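// Worked example: (X << 3) & 0xf8 is rebuilt as (X & 0x1f) << 3, so the
// shift folds into the addressing mode with AM.IndexReg == X & 0x1f and
// AM.Scale == 8.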
2013 
2014 // Implement some heroics to detect shifts of masked values where the mask can
2015 // be replaced by extending the shift and undoing that in the addressing mode
2016 // scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
2017 // (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
2018 // the addressing mode. This results in code such as:
2019 //
2020 // int f(short *y, int *lookup_table) {
2021 // ...
2022 // return *y + lookup_table[*y >> 11];
2023 // }
2024 //
2025 // Turning into:
2026 // movzwl (%rdi), %eax
2027 // movl %eax, %ecx
2028 // shrl $11, %ecx
2029 // addl (%rsi,%rcx,4), %eax
2030 //
2031 // Instead of:
2032 // movzwl (%rdi), %eax
2033 // movl %eax, %ecx
2034 // shrl $9, %ecx
2035 // andl $124, %ecx
2036 // addl (%rsi,%rcx), %eax
2037 //
2038 // Note that this function assumes the mask is provided as a mask *after* the
2039 // value is shifted. The input chain may or may not match that, but computing
2040 // such a mask is trivial.
2041 static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
2042  uint64_t Mask,
2043  SDValue Shift, SDValue X,
2044  X86ISelAddressMode &AM) {
2045  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
2046  !isa<ConstantSDNode>(Shift.getOperand(1)))
2047  return true;
2048 
2049  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
2050  unsigned MaskLZ = countLeadingZeros(Mask);
2051  unsigned MaskTZ = countTrailingZeros(Mask);
2052 
2053  // The amount of shift we're trying to fit into the addressing mode is taken
2054  // from the trailing zeros of the mask.
2055  unsigned AMShiftAmt = MaskTZ;
2056 
2057  // There is nothing we can do here unless the mask is removing some bits.
2058  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
2059  if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
2060 
2061  // We also need to ensure that the mask is a contiguous run of bits.
2062  if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
2063 
2064  // Scale the leading zero count down based on the actual size of the value.
2065  // Also scale it down based on the size of the shift.
2066  unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
2067  if (MaskLZ < ScaleDown)
2068  return true;
2069  MaskLZ -= ScaleDown;
2070 
2071  // The final check is to ensure that any masked out high bits of X are
2072  // already known to be zero. Otherwise, the mask has a semantic impact
2073  // other than masking out a couple of low bits. Unfortunately, because of
2074  // the mask, zero extensions will be removed from operands in some cases.
2075  // This code works extra hard to look through extensions because we can
2076  // replace them with zero extensions cheaply if necessary.
2077  bool ReplacingAnyExtend = false;
2078  if (X.getOpcode() == ISD::ANY_EXTEND) {
2079  unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
2080  X.getOperand(0).getSimpleValueType().getSizeInBits();
2081  // Assume that we'll replace the any-extend with a zero-extend, and
2082  // narrow the search to the extended value.
2083  X = X.getOperand(0);
2084  MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
2085  ReplacingAnyExtend = true;
2086  }
2087  APInt MaskedHighBits =
2088  APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
2089  KnownBits Known = DAG.computeKnownBits(X);
2090  if (MaskedHighBits != Known.Zero) return true;
2091 
2092  // We've identified a pattern that can be transformed into a single shift
2093  // and an addressing mode. Make it so.
2094  MVT VT = N.getSimpleValueType();
2095  if (ReplacingAnyExtend) {
2096  assert(X.getValueType() != VT);
2097  // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
2098  SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
2099  insertDAGNode(DAG, N, NewX);
2100  X = NewX;
2101  }
2102  SDLoc DL(N);
2103  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
2104  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
2105  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
2106  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
2107 
2108  // Insert the new nodes into the topological ordering. We must do this in
2109  // a valid topological ordering as nothing is going to go back and re-sort
2110  // these nodes. We continually insert before 'N' in sequence as this is
2111  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2112  // hierarchy left to express.
2113  insertDAGNode(DAG, N, NewSRLAmt);
2114  insertDAGNode(DAG, N, NewSRL);
2115  insertDAGNode(DAG, N, NewSHLAmt);
2116  insertDAGNode(DAG, N, NewSHL);
2117  DAG.ReplaceAllUsesWith(N, NewSHL);
2118  DAG.RemoveDeadNode(N.getNode());
2119 
2120  AM.Scale = 1 << AMShiftAmt;
2121  AM.IndexReg = NewSRL;
2122  return false;
2123 }
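// Numeric trace of the comment's example: for a zero-extended i16 value X,
// lookup_table[X >> 11] canonicalizes to (and (srl X, 9), 124). Here
// ShiftAmt == 9 and Mask == 124 == 0b1111100, so MaskTZ == 2, the mask is a
// contiguous run, and the masked-off high bits of X are already known zero.
// The node is rebuilt as (shl (srl X, 11), 2): AM.IndexReg == X >> 11 with
// AM.Scale == 4, matching the "Turning into" sequence above.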
2124 
2125 // Transform "(X >> SHIFT) & (MASK << C1)" to
2126 // "((X >> (SHIFT + C1)) & (MASK)) << C1". Everything before the SHL will be
2127 // matched to a BEXTR later. Returns false if the simplification is performed.
2128 static bool foldMaskedShiftToBEXTR(SelectionDAG &DAG, SDValue N,
2129  uint64_t Mask,
2130  SDValue Shift, SDValue X,
2131  X86ISelAddressMode &AM,
2132  const X86Subtarget &Subtarget) {
2133  if (Shift.getOpcode() != ISD::SRL ||
2134  !isa<ConstantSDNode>(Shift.getOperand(1)) ||
2135  !Shift.hasOneUse() || !N.hasOneUse())
2136  return true;
2137 
2138  // Only do this if BEXTR will be matched by matchBEXTRFromAndImm.
2139  if (!Subtarget.hasTBM() &&
2140  !(Subtarget.hasBMI() && Subtarget.hasFastBEXTR()))
2141  return true;
2142 
2143  // We need to ensure that the mask is a contiguous run of bits.
2144  if (!isShiftedMask_64(Mask)) return true;
2145 
2146  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
2147 
2148  // The amount of shift we're trying to fit into the addressing mode is taken
2149  // from the trailing zeros of the mask.
2150  unsigned AMShiftAmt = countTrailingZeros(Mask);
2151 
2152  // There is nothing we can do here unless the mask is removing some bits.
2153  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
2154  if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
2155 
2156  MVT VT = N.getSimpleValueType();
2157  SDLoc DL(N);
2158  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
2159  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
2160  SDValue NewMask = DAG.getConstant(Mask >> AMShiftAmt, DL, VT);
2161  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, NewSRL, NewMask);
2162  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
2163  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewAnd, NewSHLAmt);
2164 
2165  // Insert the new nodes into the topological ordering. We must do this in
2166  // a valid topological ordering as nothing is going to go back and re-sort
2167  // these nodes. We continually insert before 'N' in sequence as this is
2168  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2169  // hierarchy left to express.
2170  insertDAGNode(DAG, N, NewSRLAmt);
2171  insertDAGNode(DAG, N, NewSRL);
2172  insertDAGNode(DAG, N, NewMask);
2173  insertDAGNode(DAG, N, NewAnd);
2174  insertDAGNode(DAG, N, NewSHLAmt);
2175  insertDAGNode(DAG, N, NewSHL);
2176  DAG.ReplaceAllUsesWith(N, NewSHL);
2177  DAG.RemoveDeadNode(N.getNode());
2178 
2179  AM.Scale = 1 << AMShiftAmt;
2180  AM.IndexReg = NewAnd;
2181  return false;
2182 }
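// Worked example, assuming TBM or BMI with fast BEXTR: (X >> 4) & 0xfc
// (0xfc == 0x3f << 2, a shifted mask) is rebuilt as ((X >> 6) & 0x3f) << 2;
// the srl+and pair later matches BEXTR and the shl folds into the addressing
// mode as AM.Scale == 4.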
2183 
2184 bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
2185  unsigned Depth) {
2186  SDLoc dl(N);
2187  LLVM_DEBUG({
2188  dbgs() << "MatchAddress: ";
2189  AM.dump(CurDAG);
2190  });
2191  // Limit recursion.
2192  if (Depth > 5)
2193  return matchAddressBase(N, AM);
2194 
2195  // If this is already a %rip relative address, we can only merge immediates
2196  // into it. Instead of handling this in every case, we handle it here.
2197  // RIP relative addressing: %rip + 32-bit displacement!
2198  if (AM.isRIPRelative()) {
2199  // FIXME: JumpTable and ExternalSymbol address currently don't like
2200  // displacements. It isn't very important, but this should be fixed for
2201  // consistency.
2202  if (!(AM.ES || AM.MCSym) && AM.JT != -1)
2203  return true;
2204 
2205  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
2206  if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
2207  return false;
2208  return true;
2209  }
2210 
2211  switch (N.getOpcode()) {
2212  default: break;
2213  case ISD::LOCAL_RECOVER: {
2214  if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
2215  if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
2216  // Use the symbol and don't prefix it.
2217  AM.MCSym = ESNode->getMCSymbol();
2218  return false;
2219  }
2220  break;
2221  }
2222  case ISD::Constant: {
2223  uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2224  if (!foldOffsetIntoAddress(Val, AM))
2225  return false;
2226  break;
2227  }
2228 
2229  case X86ISD::Wrapper:
2230  case X86ISD::WrapperRIP:
2231  if (!matchWrapper(N, AM))
2232  return false;
2233  break;
2234 
2235  case ISD::LOAD:
2236  if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
2237  return false;
2238  break;
2239 
2240  case ISD::FrameIndex:
2241  if (AM.BaseType == X86ISelAddressMode::RegBase &&
2242  AM.Base_Reg.getNode() == nullptr &&
2243  (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
2244  AM.BaseType = X86ISelAddressMode::FrameIndexBase;
2245  AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
2246  return false;
2247  }
2248  break;
2249 
2250  case ISD::SHL:
2251  if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2252  break;
2253 
2254  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2255  unsigned Val = CN->getZExtValue();
2256  // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
2257  // that the base operand remains free for further matching. If
2258  // the base doesn't end up getting used, a post-processing step
2259  // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
2260  if (Val == 1 || Val == 2 || Val == 3) {
2261  AM.Scale = 1 << Val;
2262  SDValue ShVal = N.getOperand(0);
2263 
2264  // Okay, we know that we have a scale by now. However, if the scaled
2265  // value is an add of something and a constant, we can fold the
2266  // constant into the disp field here.
2267  if (CurDAG->isBaseWithConstantOffset(ShVal)) {
2268  AM.IndexReg = ShVal.getOperand(0);
2269  ConstantSDNode *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1));
2270  uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
2271  if (!foldOffsetIntoAddress(Disp, AM))
2272  return false;
2273  }
2274 
2275  AM.IndexReg = ShVal;
2276  return false;
2277  }
2278  }
2279  break;
2280 
2281  case ISD::SRL: {
2282  // Scale must not be used already.
2283  if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2284 
2285  // We only handle up to 64-bit values here as those are what matter for
2286  // addressing mode optimizations.
2287  assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2288  "Unexpected value size!");
2289 
2290  SDValue And = N.getOperand(0);
2291  if (And.getOpcode() != ISD::AND) break;
2292  SDValue X = And.getOperand(0);
2293 
2294  // The mask used for the transform is expected to be post-shift, but we
2295  // found the shift first so just apply the shift to the mask before passing
2296  // it down.
2297  if (!isa<ConstantSDNode>(N.getOperand(1)) ||
2298  !isa<ConstantSDNode>(And.getOperand(1)))
2299  break;
2300  uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
2301 
2302  // Try to fold the mask and shift into the scale, and return false if we
2303  // succeed.
2304  if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
2305  return false;
2306  break;
2307  }
2308 
2309  case ISD::SMUL_LOHI:
2310  case ISD::UMUL_LOHI:
2311  // A mul_lohi where we need the low part can be folded as a plain multiply.
2312  if (N.getResNo() != 0) break;
2313  LLVM_FALLTHROUGH;
2314  case ISD::MUL:
2315  case X86ISD::MUL_IMM:
2316  // X*[3,5,9] -> X+X*[2,4,8]
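// e.g. X*9 becomes base == index == X with Scale == 8, selecting to
// something like "leaq (%rax,%rax,8), %rcx".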
2317  if (AM.BaseType == X86ISelAddressMode::RegBase &&
2318  AM.Base_Reg.getNode() == nullptr &&
2319  AM.IndexReg.getNode() == nullptr) {
2320  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
2321  if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
2322  CN->getZExtValue() == 9) {
2323  AM.Scale = unsigned(CN->getZExtValue())-1;
2324 
2325  SDValue MulVal = N.getOperand(0);
2326  SDValue Reg;
2327 
2328  // Okay, we know that we have a scale by now. However, if the scaled
2329  // value is an add of something and a constant, we can fold the
2330  // constant into the disp field here.
2331  if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
2332  isa<ConstantSDNode>(MulVal.getOperand(1))) {
2333  Reg = MulVal.getOperand(0);
2334  ConstantSDNode *AddVal =
2335  cast<ConstantSDNode>(MulVal.getOperand(1));
2336  uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
2337  if (foldOffsetIntoAddress(Disp, AM))
2338  Reg = N.getOperand(0);
2339  } else {
2340  Reg = N.getOperand(0);
2341  }
2342 
2343  AM.IndexReg = AM.Base_Reg = Reg;
2344  return false;
2345  }
2346  }
2347  break;
2348 
2349  case ISD::SUB: {
2350  // Given A-B, if A can be completely folded into the address (leaving the
2351  // index field unused), use -B as the index.
2352  // This is a win if A has multiple parts that can be folded into
2353  // the address. Also, this saves a mov if the base register has
2354  // other uses, since it avoids a two-address sub instruction; however,
2355  // it costs an additional mov if the index register has other uses.
2356 
2357  // Add an artificial use to this node so that we can keep track of
2358  // it if it gets CSE'd with a different node.
2359  HandleSDNode Handle(N);
2360 
2361  // Test if the LHS of the sub can be folded.
2362  X86ISelAddressMode Backup = AM;
2363  if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
2364  N = Handle.getValue();
2365  AM = Backup;
2366  break;
2367  }
2368  N = Handle.getValue();
2369  // Test if the index field is free for use.
2370  if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
2371  AM = Backup;
2372  break;
2373  }
2374 
2375  int Cost = 0;
2376  SDValue RHS = N.getOperand(1);
2377  // If the RHS involves a register with multiple uses, this
2378  // transformation incurs an extra mov, due to the neg instruction
2379  // clobbering its operand.
2380  if (!RHS.getNode()->hasOneUse() ||
2381  RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
2382  RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
2383  RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
2384  (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
2385  RHS.getOperand(0).getValueType() == MVT::i32))
2386  ++Cost;
2387  // If the base is a register with multiple uses, this
2388  // transformation may save a mov.
2389  if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
2390  !AM.Base_Reg.getNode()->hasOneUse()) ||
2391  AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2392  --Cost;
2393  // If the folded LHS was interesting, this transformation saves
2394  // address arithmetic.
2395  if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
2396  ((AM.Disp != 0) && (Backup.Disp == 0)) +
2397  (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
2398  --Cost;
2399  // If it doesn't look like it may be an overall win, don't do it.
2400  if (Cost >= 0) {
2401  AM = Backup;
2402  break;
2403  }
2404 
2405  // Ok, the transformation is legal and appears profitable. Go for it.
2406  // Negation will be emitted later to avoid creating dangling nodes if this
2407  // was an unprofitable LEA.
2408  AM.IndexReg = RHS;
2409  AM.NegateIndex = true;
2410  AM.Scale = 1;
2411  return false;
2412  }
2413 
2414  case ISD::ADD:
2415  if (!matchAdd(N, AM, Depth))
2416  return false;
2417  break;
2418 
2419  case ISD::OR:
2420  // We want to look through a transform in InstCombine and DAGCombiner that
2421  // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
2422  // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
2423  // An 'lea' can then be used to match the shift (multiply) and add:
2424  // and $1, %esi
2425  // lea (%rsi, %rdi, 8), %rax
2426  if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
2427  !matchAdd(N, AM, Depth))
2428  return false;
2429  break;
2430 
2431  case ISD::XOR:
2432  // We want to look through a transform in InstCombine that
2433  // turns 'add' with min_signed_val into 'xor', so we can treat this 'xor'
2434  // exactly like an 'add'.
2435  if (isMinSignedConstant(N.getOperand(1)) && !matchAdd(N, AM, Depth))
2436  return false;
2437  break;
2438 
2439  case ISD::AND: {
2440  // Perform some heroic transforms on an and of a constant-count shift
2441  // with a constant to enable use of the scaled offset field.
2442 
2443  // Scale must not be used already.
2444  if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2445 
2446  // We only handle up to 64-bit values here as those are what matter for
2447  // addressing mode optimizations.
2448  assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2449  "Unexpected value size!");
2450 
2451  if (!isa<ConstantSDNode>(N.getOperand(1)))
2452  break;
2453 
2454  if (N.getOperand(0).getOpcode() == ISD::SRL) {
2455  SDValue Shift = N.getOperand(0);
2456  SDValue X = Shift.getOperand(0);
2457 
2458  uint64_t Mask = N.getConstantOperandVal(1);
2459 
2460  // Try to fold the mask and shift into an extract and scale.
2461  if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
2462  return false;
2463 
2464  // Try to fold the mask and shift directly into the scale.
2465  if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
2466  return false;
2467 
2468  // Try to fold the mask and shift into BEXTR and scale.
2469  if (!foldMaskedShiftToBEXTR(*CurDAG, N, Mask, Shift, X, AM, *Subtarget))
2470  return false;
2471  }
2472 
2473  // Try to swap the mask and shift to place shifts which can be done as
2474  // a scale on the outside of the mask.
2475  if (!foldMaskedShiftToScaledMask(*CurDAG, N, AM))
2476  return false;
2477 
2478  break;
2479  }
2480  case ISD::ZERO_EXTEND: {
2481  // Try to widen a zexted shift left to the same size as its use, so we can
2482  // match the shift as a scale factor.
2483  if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2484  break;
2485  if (N.getOperand(0).getOpcode() != ISD::SHL || !N.getOperand(0).hasOneUse())
2486  break;
2487 
2488  // Give up if the shift is not a valid scale factor [1,2,3].
2489  SDValue Shl = N.getOperand(0);
2490  auto *ShAmtC = dyn_cast<ConstantSDNode>(Shl.getOperand(1));
2491  if (!ShAmtC || ShAmtC->getZExtValue() > 3)
2492  break;
2493 
2494  // The narrow shift must only shift out zero bits (it must be 'nuw').
2495  // That makes it safe to widen to the destination type.
2496  APInt HighZeros = APInt::getHighBitsSet(Shl.getValueSizeInBits(),
2497  ShAmtC->getZExtValue());
2498  if (!CurDAG->MaskedValueIsZero(Shl.getOperand(0), HighZeros))
2499  break;
2500 
2501  // zext (shl nuw i8 %x, C) to i32 --> shl (zext i8 %x to i32), (zext C)
2502  MVT VT = N.getSimpleValueType();
2503  SDLoc DL(N);
2504  SDValue Zext = CurDAG->getNode(ISD::ZERO_EXTEND, DL, VT, Shl.getOperand(0));
2505  SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, Shl.getOperand(1));
2506 
2507  // Convert the shift to scale factor.
2508  AM.Scale = 1 << ShAmtC->getZExtValue();
2509  AM.IndexReg = Zext;
2510 
2511  insertDAGNode(*CurDAG, N, Zext);
2512  insertDAGNode(*CurDAG, N, NewShl);
2513  CurDAG->ReplaceAllUsesWith(N, NewShl);
2514  CurDAG->RemoveDeadNode(N.getNode());
2515  return false;
2516  }
2517  }
2518 
2519  return matchAddressBase(N, AM);
2520 }
2521 
2522 /// Helper for MatchAddress. Add the specified node to the
2523 /// specified addressing mode without any further recursion.
2524 bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
2525  // Is the base register already occupied?
2526  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
2527  // If so, check to see if the scale index register is set.
2528  if (!AM.IndexReg.getNode()) {
2529  AM.IndexReg = N;
2530  AM.Scale = 1;
2531  return false;
2532  }
2533 
2534  // Otherwise, we cannot select it.
2535  return true;
2536  }
2537 
2538  // Default, generate it as a register.
2539  AM.BaseType = X86ISelAddressMode::RegBase;
2540  AM.Base_Reg = N;
2541  return false;
2542 }
2543 
2544 bool X86DAGToDAGISel::matchVectorAddressRecursively(SDValue N,
2545  X86ISelAddressMode &AM,
2546  unsigned Depth) {
2547  SDLoc dl(N);
2548  LLVM_DEBUG({
2549  dbgs() << "MatchVectorAddress: ";
2550  AM.dump(CurDAG);
2551  });
2552  // Limit recursion.
2553  if (Depth > 5)
2554  return matchAddressBase(N, AM);
2555 
2556  // TODO: Support other operations.
2557  switch (N.getOpcode()) {
2558  case ISD::Constant: {
2559  uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2560  if (!foldOffsetIntoAddress(Val, AM))
2561  return false;
2562  break;
2563  }
2564  case X86ISD::Wrapper:
2565  if (!matchWrapper(N, AM))
2566  return false;
2567  break;
2568  case ISD::ADD: {
2569  // Add an artificial use to this node so that we can keep track of
2570  // it if it gets CSE'd with a different node.
2571  HandleSDNode Handle(N);
2572 
2573  X86ISelAddressMode Backup = AM;
2574  if (!matchVectorAddressRecursively(N.getOperand(0), AM, Depth + 1) &&
2575  !matchVectorAddressRecursively(Handle.getValue().getOperand(1), AM,
2576  Depth + 1))
2577  return false;
2578  AM = Backup;
2579 
2580  // Try again after commuting the operands.
2581  if (!matchVectorAddressRecursively(Handle.getValue().getOperand(1), AM,
2582  Depth + 1) &&
2583  !matchVectorAddressRecursively(Handle.getValue().getOperand(0), AM,
2584  Depth + 1))
2585  return false;
2586  AM = Backup;
2587 
2588  N = Handle.getValue();
2589  break;
2590  }
2591  }
2592 
2593  return matchAddressBase(N, AM);
2594 }
2595 
2596 /// Helper for selectVectorAddr. Handles things that can be folded into a
2597 /// gather/scatter address. The index register and scale should have already
2598 /// been handled.
2599 bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) {
2600  return matchVectorAddressRecursively(N, AM, 0);
2601 }
2602 
2603 bool X86DAGToDAGISel::selectVectorAddr(MemSDNode *Parent, SDValue BasePtr,
2604  SDValue IndexOp, SDValue ScaleOp,
2605  SDValue &Base, SDValue &Scale,
2606  SDValue &Index, SDValue &Disp,
2607  SDValue &Segment) {
2608  X86ISelAddressMode AM;
2609  AM.IndexReg = IndexOp;
2610  AM.Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
2611 
2612  unsigned AddrSpace = Parent->getPointerInfo().getAddrSpace();
2613  if (AddrSpace == X86AS::GS)
2614  AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2615  if (AddrSpace == X86AS::FS)
2616  AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2617  if (AddrSpace == X86AS::SS)
2618  AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2619 
2620  SDLoc DL(BasePtr);
2621  MVT VT = BasePtr.getSimpleValueType();
2622 
2623  // Try to match into the base and displacement fields.
2624  if (matchVectorAddress(BasePtr, AM))
2625  return false;
2626 
2627  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2628  return true;
2629 }
2630 
2631 /// Returns true if it is able to pattern match an addressing mode.
2632 /// It returns the operands which make up the maximal addressing mode it can
2633 /// match by reference.
2634 ///
2635 /// Parent is the parent node of the addr operand that is being matched. It
2636 /// is always a load, store, atomic node, or null. It is only null when
2637 /// checking memory operands for inline asm nodes.
2638 bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
2639  SDValue &Scale, SDValue &Index,
2640  SDValue &Disp, SDValue &Segment) {
2641  X86ISelAddressMode AM;
2642 
2643  if (Parent &&
2644  // These opcodes are all the nodes that have an "addr:$ptr" operand
2645  // but are not MemSDNodes, and thus don't have proper addrspace info.
2646  Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
2647  Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
2648  Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
2649  Parent->getOpcode() != X86ISD::ENQCMD && // Fixme
2650  Parent->getOpcode() != X86ISD::ENQCMDS && // Fixme
2651  Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
2652  Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
2653  unsigned AddrSpace =
2654  cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
2655  if (AddrSpace == X86AS::GS)
2656  AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2657  if (AddrSpace == X86AS::FS)
2658  AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2659  if (AddrSpace == X86AS::SS)
2660  AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2661  }
2662 
2663  // Save the DL and VT before calling matchAddress, it can invalidate N.
2664  SDLoc DL(N);
2665  MVT VT = N.getSimpleValueType();
2666 
2667  if (matchAddress(N, AM))
2668  return false;
2669 
2670  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2671  return true;
2672 }
2673 
2674 bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
2675  // In static codegen with small code model, we can get the address of a label
2676  // into a register with 'movl'.
2677  if (N->getOpcode() != X86ISD::Wrapper)
2678  return false;
2679 
2680  N = N.getOperand(0);
2681 
2682  // At least GNU as does not accept 'movl' for TPOFF relocations.
2683  // FIXME: We could use 'movl' when we know we are targeting MC.
2684  if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
2685  return false;
2686 
2687  Imm = N;
2688  if (N->getOpcode() != ISD::TargetGlobalAddress)
2689  return TM.getCodeModel() == CodeModel::Small;
2690 
2691  Optional<ConstantRange> CR =
2692  cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
2693  if (!CR)
2694  return TM.getCodeModel() == CodeModel::Small;
2695 
2696  return CR->getUnsignedMax().ult(1ull << 32);
2697 }
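// A sketch of the payoff: in the small code model a 64-bit symbol address can
// be materialized with the 5-byte
//   movl $symbol, %eax
// (implicitly zeroing the upper half of %rax) rather than a 10-byte movabsq.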
2698 
2699 bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
2700  SDValue &Scale, SDValue &Index,
2701  SDValue &Disp, SDValue &Segment) {
2702  // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
2703  SDLoc DL(N);
2704 
2705  if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
2706  return false;
2707 
2708  RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
2709  if (RN && RN->getReg() == 0)
2710  Base = CurDAG->getRegister(0, MVT::i64);
2711  else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
2712  // Base could already be %rip, particularly in the x32 ABI.
2713  SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2714  MVT::i64), 0);
2715  Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2716  Base);
2717  }
2718 
2719  RN = dyn_cast<RegisterSDNode>(Index);
2720  if (RN && RN->getReg() == 0)
2721  Index = CurDAG->getRegister(0, MVT::i64);
2722  else {
2723  assert(Index.getValueType() == MVT::i32 &&
2724  "Expect to be extending 32-bit registers for use in LEA");
2725  SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2726  MVT::i64), 0);
2727  Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2728  Index);
2729  }
2730 
2731  return true;
2732 }
2733 
2734 /// Calls SelectAddr and determines if the maximal addressing
2735 /// mode it matches can be cost-effectively emitted as an LEA instruction.
2736 bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
2737  SDValue &Base, SDValue &Scale,
2738  SDValue &Index, SDValue &Disp,
2739  SDValue &Segment) {
2740  X86ISelAddressMode AM;
2741 
2742  // Save the DL and VT before calling matchAddress, it can invalidate N.
2743  SDLoc DL(N);
2744  MVT VT = N.getSimpleValueType();
2745 
2746  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
2747  // segments.
2748  SDValue Copy = AM.Segment;
2749  SDValue T = CurDAG->getRegister(0, MVT::i32);
2750  AM.Segment = T;
2751  if (matchAddress(N, AM))
2752  return false;
2753  assert(T == AM.Segment);
2754  AM.Segment = Copy;
2755 
2756  unsigned Complexity = 0;
2757  if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode())
2758  Complexity = 1;
2759  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2760  Complexity = 4;
2761 
2762  if (AM.IndexReg.getNode())
2763  Complexity++;
2764 
2765  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg or
2766  // a simple shift.
2767  if (AM.Scale > 1)
2768  Complexity++;
2769 
2770  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
2771  // to a LEA. This is determined with some experimentation but is by no means
2772  // optimal (especially for code size consideration). LEA is nice because of
2773  // its three-address nature. Tweak the cost function again when we can run
2774  // convertToThreeAddress() at register allocation time.
2775  if (AM.hasSymbolicDisplacement()) {
2776  // For X86-64, always use LEA to materialize RIP-relative addresses.
2777  if (Subtarget->is64Bit())
2778  Complexity = 4;
2779  else
2780  Complexity += 2;
2781  }
2782 
2783  // Heuristic: try harder to form an LEA from ADD if the operands set flags.
2784  // Unlike ADD, LEA does not affect flags, so we will be less likely to require
2785  // duplicating flag-producing instructions later in the pipeline.
2786  if (N.getOpcode() == ISD::ADD) {
2787  auto isMathWithFlags = [](SDValue V) {
2788  switch (V.getOpcode()) {
2789  case X86ISD::ADD:
2790  case X86ISD::SUB:
2791  case X86ISD::ADC:
2792  case X86ISD::SBB:
2793  case X86ISD::SMUL:
2794  case X86ISD::UMUL:
2795  /* TODO: These opcodes can be added safely, but we may want to justify
2796  their inclusion for different reasons (better for reg-alloc).
2797  case X86ISD::OR:
2798  case X86ISD::XOR:
2799  case X86ISD::AND:
2800  */
2801  // Value 1 is the flag output of the node - verify it's not dead.
2802  return !SDValue(V.getNode(), 1).use_empty();
2803  default:
2804  return false;
2805  }
2806  };
2807  // TODO: We might want to factor in whether there's a load folding
2808  // opportunity for the math op that disappears with LEA.
2809  if (isMathWithFlags(N.getOperand(0)) || isMathWithFlags(N.getOperand(1)))
2810  Complexity++;
2811  }
2812 
2813  if (AM.Disp)
2814  Complexity++;
2815 
2816  // If it isn't worth using an LEA, reject it.
2817  if (Complexity <= 2)
2818  return false;
2819 
2820  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2821  return true;
2822 }
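// Worked example of the Complexity heuristic above: 16(%rdi,%rsi,4) scores
// base (1) + index (+1) + scale > 1 (+1) + disp (+1) = 4 > 2, so it is kept
// as "leaq 16(%rdi,%rsi,4), %rax"; a bare (%rdi) scores 1 and is rejected in
// favor of plain add/mov instructions.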
2823 
2824 /// This is only run on TargetGlobalTLSAddress nodes.
2825 bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
2826  SDValue &Scale, SDValue &Index,
2827  SDValue &Disp, SDValue &Segment) {
2828  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
2829  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
2830 
2831  X86ISelAddressMode AM;
2832  AM.GV = GA->getGlobal();
2833  AM.Disp += GA->getOffset();
2834  AM.SymbolFlags = GA->getTargetFlags();
2835 
2836  if (Subtarget->is32Bit()) {
2837  AM.Scale = 1;
2838  AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
2839  }
2840 
2841  MVT VT = N.getSimpleValueType();
2842  getAddressOperands(AM, SDLoc(N), VT, Base, Scale, Index, Disp, Segment);
2843  return true;
2844 }
2845 
2846 bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
2847  // Keep track of the original value type and whether this value was
2848  // truncated. If we see a truncation from pointer type to VT that truncates
2849  // bits that are known to be zero, we can use a narrow reference.
2850  EVT VT = N.getValueType();
2851  bool WasTruncated = false;
2852  if (N.getOpcode() == ISD::TRUNCATE) {
2853  WasTruncated = true;
2854  N = N.getOperand(0);
2855  }
2856 
2857  if (N.getOpcode() != X86ISD::Wrapper)
2858  return false;
2859 
2860  // We can only use non-GlobalValues as immediates if they were not truncated,
2861  // as we do not have any range information. If we have a GlobalValue and the
2862  // address was not truncated, we can select it as an operand directly.
2863  unsigned Opc = N.getOperand(0)->getOpcode();
2864  if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
2865  Op = N.getOperand(0);
2866  // We can only select the operand directly if we didn't have to look past a
2867  // truncate.
2868  return !WasTruncated;
2869  }
2870 
2871  // Check that the global's range fits into VT.
2872  auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
2873  Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2874  if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
2875  return false;
2876 
2877  // Okay, we can use a narrow reference.
2878  Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
2879  GA->getOffset(), GA->getTargetFlags());
2880  return true;
2881 }
2882 
2883 bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
2884  SDValue &Base, SDValue &Scale,
2885  SDValue &Index, SDValue &Disp,
2886  SDValue &Segment) {
2887  assert(Root && P && "Unknown root/parent nodes");
2888  if (!ISD::isNON_EXTLoad(N.getNode()) ||
2889  !IsProfitableToFold(N, P, Root) ||
2890  !IsLegalToFold(N, P, Root, OptLevel))
2891  return false;
2892 
2893  return selectAddr(N.getNode(),
2894  N.getOperand(1), Base, Scale, Index, Disp, Segment);
2895 }
2896 
2897 bool X86DAGToDAGISel::tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
2898  SDValue &Base, SDValue &Scale,
2899  SDValue &Index, SDValue &Disp,
2900  SDValue &Segment) {
2901  assert(Root && P && "Unknown root/parent nodes");
2902  if (N->getOpcode() != X86ISD::VBROADCAST_LOAD ||
2903  !IsProfitableToFold(N, P, Root) ||
2904  !IsLegalToFold(N, P, Root, OptLevel))
2905  return false;
2906 
2907  return selectAddr(N.getNode(),
2908  N.getOperand(1), Base, Scale, Index, Disp, Segment);
2909 }
2910 
2911 /// Return an SDNode that returns the value of the global base register.
2912 /// Output instructions required to initialize the global base register,
2913 /// if necessary.
2914 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
2915  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
2916  auto &DL = MF->getDataLayout();
2917  return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
2918 }
2919 
2920 bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
2921  if (N->getOpcode() == ISD::TRUNCATE)
2922  N = N->getOperand(0).getNode();
2923  if (N->getOpcode() != X86ISD::Wrapper)
2924  return false;
2925 
2926  auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
2927  if (!GA)
2928  return false;
2929 
2930  Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2931  if (!CR)
2932  return Width == 32 && TM.getCodeModel() == CodeModel::Small;
2933 
2934  return CR->getSignedMin().sge(-1ull << Width) &&
2935  CR->getSignedMax().slt(1ull << Width);
2936 }
2937 
2938 X86::CondCode X86DAGToDAGISel::getCondFromNode(SDNode *N) const {
2939  assert(N->isMachineOpcode() && "Unexpected node");
2940  unsigned Opc = N->getMachineOpcode();
2941  const MCInstrDesc &MCID = getInstrInfo()->get(Opc);
2942  int CondNo = X86::getCondSrcNoFromDesc(MCID);
2943  if (CondNo < 0)
2944  return X86::COND_INVALID;
2945 
2946  return static_cast<X86::CondCode>(N->getConstantOperandVal(CondNo));
2947 }
2948 
2949 /// Return true if no user of the given X86ISD::CMP node uses any flag
2950 /// other than ZF.
2951 bool X86DAGToDAGISel::onlyUsesZeroFlag(SDValue Flags) const {
2952  // Examine each user of the node.
2953  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2954  UI != UE; ++UI) {
2955  // Only check things that use the flags.
2956  if (UI.getUse().getResNo() != Flags.getResNo())
2957  continue;
2958  // Only examine CopyToReg uses that copy to EFLAGS.
2959  if (UI->getOpcode() != ISD::CopyToReg ||
2960  cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2961  return false;
2962  // Examine each user of the CopyToReg use.
2963  for (SDNode::use_iterator FlagUI = UI->use_begin(),
2964  FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2965  // Only examine the Flag result.
2966  if (FlagUI.getUse().getResNo() != 1) continue;
2967  // Anything unusual: assume conservatively.
2968  if (!FlagUI->isMachineOpcode()) return false;
2969  // Examine the condition code of the user.
2970  X86::CondCode CC = getCondFromNode(*FlagUI);
2971 
2972  switch (CC) {
2973  // Comparisons which only use the zero flag.
2974  case X86::COND_E: case X86::COND_NE:
2975  continue;
2976  // Anything else: assume conservatively.
2977  default:
2978  return false;
2979  }
2980  }
2981  }
2982  return true;
2983 }
2984 
2985 /// Return true if the given X86ISD::CMP node has no uses which require the
2986 /// SF flag to be accurate.
2987 bool X86DAGToDAGISel::hasNoSignFlagUses(SDValue Flags) const {
2988  // Examine each user of the node.
2989  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2990  UI != UE; ++UI) {
2991  // Only check things that use the flags.
2992  if (UI.getUse().getResNo() != Flags.getResNo())
2993  continue;
2994  // Only examine CopyToReg uses that copy to EFLAGS.
2995  if (UI->getOpcode() != ISD::CopyToReg ||
2996  cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2997  return false;
2998  // Examine each user of the CopyToReg use.
2999  for (SDNode::use_iterator FlagUI = UI->use_begin(),
3000  FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
3001  // Only examine the Flag result.
3002  if (FlagUI.getUse().getResNo() != 1) continue;
3003  // Anything unusual: assume conservatively.
3004  if (!FlagUI->isMachineOpcode()) return false;
3005  // Examine the condition code of the user.
3006  X86::CondCode CC = getCondFromNode(*FlagUI);
3007 
3008  switch (CC) {
3009  // Comparisons which don't examine the SF flag.
3010  case X86::COND_A: case X86::COND_AE:
3011  case X86::COND_B: case X86::COND_BE:
3012  case X86::COND_E: case X86::COND_NE:
3013  case X86::COND_O: case X86::COND_NO:
3014  case X86::COND_P: case X86::COND_NP:
3015  continue;
3016  // Anything else: assume conservatively.
3017  default:
3018  return false;
3019  }
3020  }
3021  }
3022  return true;
3023 }
3024 
3025 static bool mayUseCarryFlag(X86::CondCode CC) {
3026  switch (CC) {
3027  // Comparisons which don't examine the CF flag.
3028  case X86::COND_O: case X86::COND_NO:
3029  case X86::COND_E: case X86::COND_NE:
3030  case X86::COND_S: case X86::COND_NS:
3031  case X86::COND_P: case X86::COND_NP:
3032  case X86::COND_L: case X86::COND_GE:
3033  case X86::COND_G: case X86::COND_LE:
3034  return false;
3035  // Anything else: assume conservatively.
3036  default:
3037  return true;
3038  }
3039 }
3040 
3041 /// Return true if the given flag-setting node has no uses which require the
3042 /// CF flag to be accurate.
3043 bool X86DAGToDAGISel::hasNoCarryFlagUses(SDValue Flags) const {
3044  // Examine each user of the node.
3045  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
3046  UI != UE; ++UI) {
3047  // Only check things that use the flags.
3048  if (UI.getUse().getResNo() != Flags.getResNo())
3049  continue;
3050 
3051  unsigned UIOpc = UI->getOpcode();
3052 
3053  if (UIOpc == ISD::CopyToReg) {
3054  // Only examine CopyToReg uses that copy to EFLAGS.
3055  if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
3056  return false;
3057  // Examine each user of the CopyToReg use.
3058  for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
3059  FlagUI != FlagUE; ++FlagUI) {
3060  // Only examine the Flag result.
3061  if (FlagUI.getUse().getResNo() != 1)
3062  continue;
3063  // Anything unusual: assume conservatively.
3064  if (!FlagUI->isMachineOpcode())
3065  return false;
3066  // Examine the condition code of the user.
3067  X86::CondCode CC = getCondFromNode(*FlagUI);
3068 
3069  if (mayUseCarryFlag(CC))
3070  return false;
3071  }
3072 
3073  // This CopyToReg is ok. Move on to the next user.
3074  continue;
3075  }
3076 
3077  // This might be an unselected node. So look for the pre-isel opcodes that
3078  // use flags.
3079  unsigned CCOpNo;
3080  switch (UIOpc) {
3081  default:
3082  // Something unusual. Be conservative.
3083  return false;
3084  case X86ISD::SETCC: CCOpNo = 0; break;
3085  case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
3086  case X86ISD::CMOV: CCOpNo = 2; break;
3087  case X86ISD::BRCOND: CCOpNo = 2; break;
3088  }
3089 
3090  X86::CondCode CC = (X86::CondCode)UI->getConstantOperandVal(CCOpNo);
3091  if (mayUseCarryFlag(CC))
3092  return false;
3093  }
3094  return true;
3095 }
3096 
3097 /// Check whether or not the chain ending in StoreNode is suitable for the
3098 /// {load; op; store} read-modify-write transformation.
3099 static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
3100  SDValue StoredVal, SelectionDAG *CurDAG,
3101  unsigned LoadOpNo,
3102  LoadSDNode *&LoadNode,
3103  SDValue &InputChain) {
3104  // Is the stored value result 0 of the operation?
3105  if (StoredVal.getResNo() != 0) return false;
3106 
3107  // Are there other uses of the operation other than the store?
3108  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
3109 
3110  // Is the store non-extending and non-indexed?
3111  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
3112  return false;
3113 
3114  SDValue Load = StoredVal->getOperand(LoadOpNo);
3115  // Is the stored value a non-extending and non-indexed load?
3116  if (!ISD::isNormalLoad(Load.getNode())) return false;
3117 
3118  // Return LoadNode by reference.
3119  LoadNode = cast<LoadSDNode>(Load);
3120 
3121  // Is store the only read of the loaded value?
3122  if (!Load.hasOneUse())
3123  return false;
3124 
3125  // Is the address of the store the same as the load?
3126  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
3127  LoadNode->getOffset() != StoreNode->getOffset())
3128  return false;
3129 
3130  bool FoundLoad = false;
3131  SmallVector<SDValue, 4> ChainOps;
3132  SmallVector<const SDNode *, 4> LoopWorklist;
3133  SmallPtrSet<const SDNode *, 16> Visited;
3134  const unsigned int Max = 1024;
3135 
3136  // Visualization of Load-Op-Store fusion:
3137  // -------------------------
3138  // Legend:
3139  // *-lines = Chain operand dependencies.
3140  // |-lines = Normal operand dependencies.
3141  // Dependencies flow down and right. n-suffix references multiple nodes.
3142  //
3143  // C Xn C
3144  // * * *
3145  // * * *
3146  // Xn A-LD Yn TF Yn
3147  // * * \ | * |
3148  // * * \ | * |
3149  // * * \ | => A--LD_OP_ST
3150  // * * \| \
3151  // TF OP \
3152  // * | \ Zn
3153  // * | \
3154  // A-ST Zn
3155  //
3156 
3157  // This merge induced dependences from: #1: Xn -> LD, OP, Zn
3158  // #2: Yn -> LD
3159  // #3: ST -> Zn
3160 
3161  // Ensure the transform is safe by checking for the dual
3162  // dependencies to make sure we do not induce a loop.
3163 
3164  // As LD is a predecessor to both OP and ST we can do this by checking:
3165  // a). if LD is a predecessor to a member of Xn or Yn.
3166  // b). if a Zn is a predecessor to ST.
3167 
3168  // However, (b) can only occur through being a chain predecessor to
3169  // ST, which is the same as Zn being a member or predecessor of Xn,
3170  // which is a subset of LD being a predecessor of Xn. So it's
3171  // subsumed by check (a).
3172 
3173  SDValue Chain = StoreNode->getChain();
3174 
3175  // Gather X elements in ChainOps.
3176  if (Chain == Load.getValue(1)) {
3177  FoundLoad = true;
3178  ChainOps.push_back(Load.getOperand(0));
3179  } else if (Chain.getOpcode() == ISD::TokenFactor) {
3180  for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
3181  SDValue Op = Chain.getOperand(i);
3182  if (Op == Load.getValue(1)) {
3183  FoundLoad = true;
3184  // Drop Load, but keep its chain. No cycle check necessary.
3185  ChainOps.push_back(Load.getOperand(0));
3186  continue;
3187  }
3188  LoopWorklist.push_back(Op.getNode());
3189  ChainOps.push_back(Op);
3190  }
3191  }
3192 
3193  if (!FoundLoad)
3194  return false;
3195 
3196  // Worklist is currently Xn. Add Yn to worklist.
3197  for (SDValue Op : StoredVal->ops())
3198  if (Op.getNode() != LoadNode)
3199  LoopWorklist.push_back(Op.getNode());
3200 
3201  // Check (a) if Load is a predecessor to Xn + Yn
3202  if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
3203  true))
3204  return false;
3205 
3206  InputChain =
3207  CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps);
3208  return true;
3209 }
3210 
3211 // Change a chain of {load; op; store} of the same value into a simple op
3212 // through memory of that value, if the uses of the modified value and its
3213 // address are suitable.
3214 //
3215 // The tablegen memory operand pattern is currently not able to match
3216 // the case where the EFLAGS on the original operation are used.
3217 //
3218 // To move this to tablegen, we'll need to improve tablegen to allow flags to
3219 // be transferred from a node in the pattern to the result node, probably with
3220 // a new keyword. For example, we have this
3221 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
3222 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
3223 // (implicit EFLAGS)]>;
3224 // but maybe need something like this
3225 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
3226 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
3227 // (transferrable EFLAGS)]>;
3228 //
3229 // Until then, we manually fold these and instruction select the operation
3230 // here.
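//
// Illustrative example (assumed IR, not from this file): for
//   %v = load i64, ptr %p
//   %s = add i64 %v, -1
//   store i64 %s, ptr %p
// this routine selects a single 'decq (%reg)' instead of separate
// load / dec / store nodes.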
3231 bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
3232  StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
3233  SDValue StoredVal = StoreNode->getOperand(1);
3234  unsigned Opc = StoredVal->getOpcode();
3235 
3236  // Before we try to select anything, make sure this is memory operand size
3237  // and opcode we can handle. Note that this must match the code below that
3238  // actually lowers the opcodes.
3239  EVT MemVT = StoreNode->getMemoryVT();
3240  if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
3241  MemVT != MVT::i8)
3242  return false;
3243 
3244  bool IsCommutable = false;
3245  bool IsNegate = false;
3246  switch (Opc) {
3247  default:
3248  return false;
3249  case X86ISD::SUB:
3250  IsNegate = isNullConstant(StoredVal.getOperand(0));
3251  break;
3252  case X86ISD::SBB:
3253  break;
3254  case X86ISD::ADD:
3255  case X86ISD::ADC:
3256  case X86ISD::AND:
3257  case X86ISD::OR:
3258  case X86ISD::XOR:
3259  IsCommutable = true;
3260  break;
3261  }
3262 
3263  unsigned LoadOpNo = IsNegate ? 1 : 0;
3264  LoadSDNode *LoadNode = nullptr;
3265  SDValue InputChain;
3266  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
3267  LoadNode, InputChain)) {
3268  if (!IsCommutable)
3269  return false;
3270 
3271  // This operation is commutable, try the other operand.
3272  LoadOpNo = 1;
3273  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
3274  LoadNode, InputChain))
3275  return false;
3276  }
3277 
3278  SDValue Base, Scale, Index, Disp, Segment;
3279  if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
3280  Segment))
3281  return false;
3282 
3283  auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
3284  unsigned Opc8) {
3285  switch (MemVT.getSimpleVT().SimpleTy) {
3286  case MVT::i64:
3287  return Opc64;
3288  case MVT::i32:
3289  return Opc32;
3290  case MVT::i16:
3291  return Opc16;
3292  case MVT::i8:
3293  return Opc8;
3294  default:
3295  llvm_unreachable("Invalid size!");
3296  }
3297  };
3298 
3299  MachineSDNode *Result;
3300  switch (Opc) {
3301  case X86ISD::SUB:
3302  // Handle negate.
3303  if (IsNegate) {
3304  unsigned NewOpc = SelectOpcode(X86::NEG64m, X86::NEG32m, X86::NEG16m,
3305  X86::NEG8m);
3306  const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3307  Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3308  MVT::Other, Ops);
3309  break;
3310  }
3311  LLVM_FALLTHROUGH;
3312  case X86ISD::ADD:
3313  // Try to match inc/dec.
3314  if (!Subtarget->slowIncDec() || CurDAG->shouldOptForSize()) {
3315  bool IsOne = isOneConstant(StoredVal.getOperand(1));
3316  bool IsNegOne = isAllOnesConstant(StoredVal.getOperand(1));
3317  // ADD/SUB by 1/-1 can use INC/DEC when the carry flag isn't used.
3318  if ((IsOne || IsNegOne) && hasNoCarryFlagUses(StoredVal.getValue(1))) {
3319  unsigned NewOpc =
3320  ((Opc == X86ISD::ADD) == IsOne)
3321  ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
3322  : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
3323  const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3324  Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3325  MVT::Other, Ops);
3326  break;
3327  }
3328  }
3329  LLVM_FALLTHROUGH;
3330  case X86ISD::ADC:
3331  case X86ISD::SBB:
3332  case X86ISD::AND:
3333  case X86ISD::OR:
3334  case X86ISD::XOR: {
3335  auto SelectRegOpcode = [SelectOpcode](unsigned Opc) {
3336  switch (Opc) {
3337  case X86ISD::ADD:
3338  return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr,
3339  X86::ADD8mr);
3340  case X86ISD::ADC:
3341  return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr,
3342  X86::ADC8mr);
3343  case X86ISD::SUB:
3344  return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr,
3345  X86::SUB8mr);
3346  case X86ISD::SBB:
3347  return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr,
3348  X86::SBB8mr);
3349  case X86ISD::AND:
3350  return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr,
3351  X86::AND8mr);
3352  case X86ISD::OR:
3353  return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr);
3354  case X86ISD::XOR:
3355  return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr,
3356  X86::XOR8mr);
3357  default:
3358  llvm_unreachable("Invalid opcode!");
3359  }
3360  };
3361  auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) {
3362  switch (Opc) {
3363  case X86ISD::ADD:
3364  return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0);
3365  case X86ISD::ADC:
3366  return SelectOpcode(X86::ADC64mi8, X86::ADC32mi8, X86::ADC16mi8, 0);
3367  case X86ISD::SUB:
3368  return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0);
3369  case X86ISD::SBB:
3370  return SelectOpcode(X86::SBB64mi8, X86::SBB32mi8, X86::SBB16mi8, 0);
3371  case X86ISD::AND:
3372  return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0);
3373  case X86ISD::OR:
3374  return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0);
3375  case X86ISD::XOR:
3376  return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0);
3377  default:
3378  llvm_unreachable("Invalid opcode!");
3379  }
3380  };
3381  auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
3382  switch (Opc) {
3383  case X86ISD::ADD:
3384  return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi,
3385  X86::ADD8mi);
3386  case X86ISD::ADC:
3387  return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi,
3388  X86::ADC8mi);
3389  case X86ISD::SUB:
3390  return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi,
3391  X86::SUB8mi);
3392  case X86ISD::SBB:
3393  return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi,
3394  X86::SBB8mi);
3395  case X86ISD::AND:
3396  return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi,
3397  X86::AND8mi);
3398  case X86ISD::OR:
3399  return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi,
3400  X86::OR8mi);
3401  case X86ISD::XOR:
3402  return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi,
3403  X86::XOR8mi);
3404  default:
3405  llvm_unreachable("Invalid opcode!");
3406  }
3407  };
3408 
3409  unsigned NewOpc = SelectRegOpcode(Opc);
3410  SDValue Operand = StoredVal->getOperand(1-LoadOpNo);
3411 
3412  // See if the operand is a constant that we can fold into an immediate
3413  // operand.
3414  if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
3415  int64_t OperandV = OperandC->getSExtValue();
3416 
3417  // Check if we can shrink the operand enough to fit in an immediate (or
3418  // fit into a smaller immediate) by negating it and switching the
3419  // operation.
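      // E.g. (illustrative): 'add $128' has no imm8 form (imm8 covers
      // [-128, 127]), but -128 does fit, so it is rewritten as 'sub $-128',
      // provided the carry flag is unused.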
3420  if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
3421  ((MemVT != MVT::i8 && !isInt<8>(OperandV) && isInt<8>(-OperandV)) ||
3422  (MemVT == MVT::i64 && !isInt<32>(OperandV) &&
3423  isInt<32>(-OperandV))) &&
3424  hasNoCarryFlagUses(StoredVal.getValue(1))) {
3425  OperandV = -OperandV;
3426  Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
3427  }
3428 
3429  // First try to fit this into an Imm8 operand. If it doesn't fit, then try
3430  // the larger immediate operand.
3431  if (MemVT != MVT::i8 && isInt<8>(OperandV)) {
3432  Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3433  NewOpc = SelectImm8Opcode(Opc);
3434  } else if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
3435  Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3436  NewOpc = SelectImmOpcode(Opc);
3437  }
3438  }
3439 
3440  if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) {
3441  SDValue CopyTo =
3442  CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS,
3443  StoredVal.getOperand(2), SDValue());
3444 
3445  const SDValue Ops[] = {Base, Scale, Index, Disp,
3446  Segment, Operand, CopyTo, CopyTo.getValue(1)};
3447  Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3448  Ops);
3449  } else {
3450  const SDValue Ops[] = {Base, Scale, Index, Disp,
3451  Segment, Operand, InputChain};
3452  Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3453  Ops);
3454  }
3455  break;
3456  }
3457  default:
3458  llvm_unreachable("Invalid opcode!");
3459  }
3460 
3461  MachineMemOperand *MemOps[] = {StoreNode->getMemOperand(),
3462  LoadNode->getMemOperand()};
3463  CurDAG->setNodeMemRefs(Result, MemOps);
3464 
3465  // Update Load Chain uses as well.
3466  ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1));
3467  ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
3468  ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
3469  CurDAG->RemoveDeadNode(Node);
3470  return true;
3471 }
3472 
3473 // See if this is an X & Mask that we can match to BEXTR/BZHI.
3474 // Where Mask is one of the following patterns:
3475 // a) x & (1 << nbits) - 1
3476 // b) x & ~(-1 << nbits)
3477 // c) x & (-1 >> (32 - y))
3478 // d) x << (32 - y) >> (32 - y)
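// Illustrative (annotation, not in the original source): with nbits == 5,
// each of a)-d) keeps the low 5 bits of x, i.e. BZHI(x, 5), or BEXTR with a
// control of (5 << 8) | 0.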
3479 bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
3480  assert(
3481  (Node->getOpcode() == ISD::AND || Node->getOpcode() == ISD::SRL) &&
3482  "Should be either an and-mask, or right-shift after clearing high bits.");
3483 
3484  // BEXTR is BMI instruction, BZHI is BMI2 instruction. We need at least one.
3485  if (!Subtarget->hasBMI() && !Subtarget->hasBMI2())
3486  return false;
3487 
3488  MVT NVT = Node->getSimpleValueType(0);
3489 
3490  // Only supported for 32 and 64 bits.
3491  if (NVT != MVT::i32 && NVT != MVT::i64)
3492  return false;
3493 
3494  SDValue NBits;
3495  bool NegateNBits;
3496 
3497  // If we have BMI2's BZHI, we are ok with multi-use patterns.
3498  // Else, if we only have BMI1's BEXTR, we require one-use.
3499  const bool AllowExtraUsesByDefault = Subtarget->hasBMI2();
3500  auto checkUses = [AllowExtraUsesByDefault](SDValue Op, unsigned NUses,
3501  Optional<bool> AllowExtraUses) {
3502  return AllowExtraUses.value_or(AllowExtraUsesByDefault) ||
3503  Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
3504  };
3505  auto checkOneUse = [checkUses](SDValue Op,
3506  Optional<bool> AllowExtraUses = None) {
3507  return checkUses(Op, 1, AllowExtraUses);
3508  };
3509  auto checkTwoUse = [checkUses](SDValue Op,
3510  Optional<bool> AllowExtraUses = None) {
3511  return checkUses(Op, 2, AllowExtraUses);
3512  };
3513 
3514  auto peekThroughOneUseTruncation = [checkOneUse](SDValue V) {
3515  if (V->getOpcode() == ISD::TRUNCATE && checkOneUse(V)) {
3516  assert(V.getSimpleValueType() == MVT::i32 &&
3517  V.getOperand(0).getSimpleValueType() == MVT::i64 &&
3518  "Expected i64 -> i32 truncation");
3519  V = V.getOperand(0);
3520  }
3521  return V;
3522  };
3523 
3524  // a) x & ((1 << nbits) + (-1))
3525  auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation, &NBits,
3526  &NegateNBits](SDValue Mask) -> bool {
3527  // Match `add`. Must only have one use!
3528  if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask))
3529  return false;
3530  // We should be adding all-ones constant (i.e. subtracting one.)
3531  if (!isAllOnesConstant(Mask->getOperand(1)))
3532  return false;
3533  // Match `1 << nbits`. Might be truncated. Must only have one use!
3534  SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3535  if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3536  return false;
3537  if (!isOneConstant(M0->getOperand(0)))
3538  return false;
3539  NBits = M0->getOperand(1);
3540  NegateNBits = false;
3541  return true;
3542  };
3543 
3544  auto isAllOnes = [this, peekThroughOneUseTruncation, NVT](SDValue V) {
3545  V = peekThroughOneUseTruncation(V);
3546  return CurDAG->MaskedValueIsAllOnes(
3547  V, APInt::getLowBitsSet(V.getSimpleValueType().getSizeInBits(),
3548  NVT.getSizeInBits()));
3549  };
3550 
3551  // b) x & ~(-1 << nbits)
3552  auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation,
3553  &NBits, &NegateNBits](SDValue Mask) -> bool {
3554  // Match `~()`. Must only have one use!
3555  if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask))
3556  return false;
3557  // The -1 only has to be all-ones for the final Node's NVT.
3558  if (!isAllOnes(Mask->getOperand(1)))
3559  return false;
3560  // Match `-1 << nbits`. Might be truncated. Must only have one use!
3561  SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3562  if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3563  return false;
3564  // The -1 only has to be all-ones for the final Node's NVT.
3565  if (!isAllOnes(M0->getOperand(0)))
3566  return false;
3567  NBits = M0->getOperand(1);
3568  NegateNBits = false;
3569  return true;
3570  };
3571 
3572  // Try to match potentially-truncated shift amount as `(bitwidth - y)`,
3573  // or leave the shift amount as-is, but then we'll have to negate it.
3574  auto canonicalizeShiftAmt = [&NBits, &NegateNBits](SDValue ShiftAmt,
3575  unsigned Bitwidth) {
3576  NBits = ShiftAmt;
3577  NegateNBits = true;
3578  // Skip over a truncate of the shift amount, if any.
3579  if (NBits.getOpcode() == ISD::TRUNCATE)
3580  NBits = NBits.getOperand(0);
3581  // Try to match the shift amount as (bitwidth - y). It should go away, too.
3582  // If it doesn't match, that's fine, we'll just negate it ourselves.
3583  if (NBits.getOpcode() != ISD::SUB)
3584  return;
3585  auto *V0 = dyn_cast<ConstantSDNode>(NBits.getOperand(0));
3586  if (!V0 || V0->getZExtValue() != Bitwidth)
3587  return;
3588  NBits = NBits.getOperand(1);
3589  NegateNBits = false;
3590  };
3591 
3592  // c) x & (-1 >> z) but then we'll have to subtract z from bitwidth
3593  // or
3594  // c) x & (-1 >> (32 - y))
3595  auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation, &NegateNBits,
3596  canonicalizeShiftAmt](SDValue Mask) -> bool {
3597  // The mask itself may be truncated.
3598  Mask = peekThroughOneUseTruncation(Mask);
3599  unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
3600  // Match `l>>`. Must only have one use!
3601  if (Mask.getOpcode() != ISD::SRL || !checkOneUse(Mask))
3602  return false;
3603  // We should be shifting truly all-ones constant.
3604  if (!isAllOnesConstant(Mask.getOperand(0)))
3605  return false;
3606  SDValue M1 = Mask.getOperand(1);
3607  // The shift amount should not be used externally.
3608  if (!checkOneUse(M1))
3609  return false;
3610  canonicalizeShiftAmt(M1, Bitwidth);
3611  // Pattern c. is non-canonical, and is expanded into pattern d. iff there
3612  // is no extra use of the mask. Clearly, there was one since we are here.
3613  // But at the same time, if we need to negate the shift amount,
3614  // then we don't want the mask to stick around, else it's unprofitable.
3615  return !NegateNBits;
3616  };
3617 
3618  SDValue X;
3619 
3620  // d) x << z >> z but then we'll have to subtract z from bitwidth
3621  // or
3622  // d) x << (32 - y) >> (32 - y)
3623  auto matchPatternD = [checkOneUse, checkTwoUse, canonicalizeShiftAmt,
3624  AllowExtraUsesByDefault, &NegateNBits,
3625  &X](SDNode *Node) -> bool {
3626  if (Node->getOpcode() != ISD::SRL)
3627  return false;
3628  SDValue N0 = Node->getOperand(0);
3629  if (N0->getOpcode() != ISD::SHL)
3630  return false;
3631  unsigned Bitwidth = N0.getSimpleValueType().getSizeInBits();
3632  SDValue N1 = Node->getOperand(1);
3633  SDValue N01 = N0->getOperand(1);
3634  // Both of the shifts must be by the exact same value.
3635  if (N1 != N01)
3636  return false;
3637  canonicalizeShiftAmt(N1, Bitwidth);
3638  // There should not be any external uses of the inner shift / shift amount.
3639  // Note that while we are generally okay with external uses given BMI2,
3640  // iff we need to negate the shift amount, we are not okay with extra uses.
3641  const bool AllowExtraUses = AllowExtraUsesByDefault && !NegateNBits;
3642  if (!checkOneUse(N0, AllowExtraUses) || !checkTwoUse(N1, AllowExtraUses))
3643  return false;
3644  X = N0->getOperand(0);
3645  return true;
3646  };
3647 
3648  auto matchLowBitMask = [matchPatternA, matchPatternB,
3649  matchPatternC](SDValue Mask) -> bool {
3650  return matchPatternA(Mask) || matchPatternB(Mask) || matchPatternC(Mask);
3651  };
3652 
3653  if (Node->getOpcode() == ISD::AND) {
3654  X = Node->getOperand(0);
3655  SDValue Mask = Node->getOperand(1);
3656 
3657  if (matchLowBitMask(Mask)) {
3658  // Great.
3659  } else {
3660  std::swap(X, Mask);
3661  if (!matchLowBitMask(Mask))
3662  return false;
3663  }
3664  } else if (!matchPatternD(Node))
3665  return false;
3666 
3667  // If we need to negate the shift amount, require BMI2 BZHI support.
3668  // It's just too unprofitable for BMI1 BEXTR.
3669  if (NegateNBits && !Subtarget->hasBMI2())
3670  return false;
3671 
3672  SDLoc DL(Node);
3673 
3674  // Truncate the shift amount.
3675  NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
3676  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3677 
3678  // Insert 8-bit NBits into lowest 8 bits of 32-bit register.
3679  // All the other bits are undefined, we do not care about them.
3680  SDValue ImplDef = SDValue(
3681  CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32), 0);
3682  insertDAGNode(*CurDAG, SDValue(Node, 0), ImplDef);
3683 
3684  SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
3685  insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
3686  NBits = SDValue(CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
3687  MVT::i32, ImplDef, NBits, SRIdxVal),
3688  0);
3689  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3690 
3691  // We might have matched the amount of high bits to be cleared,
3692  // but we want the amount of low bits to be kept, so negate it then.
3693  if (NegateNBits) {
3694  SDValue BitWidthC = CurDAG->getConstant(NVT.getSizeInBits(), DL, MVT::i32);
3695  insertDAGNode(*CurDAG, SDValue(Node, 0), BitWidthC);
3696 
3697  NBits = CurDAG->getNode(ISD::SUB, DL, MVT::i32, BitWidthC, NBits);
3698  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3699  }
3700 
3701  if (Subtarget->hasBMI2()) {
3702  // Great, just emit the BZHI..
3703  if (NVT != MVT::i32) {
3704  // But have to place the bit count into the wide-enough register first.
3705  NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits);
3706  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3707  }
3708 
3709  SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, NVT, X, NBits);
3710  ReplaceNode(Node, Extract.getNode());
3711  SelectCode(Extract.getNode());
3712  return true;
3713  }
3714 
3715 // Else, if we do *NOT* have BMI2, let's find out if 'X' is *logically*
3716 // shifted (potentially with a one-use trunc in between), and whether that
3717 // truncation was the only use of the shift; if so, look past the
3718 // one-use truncation.
3719  {
3720  SDValue RealX = peekThroughOneUseTruncation(X);
3721  // FIXME: only if the shift is one-use?
3722  if (RealX != X && RealX.getOpcode() == ISD::SRL)
3723  X = RealX;
3724  }
3725 
3726  MVT XVT = X.getSimpleValueType();
3727 
3728  // Else, emitting BEXTR requires one more step.
3729  // The 'control' of BEXTR has the pattern of:
3730  // [15...8 bit][ 7...0 bit] location
3731  // [ bit count][ shift] name
3732  // I.e. 0b00000010'00000001 means (x >> 0b1) & 0b11
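  // Illustrative: to keep the low 4 bits of (x >> 5), the control would be
  // (4 << 8) | 5 == 0x0405.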
3733 
3734  // Shift NBits left by 8 bits, thus producing 'control'.
3735  // This makes the low 8 bits to be zero.
3736  SDValue C8 = CurDAG->getConstant(8, DL, MVT::i8);
3737  insertDAGNode(*CurDAG, SDValue(Node, 0), C8);
3738  SDValue Control = CurDAG->getNode(ISD::SHL, DL, MVT::i32, NBits, C8);
3739  insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3740 
3741  // If the 'X' is *logically* shifted, we can fold that shift into 'control'.
3742  // FIXME: only if the shift is one-use?
3743  if (X.getOpcode() == ISD::SRL) {
3744  SDValue ShiftAmt = X.getOperand(1);
3745  X = X.getOperand(0);
3746 
3747  assert(ShiftAmt.getValueType() == MVT::i8 &&
3748  "Expected shift amount to be i8");
3749 
3750  // Now, *zero*-extend the shift amount. The bits 8...15 *must* be zero!
3751  // We could zext to i16 in some form, but we intentionally don't do that.
3752  SDValue OrigShiftAmt = ShiftAmt;
3753  ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShiftAmt);
3754  insertDAGNode(*CurDAG, OrigShiftAmt, ShiftAmt);
3755 
3756  // And now 'or' these low 8 bits of shift amount into the 'control'.
3757  Control = CurDAG->getNode(ISD::OR, DL, MVT::i32, Control, ShiftAmt);
3758  insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3759  }
3760 
3761  // But have to place the 'control' into the wide-enough register first.
3762  if (XVT != MVT::i32) {
3763  Control = CurDAG->getNode(ISD::ANY_EXTEND, DL, XVT, Control);
3764  insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3765  }
3766 
3767  // And finally, form the BEXTR itself.
3768  SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, XVT, X, Control);
3769 
3770  // The 'X' was originally truncated. Do that now.
3771  if (XVT != NVT) {
3772  insertDAGNode(*CurDAG, SDValue(Node, 0), Extract);
3773  Extract = CurDAG->getNode(ISD::TRUNCATE, DL, NVT, Extract);
3774  }
3775 
3776  ReplaceNode(Node, Extract.getNode());
3777  SelectCode(Extract.getNode());
3778 
3779  return true;
3780 }
3781 
3782 // See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
3783 MachineSDNode *X86DAGToDAGISel::matchBEXTRFromAndImm(SDNode *Node) {
3784  MVT NVT = Node->getSimpleValueType(0);
3785  SDLoc dl(Node);
3786 
3787  SDValue N0 = Node->getOperand(0);
3788  SDValue N1 = Node->getOperand(1);
3789 
3790  // If we have TBM we can use an immediate for the control. If we have BMI
3791  // we should only do this if the BEXTR instruction is implemented well.
3792  // Otherwise moving the control into a register makes this more costly.
3793  // TODO: Maybe load folding, greater than 32-bit masks, or a guarantee of LICM
3794  // hoisting the move immediate would make it worthwhile with a less optimal
3795  // BEXTR?
3796  bool PreferBEXTR =
3797  Subtarget->hasTBM() || (Subtarget->hasBMI() && Subtarget->hasFastBEXTR());
3798  if (!PreferBEXTR && !Subtarget->hasBMI2())
3799  return nullptr;
3800 
3801  // Must have a shift right.
3802  if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
3803  return nullptr;
3804 
3805  // Shift can't have additional users.
3806  if (!N0->hasOneUse())
3807  return nullptr;
3808 
3809  // Only supported for 32 and 64 bits.
3810  if (NVT != MVT::i32 && NVT != MVT::i64)
3811  return nullptr;
3812 
3813  // Shift amount and RHS of and must be constant.
3814  ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(N1);
3815  ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
3816  if (!MaskCst || !ShiftCst)
3817  return nullptr;
3818 
3819  // And RHS must be a mask.
3820  uint64_t Mask = MaskCst->getZExtValue();
3821  if (!isMask_64(Mask))
3822  return nullptr;
3823 
3824  uint64_t Shift = ShiftCst->getZExtValue();
3825  uint64_t MaskSize = countPopulation(Mask);
3826 
3827  // Don't interfere with something that can be handled by extracting AH.
3828  // TODO: If we are able to fold a load, BEXTR might still be better than AH.
3829  if (Shift == 8 && MaskSize == 8)
3830  return nullptr;
3831 
3832  // Make sure we are only using bits that were in the original value, not
3833  // shifted in.
3834  if (Shift + MaskSize > NVT.getSizeInBits())
3835  return nullptr;
3836 
3837  // BZHI, if available, is always fast, unlike BEXTR. But even if we decide
3838  // that we can't use BEXTR, it is only worthwhile using BZHI if the mask
3839  // does not fit into 32 bits. Load folding is not a sufficient reason.
3840  if (!PreferBEXTR && MaskSize <= 32)
3841  return nullptr;
3842 
3843  SDValue Control;
3844  unsigned ROpc, MOpc;
3845 
3846  if (!PreferBEXTR) {
3847  assert(Subtarget->hasBMI2() && "We must have BMI2's BZHI then.");
3848  // If we can't make use of BEXTR then we can't fuse shift+mask stages.
3849  // Let's perform the mask first, and apply shift later. Note that we need to
3850  // widen the mask to account for the fact that we'll apply shift afterwards!
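    // Illustrative (assuming a >32-bit mask, as required below): for
    // (x >> 8) & 0xFFFFFFFFFF (a 40-bit mask) this emits BZHI(x, 8 + 40) to
    // keep the low 48 bits, then shifts right by 8 afterwards.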
3851  Control = CurDAG->getTargetConstant(Shift + MaskSize, dl, NVT);
3852  ROpc = NVT == MVT::i64 ? X86::BZHI64rr : X86::BZHI32rr;
3853  MOpc = NVT == MVT::i64 ? X86::BZHI64rm : X86::BZHI32rm;
3854  unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3855  Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
3856  } else {
3857  // The 'control' of BEXTR has the pattern of:
3858  // [15...8 bit][ 7...0 bit] location
3859  // [ bit count][ shift] name
3860  // I.e. 0b00000010'00000001 means (x >> 0b1) & 0b11
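    // Illustrative (assuming a BEXTR-preferring target): (x >> 4) & 0xFF
    // gives Shift = 4, MaskSize = 8, so Control = 4 | (8 << 8) == 0x0804.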
3861  Control = CurDAG->getTargetConstant(Shift | (MaskSize << 8), dl, NVT);
3862  if (Subtarget->hasTBM()) {
3863  ROpc = NVT == MVT::i64 ? X86::BEXTRI64ri : X86::BEXTRI32ri;
3864  MOpc = NVT == MVT::i64 ? X86::BEXTRI64mi : X86::BEXTRI32mi;
3865  } else {
3866  assert(Subtarget->hasBMI() && "We must have BMI1's BEXTR then.");
3867  // BMI requires the immediate to be placed in a register.
3868  ROpc = NVT == MVT::i64 ? X86::BEXTR64rr : X86::BEXTR32rr;
3869  MOpc = NVT == MVT::i64 ? X86::BEXTR64rm : X86::BEXTR32rm;
3870  unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3871  Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
3872  }
3873  }
3874 
3875  MachineSDNode *NewNode;
3876  SDValue Input = N0->getOperand(0);
3877  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3878  if (tryFoldLoad(Node, N0.getNode(), Input, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3879  SDValue Ops[] = {
3880  Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Control, Input.getOperand(0)};
3881  SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
3882  NewNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3883  // Update the chain.
3884  ReplaceUses(Input.getValue(1), SDValue(NewNode, 2));
3885  // Record the mem-refs
3886  CurDAG->setNodeMemRefs(NewNode, {cast<LoadSDNode>(Input)->getMemOperand()});
3887  } else {
3888  NewNode = CurDAG->getMachineNode(ROpc, dl, NVT, MVT::i32, Input, Control);
3889  }
3890 
3891  if (!PreferBEXTR) {
3892  // We still need to apply the shift.
3893  SDValue ShAmt = CurDAG->getTargetConstant(Shift, dl, NVT);
3894  unsigned NewOpc = NVT == MVT::i64 ? X86::SHR64ri : X86::SHR32ri;
3895  NewNode =
3896  CurDAG->getMachineNode(NewOpc, dl, NVT, SDValue(NewNode, 0), ShAmt);
3897  }
3898 
3899  return NewNode;
3900 }
3901 
3902 // Emit a PCMPISTR(I/M) instruction.
3903 MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
3904  bool MayFoldLoad, const SDLoc &dl,
3905  MVT VT, SDNode *Node) {
3906  SDValue N0 = Node->getOperand(0);
3907  SDValue N1 = Node->getOperand(1);
3908  SDValue Imm = Node->getOperand(2);
3909  const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3910  Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3911 
3912  // Try to fold a load. No need to check alignment.
3913  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3914  if (MayFoldLoad && tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3915  SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3916  N1.getOperand(0) };
3917  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
3918  MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3919  // Update the chain.
3920  ReplaceUses(N1.getValue(1), SDValue(CNode, 2));
3921  // Record the mem-refs
3922  CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
3923  return CNode;
3924  }
3925 
3926  SDValue Ops[] = { N0, N1, Imm };
3927  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
3928  MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3929  return CNode;
3930 }
3931 
3932 // Emit a PCMPESTR(I/M) instruction. Also return the Glue result in case we need
3933 // to emit a second instruction after this one. This is needed since we have two
3934 // copyToReg nodes glued before this and we need to continue that glue through.
3935 MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
3936  bool MayFoldLoad, const SDLoc &dl,
3937  MVT VT, SDNode *Node,
3938  SDValue &InFlag) {
3939  SDValue N0 = Node->getOperand(0);
3940  SDValue N2 = Node->getOperand(2);
3941  SDValue Imm = Node->getOperand(4);
3942  const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3943  Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3944 
3945  // Try to fold a load. No need to check alignment.
3946  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3947  if (MayFoldLoad && tryFoldLoad(Node, N2, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3948  SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3949  N2.getOperand(0), InFlag };
3950  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
3951  MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3952  InFlag = SDValue(CNode, 3);
3953  // Update the chain.
3954  ReplaceUses(N2.getValue(1), SDValue(CNode, 2));
3955  // Record the mem-refs
3956  CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N2)->getMemOperand()});
3957  return CNode;
3958  }
3959 
3960  SDValue Ops[] = { N0, N2, Imm, InFlag };
3961  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue);
3962  MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3963  InFlag = SDValue(CNode, 2);
3964  return CNode;
3965 }
3966 
3967 bool X86DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
3968  EVT VT = N->getValueType(0);
3969 
3970  // Only handle scalar shifts.
3971  if (VT.isVector())
3972  return false;
3973 
3974  // Narrower shifts only mask to 5 bits in hardware.
3975  unsigned Size = VT == MVT::i64 ? 64 : 32;
3976 
3977  SDValue OrigShiftAmt = N->getOperand(1);
3978  SDValue ShiftAmt = OrigShiftAmt;
3979  SDLoc DL(N);
3980 
3981  // Skip over a truncate of the shift amount.
3982  if (ShiftAmt->getOpcode() == ISD::TRUNCATE)
3983  ShiftAmt = ShiftAmt->getOperand(0);
3984 
3985  // This function is called after X86DAGToDAGISel::matchBitExtract(),
3986  // so we are not afraid that we might mess up BZHI/BEXTR pattern.
3987 
3988  SDValue NewShiftAmt;
3989  if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
3990  SDValue Add0 = ShiftAmt->getOperand(0);
3991  SDValue Add1 = ShiftAmt->getOperand(1);
3992  auto *Add0C = dyn_cast<ConstantSDNode>(Add0);
3993  auto *Add1C = dyn_cast<ConstantSDNode>(Add1);
3994  // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
3995  // to avoid the ADD/SUB.
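    // Illustrative: a 64-bit 'x << (c + 64)' shifts by just 'c', because the
    // hardware masks the shift amount modulo the operand size anyway.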
3996  if (Add1C && Add1C->getAPIntValue().urem(Size) == 0) {
3997  NewShiftAmt = Add0;
3998  // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
3999  // to generate a NEG instead of a SUB of a constant.
4000  } else if (ShiftAmt->getOpcode() == ISD::SUB && Add0C &&
4001  Add0C->getZExtValue() != 0) {
4002  EVT SubVT = ShiftAmt.getValueType();
4003  SDValue X;
4004  if (Add0C->getZExtValue() % Size == 0)
4005  X = Add1;
4006  else if (ShiftAmt.hasOneUse() && Size == 64 &&
4007  Add0C->getZExtValue() % 32 == 0) {
4008  // We have a 64-bit shift by (n*32-x), turn it into -(x+n*32).
4009  // This is mainly beneficial if we already compute (x+n*32).
4010  if (Add1.getOpcode() == ISD::TRUNCATE) {
4011  Add1 = Add1.getOperand(0);
4012  SubVT = Add1.getValueType();
4013  }
4014  if (Add0.getValueType() != SubVT) {
4015  Add0 = CurDAG->getZExtOrTrunc(Add0, DL, SubVT);
4016  insertDAGNode(*CurDAG, OrigShiftAmt, Add0);
4017  }
4018 
4019  X = CurDAG->getNode(ISD::ADD, DL, SubVT, Add1, Add0);
4020  insertDAGNode(*CurDAG, OrigShiftAmt, X);
4021  } else
4022  return false;
4023  // Insert a negate op.
4024  // TODO: This isn't guaranteed to replace the sub if there is a logic cone
4025  // that uses it that's not a shift.
4026  SDValue Zero = CurDAG->getConstant(0, DL, SubVT);
4027  SDValue Neg = CurDAG->getNode(ISD::SUB, DL, SubVT, Zero, X);
4028  NewShiftAmt = Neg;
4029 
4030  // Insert these operands into a valid topological order so they can
4031  // get selected independently.
4032  insertDAGNode(*CurDAG, OrigShiftAmt, Zero);
4033  insertDAGNode(*CurDAG, OrigShiftAmt, Neg);
4034  } else
4035  return false;
4036  } else
4037  return false;
4038 
4039  if (NewShiftAmt.getValueType() != MVT::i8) {
4040  // Need to truncate the shift amount.
4041  NewShiftAmt = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NewShiftAmt);
4042  // Add to a correct topological ordering.
4043  insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
4044  }
4045 
4046  // Insert a new mask to keep the shift amount legal. This should be removed
4047  // by isel patterns.
4048  NewShiftAmt = CurDAG->getNode(ISD::AND, DL, MVT::i8, NewShiftAmt,
4049  CurDAG->getConstant(Size - 1, DL, MVT::i8));
4050  // Place in a correct topological ordering.
4051  insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
4052 
4053  SDNode *UpdatedNode = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
4054  NewShiftAmt);
4055  if (UpdatedNode != N) {
4056  // If we found an existing node, we should replace ourselves with that node
4057  // and wait for it to be selected after its other users.
4058  ReplaceNode(N, UpdatedNode);
4059  return true;
4060  }
4061 
4062  // If the original shift amount is now dead, delete it so that we don't run
4063  // it through isel.
4064  if (OrigShiftAmt.getNode()->use_empty())
4065  CurDAG->RemoveDeadNode(OrigShiftAmt.getNode());
4066 
4067  // Now that we've optimized the shift amount, defer to normal isel to get
4068  // load folding and legacy vs BMI2 selection without repeating it here.
4069  SelectCode(N);
4070  return true;
4071 }
4072 
4073 bool X86DAGToDAGISel::tryShrinkShlLogicImm(SDNode *N) {
4074  MVT NVT = N->getSimpleValueType(0);
4075  unsigned Opcode = N->getOpcode();
4076  SDLoc dl(N);
4077 
4078  // For operations of the form (x << C1) op C2, check if we can use a smaller
4079  // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
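  // Illustrative: '(x << 8) | 0x1000' becomes '(x | 0x10) << 8', shrinking
  // the immediate from 32 bits to 8 bits.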
4080  SDValue Shift = N->getOperand(0);
4081  SDValue N1 = N->getOperand(1);
4082 
4083  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
4084  if (!Cst)
4085  return false;
4086 
4087  int64_t Val = Cst->getSExtValue();
4088 
4089  // If we have an any_extend feeding the AND, look through it to see if there
4090  // is a shift behind it. But only if the AND doesn't use the extended bits.
4091  // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
4092  bool FoundAnyExtend = false;
4093  if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
4094  Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
4095  isUInt<32>(Val)) {
4096  FoundAnyExtend = true;
4097  Shift = Shift.getOperand(0);
4098  }
4099 
4100  if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
4101  return false;
4102 
4103  // i8 is unshrinkable, i16 should be promoted to i32.
4104  if (NVT != MVT::i32 && NVT != MVT::i64)
4105  return false;
4106 
4107  ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
4108  if (!ShlCst)
4109  return false;
4110 
4111  uint64_t ShAmt = ShlCst->getZExtValue();
4112 
4113  // Make sure that we don't change the operation by removing bits.
4114  // This only matters for OR and XOR, AND is unaffected.
4115  uint64_t RemovedBitsMask = (1ULL << ShAmt) - 1;
4116  if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
4117  return false;
4118 
4119  // Check the minimum bitwidth for the new constant.
4120  // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
4121  auto CanShrinkImmediate = [&](int64_t &ShiftedVal) {
4122  if (Opcode == ISD::AND) {
4123  // AND32ri is the same as AND64ri32 with zext imm.
4124  // Try this before sign extended immediates below.
4125  ShiftedVal = (uint64_t)Val >> ShAmt;
4126  if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
4127  return true;
4128  // Also swap order when the AND can become MOVZX.
4129  if (ShiftedVal == UINT8_MAX || ShiftedVal == UINT16_MAX)
4130  return true;
4131  }
4132  ShiftedVal = Val >> ShAmt;
4133  if ((!isInt<8>(Val) && isInt<8>(ShiftedVal)) ||
4134  (!isInt<32>(Val) && isInt<32>(ShiftedVal)))
4135  return true;
4136  if (Opcode != ISD::AND) {
4137  // MOV32ri+OR64r/XOR64r is cheaper than MOV64ri64+OR64rr/XOR64rr
4138  ShiftedVal = (uint64_t)Val >> ShAmt;
4139  if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
4140  return true;
4141  }
4142  return false;
4143  };
4144 
4145  int64_t ShiftedVal;
4146  if (!CanShrinkImmediate(ShiftedVal))
4147  return false;
4148 
4149  // Ok, we can reorder to get a smaller immediate.
4150 
4151  // But, it's possible the original immediate allowed an AND to become MOVZX.
4152  // This is done late in order to defer the MaskedValueIsZero call as long
4153  // as possible.
4154  if (Opcode == ISD::AND) {
4155  // Find the smallest zext this could possibly be.
4156  unsigned ZExtWidth = Cst->getAPIntValue().getActiveBits();
4157  ZExtWidth = PowerOf2Ceil(std::max(ZExtWidth, 8U));
4158 
4159  // Figure out which bits need to be zero to achieve that mask.
4160  APInt NeededMask = APInt::getLowBitsSet(NVT.getSizeInBits(),
4161  ZExtWidth);
4162  NeededMask &= ~Cst->getAPIntValue();
4163 
4164  if (CurDAG->MaskedValueIsZero(N->getOperand(0), NeededMask))
4165  return false;
4166  }
4167 
4168  SDValue X = Shift.getOperand(0);
4169  if (FoundAnyExtend) {
4170  SDValue NewX = CurDAG->getNode(ISD::ANY_EXTEND, dl, NVT, X);
4171  insertDAGNode(*CurDAG, SDValue(N, 0), NewX);
4172  X = NewX;
4173  }
4174 
4175  SDValue NewCst = CurDAG->getConstant(ShiftedVal, dl, NVT);
4176  insertDAGNode(*CurDAG, SDValue(N, 0), NewCst);
4177  SDValue NewBinOp = CurDAG->getNode(Opcode, dl, NVT, X, NewCst);
4178  insertDAGNode(*CurDAG, SDValue(N, 0), NewBinOp);
4179  SDValue NewSHL = CurDAG->getNode(ISD::SHL, dl, NVT, NewBinOp,
4180  Shift.getOperand(1));
4181  ReplaceNode(N, NewSHL.getNode());
4182  SelectCode(NewSHL.getNode());
4183  return true;
4184 }
4185 
4186 bool X86DAGToDAGISel::matchVPTERNLOG(SDNode *Root, SDNode *ParentA,
4187  SDNode *ParentB, SDNode *ParentC,
4188  SDValue A, SDValue B, SDValue C,
4189  uint8_t Imm) {
4190  assert(A.isOperandOf(ParentA) && B.isOperandOf(ParentB) &&
4191  C.isOperandOf(ParentC) && "Incorrect parent node");
4192 
4193  auto tryFoldLoadOrBCast =
4194  [this](SDNode *Root, SDNode *P, SDValue &L, SDValue &Base, SDValue &Scale,
4195  SDValue &Index, SDValue &Disp, SDValue &Segment) {
4196  if (tryFoldLoad(Root, P, L, Base, Scale, Index, Disp, Segment))
4197  return true;
4198 
4199  // Not a load, check for broadcast which may be behind a bitcast.
4200  if (L.getOpcode() == ISD::BITCAST && L.hasOneUse()) {
4201  P = L.getNode();
4202  L = L.getOperand(0);
4203  }
4204 
4205  if (L.getOpcode() != X86ISD::VBROADCAST_LOAD)
4206  return false;
4207 
4208  // Only 32 and 64 bit broadcasts are supported.
4209  auto *MemIntr = cast<MemIntrinsicSDNode>(L);
4210  unsigned Size = MemIntr->getMemoryVT().getSizeInBits();
4211  if (Size != 32 && Size != 64)
4212  return false;
4213 
4214  return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment);
4215  };
4216 
4217  bool FoldedLoad = false;
4218  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4219  if (tryFoldLoadOrBCast(Root, ParentC, C, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4220  FoldedLoad = true;
4221  } else if (tryFoldLoadOrBCast(Root, ParentA, A, Tmp0, Tmp1, Tmp2, Tmp3,
4222  Tmp4)) {
4223  FoldedLoad = true;
4224  std::swap(A, C);
4225  // Swap bits 1/4 and 3/6.
4226  uint8_t OldImm = Imm;
4227  Imm = OldImm & 0xa5;
4228  if (OldImm & 0x02) Imm |= 0x10;
4229  if (OldImm & 0x10) Imm |= 0x02;
4230  if (OldImm & 0x08) Imm |= 0x40;
4231  if (OldImm & 0x40) Imm |= 0x08;
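    // (The immediate is a truth table indexed by (A,B,C); swapping A and C
    // exchanges table rows 0b001<->0b100 and 0b011<->0b110, i.e. bits 1/4
    // and 3/6, while rows 0, 2, 5 and 7 -- mask 0xa5 -- stay put.)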
4232  } else if (tryFoldLoadOrBCast(Root, ParentB, B, Tmp0, Tmp1, Tmp2, Tmp3,
4233  Tmp4)) {
4234  FoldedLoad = true;
4235  std::swap(B, C);
4236  // Swap bits 1/2 and 5/6.
4237  uint8_t OldImm = Imm;
4238  Imm = OldImm & 0x99;
4239  if (OldImm & 0x02) Imm |= 0x04;
4240  if (OldImm & 0x04) Imm |= 0x02;
4241  if (OldImm & 0x20) Imm |= 0x40;
4242  if (OldImm & 0x40) Imm |= 0x20;
4243  }
4244 
4245  SDLoc DL(Root);
4246 
4247  SDValue TImm = CurDAG->getTargetConstant(Imm, DL, MVT::i8);
4248 
4249  MVT NVT = Root->getSimpleValueType(0);
4250 
4251  MachineSDNode *MNode;
4252  if (FoldedLoad) {
4253  SDVTList VTs = CurDAG->getVTList(NVT, MVT::Other);
4254 
4255  unsigned Opc;
4256  if (C.getOpcode() == X86ISD::VBROADCAST_LOAD) {
4257  auto *MemIntr = cast<MemIntrinsicSDNode>(C);
4258  unsigned EltSize = MemIntr->getMemoryVT().getSizeInBits();
4259  assert((EltSize == 32 || EltSize == 64) && "Unexpected broadcast size!");
4260 
4261  bool UseD = EltSize == 32;
4262  if (NVT.is128BitVector())
4263  Opc = UseD ? X86::VPTERNLOGDZ128rmbi : X86::VPTERNLOGQZ128rmbi;
4264  else if (NVT.is256BitVector())
4265  Opc = UseD ? X86::VPTERNLOGDZ256rmbi : X86::VPTERNLOGQZ256rmbi;
4266  else if (NVT.is512BitVector())
4267  Opc = UseD ? X86::VPTERNLOGDZrmbi : X86::VPTERNLOGQZrmbi;
4268  else
4269  llvm_unreachable("Unexpected vector size!");
4270  } else {
4271  bool UseD = NVT.getVectorElementType() == MVT::i32;
4272  if (NVT.is128BitVector())
4273  Opc = UseD ? X86::VPTERNLOGDZ128rmi : X86::VPTERNLOGQZ128rmi;
4274  else if (NVT.is256BitVector())
4275  Opc = UseD ? X86::VPTERNLOGDZ256rmi : X86::VPTERNLOGQZ256rmi;
4276  else if (NVT.is512BitVector())
4277  Opc = UseD ? X86::VPTERNLOGDZrmi : X86::VPTERNLOGQZrmi;
4278  else
4279  llvm_unreachable("Unexpected vector size!");
4280  }
4281 
4282  SDValue Ops[] = {A, B, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, TImm, C.getOperand(0)};
4283  MNode = CurDAG->getMachineNode(Opc, DL, VTs, Ops);
4284 
4285  // Update the chain.
4286  ReplaceUses(C.getValue(1), SDValue(MNode, 1));
4287  // Record the mem-refs
4288  CurDAG->setNodeMemRefs(MNode, {cast<MemSDNode>(C)->getMemOperand()});
4289  } else {
4290  bool UseD = NVT.getVectorElementType() == MVT::i32;
4291  unsigned Opc;
4292  if (NVT.is128BitVector())
4293  Opc = UseD ? X86::VPTERNLOGDZ128rri : X86::VPTERNLOGQZ128rri;
4294  else if (NVT.is256BitVector())
4295  Opc = UseD ? X86::VPTERNLOGDZ256rri : X86::VPTERNLOGQZ256rri;
4296  else if (NVT.is512BitVector())
4297  Opc = UseD ? X86::VPTERNLOGDZrri : X86::VPTERNLOGQZrri;
4298  else
4299  llvm_unreachable("Unexpected vector size!");
4300 
4301  MNode = CurDAG->getMachineNode(Opc, DL, NVT, {A, B, C, TImm});
4302  }
4303 
4304  ReplaceUses(SDValue(Root, 0), SDValue(MNode, 0));
4305  CurDAG->RemoveDeadNode(Root);
4306  return true;
4307 }
4308 
4309 // Try to match two logic ops to a VPTERNLOG.
4310 // FIXME: Handle more complex patterns that use an operand more than once?
4311 bool X86DAGToDAGISel::tryVPTERNLOG(SDNode *N) {
4312  MVT NVT = N->getSimpleValueType(0);
4313 
4314  // Make sure we support VPTERNLOG.
4315  if (!NVT.isVector() || !Subtarget->hasAVX512() ||
4316  NVT.getVectorElementType() == MVT::i1)
4317  return false;
4318 
4319  // We need VLX for 128/256-bit.
4320  if (!(Subtarget->hasVLX() || NVT.is512BitVector()))
4321  return false;
4322 
4323  SDValue N0 = N->getOperand(0);
4324  SDValue N1 = N->getOperand(1);
4325 
4326  auto getFoldableLogicOp = [](SDValue Op) {
4327  // Peek through single use bitcast.
4328  if (Op.getOpcode() == ISD::BITCAST && Op.hasOneUse())
4329  Op = Op.getOperand(0);
4330 
4331  if (!Op.hasOneUse())
4332  return SDValue();
4333 
4334  unsigned Opc = Op.getOpcode();
4335  if (Opc == ISD::AND || Opc == ISD::OR || Opc == ISD::XOR ||
4336  Opc == X86ISD::ANDNP)
4337  return Op;
4338 
4339  return SDValue();
4340  };
4341 
4342  SDValue A, FoldableOp;
4343  if ((FoldableOp = getFoldableLogicOp(N1))) {
4344  A = N0;
4345  } else if ((FoldableOp = getFoldableLogicOp(N0))) {
4346  A = N1;
4347  } else
4348  return false;
4349 
4350  SDValue B = FoldableOp.getOperand(0);
4351  SDValue C = FoldableOp.getOperand(1);
4352  SDNode *ParentA = N;
4353  SDNode *ParentB = FoldableOp.getNode();
4354  SDNode *ParentC = FoldableOp.getNode();
4355 
4356  // We can build the appropriate control immediate by performing the logic
4357  // operation we're matching using these constants for A, B, and C.
4358  uint8_t TernlogMagicA = 0xf0;
4359  uint8_t TernlogMagicB = 0xcc;
4360  uint8_t TernlogMagicC = 0xaa;
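  // Illustrative: these are the 8-entry truth tables of A, B and C
  // themselves, so evaluating the matched expression on them bitwise yields
  // the table for the whole expression, e.g. A | (B & C) gives
  // 0xf0 | (0xcc & 0xaa) == 0xf8.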
4361 
4362  // Some of the inputs may be inverted, peek through them and invert the
4363  // magic values accordingly.
4364  // TODO: There may be a bitcast before the xor that we should peek through.
4365  auto PeekThroughNot = [](SDValue &Op, SDNode *&Parent, uint8_t &Magic) {
4366  if (Op.getOpcode() == ISD::XOR && Op.hasOneUse() &&
4367  ISD::isBuildVectorAllOnes(Op.getOperand(1).getNode())) {
4368  Magic = ~Magic;
4369  Parent = Op.getNode();
4370  Op = Op.getOperand(0);
4371  }
4372  };
4373 
4374  PeekThroughNot(A, ParentA, TernlogMagicA);
4375  PeekThroughNot(B, ParentB, TernlogMagicB);
4376  PeekThroughNot(C, ParentC, TernlogMagicC);
4377 
4378  uint8_t Imm;
4379  switch (FoldableOp.getOpcode()) {
4380  default: llvm_unreachable("Unexpected opcode!");
4381  case ISD::AND: Imm = TernlogMagicB & TernlogMagicC; break;
4382  case ISD::OR: Imm = TernlogMagicB | TernlogMagicC; break;
4383  case ISD::XOR: Imm = TernlogMagicB ^ TernlogMagicC; break;
4384  case X86ISD::ANDNP: Imm = ~(TernlogMagicB) & TernlogMagicC; break;
4385  }
4386 
4387  switch (N->getOpcode()) {
4388  default: llvm_unreachable("Unexpected opcode!");
4389  case X86ISD::ANDNP:
4390  if (A == N0)
4391  Imm &= ~TernlogMagicA;
4392  else
4393  Imm = ~(Imm) & TernlogMagicA;
4394  break;
4395  case ISD::AND: Imm &= TernlogMagicA; break;
4396  case ISD::OR: Imm |= TernlogMagicA; break;
4397  case ISD::XOR: Imm ^= TernlogMagicA; break;
4398  }
4399 
4400  return matchVPTERNLOG(N, ParentA, ParentB, ParentC, A, B, C, Imm);
4401 }
4402 
4403 /// If the high bits of an 'and' operand are known zero, try setting the
4404 /// high bits of an 'and' constant operand to produce a smaller encoding by
4405 /// creating a small, sign-extended negative immediate rather than a large
4406 /// positive one. This reverses a transform in SimplifyDemandedBits that
4407 /// shrinks mask constants by clearing bits. There is also a possibility that
4408 /// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
4409 /// case, just replace the 'and'. Return 'true' if the node is replaced.
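/// Illustrative (assumed operand facts, not from the source): if the top 24
/// bits of the other operand are known zero, 'and $0x000000f0' can use the
/// sign-extended 8-bit immediate 'and $0xfffffff0' (-16) instead, saving
/// encoding bytes.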
4410 bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
4411  // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
4412  // have immediate operands.
4413  MVT VT = And->getSimpleValueType(0);
4414  if (VT != MVT::i32 && VT != MVT::i64)
4415  return false;
4416 
4417  auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
4418  if (!And1C)
4419  return false;
4420 
4421  // Bail out if the mask constant is already negative. It can't shrink further.
4422  // If the upper 32 bits of a 64 bit mask are all zeros, we have special isel
4423  // patterns to use a 32-bit and instead of a 64-bit and by relying on the
4424  // implicit zeroing of 32 bit ops. So we should check if the lower 32 bits
4425  // are negative too.
4426  APInt MaskVal = And1C->getAPIntValue();
4427  unsigned MaskLZ = MaskVal.countLeadingZeros();
4428  if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
4429  return false;
4430 
4431  // Don't extend into the upper 32 bits of a 64 bit mask.
4432  if (VT == MVT::i64 && MaskLZ >= 32) {
4433  MaskLZ -= 32;
4434  MaskVal = MaskVal.trunc(32);
4435  }
4436 
4437  SDValue And0 = And->getOperand(0);
4438  APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ);
4439  APInt NegMaskVal = MaskVal | HighZeros;
4440 
4441  // If a negative constant would not allow a smaller encoding, there's no need
4442  // to continue. Only change the constant when we know it's a win.
4443  unsigned MinWidth = NegMaskVal.getMinSignedBits();
4444  if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getMinSignedBits() <= 32))
4445  return false;
4446 
4447  // Extend masks if we truncated above.
4448  if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) {
4449  NegMaskVal = NegMaskVal.zext(64);
4450  HighZeros = HighZeros.zext(64);
4451  }
4452 
4453  // The variable operand must be all zeros in the top bits to allow using the
4454  // new, negative constant as the mask.
4455  if (!CurDAG->MaskedValueIsZero(And0, HighZeros))
4456  return false;
4457 
4458  // Check if the mask is -1. In that case, this is an unnecessary instruction
4459  // that escaped earlier analysis.
4460  if (NegMaskVal.isAllOnes()) {
4461  ReplaceNode(And, And0.getNode());
4462  return true;
4463  }
4464 
4465  // A negative mask allows a smaller encoding. Create a new 'and' node.
4466  SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT);
4467  insertDAGNode(*CurDAG, SDValue(And, 0), NewMask);
4468  SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask);
4469  ReplaceNode(And, NewAnd.getNode());
4470  SelectCode(NewAnd.getNode());
4471  return true;
4472 }
4473 
4474 static unsigned getVPTESTMOpc(MVT TestVT, bool IsTestN, bool FoldedLoad,
4475  bool FoldedBCast, bool Masked) {
4476 #define VPTESTM_CASE(VT, SUFFIX) \
4477 case MVT::VT: \
4478  if (Masked) \
4479  return IsTestN ? X86::VPTESTNM##SUFFIX##k: X86::VPTESTM##SUFFIX##k; \
4480  return IsTestN ? X86::VPTESTNM##SUFFIX : X86::VPTESTM##SUFFIX;
4481 
4482 
4483 #define VPTESTM_BROADCAST_CASES(SUFFIX) \
4484 default: llvm_unreachable("Unexpected VT!"); \
4485 VPTESTM_CASE(v4i32, DZ128##SUFFIX) \
4486 VPTESTM_CASE(v2i64, QZ128##SUFFIX) \
4487 VPTESTM_CASE(v8i32, DZ256##SUFFIX) \
4488 VPTESTM_CASE(v4i64, QZ256##SUFFIX) \
4489 VPTESTM_CASE(v16i32, DZ##SUFFIX) \
4490 VPTESTM_CASE(v8i64, QZ##SUFFIX)
4491 
4492 #define VPTESTM_FULL_CASES(SUFFIX) \
4493 VPTESTM_BROADCAST_CASES(SUFFIX) \
4494 VPTESTM_CASE(v16i8, BZ128##SUFFIX) \
4495 VPTESTM_CASE(v8i16, WZ128##SUFFIX) \
4496 VPTESTM_CASE(v32i8, BZ256##SUFFIX) \
4497 VPTESTM_CASE(v16i16, WZ256##SUFFIX) \
4498 VPTESTM_CASE(v64i8, BZ##SUFFIX) \
4499 VPTESTM_CASE(v32i16, WZ##SUFFIX)
4500 
4501  if (FoldedBCast) {
4502  switch (TestVT.SimpleTy) {
4503  VPTESTM_BROADCAST_CASES(rmb)
4504  }
4505  }
4506 
4507  if (FoldedLoad) {
4508  switch (TestVT.SimpleTy) {
4509  VPTESTM_FULL_CASES(rm)
4510  }
4511  }
4512 
4513  switch (TestVT.SimpleTy) {
4514  VPTESTM_FULL_CASES(rr)
4515  }
4516 
4517 #undef VPTESTM_FULL_CASES
4518 #undef VPTESTM_BROADCAST_CASES
4519 #undef VPTESTM_CASE
4520 }
4521 
4522 // Try to create VPTESTM instruction. If InMask is not null, it will be used
4523 // to form a masked operation.
4524 bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
4525  SDValue InMask) {
4526  assert(Subtarget->hasAVX512() && "Expected AVX512!");
4527  assert(Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 &&
4528  "Unexpected VT!");
4529 
4530  // Look for equal and not equal compares.
4531  ISD::CondCode CC = cast<CondCodeSDNode>(Setcc.getOperand(2))->get();
4532  if (CC != ISD::SETEQ && CC != ISD::SETNE)
4533  return false;
4534 
4535  SDValue SetccOp0 = Setcc.getOperand(0);
4536  SDValue SetccOp1 = Setcc.getOperand(1);
4537 
4538  // Canonicalize the all zero vector to the RHS.
4539  if (ISD::isBuildVectorAllZeros(SetccOp0.getNode()))
4540  std::swap(SetccOp0, SetccOp1);
4541 
4542  // See if we're comparing against zero.
4543  if (!ISD::isBuildVectorAllZeros(SetccOp1.getNode()))
4544  return false;
4545 
4546  SDValue N0 = SetccOp0;
4547 
4548  MVT CmpVT = N0.getSimpleValueType();
4549  MVT CmpSVT = CmpVT.getVectorElementType();
4550 
4551  // Start with both operands the same. We'll try to refine this.
4552  SDValue Src0 = N0;
4553  SDValue Src1 = N0;
4554 
4555  {
4556  // Look through single use bitcasts.
4557  SDValue N0Temp = N0;
4558  if (N0Temp.getOpcode() == ISD::BITCAST && N0Temp.hasOneUse())
4559  N0Temp = N0.getOperand(0);
4560 
4561  // Look for single use AND.
4562  if (N0Temp.getOpcode() == ISD::AND && N0Temp.hasOneUse()) {
4563  Src0 = N0Temp.getOperand(0);
4564  Src1 = N0Temp.getOperand(1);
4565  }
4566  }
4567 
4568  // Without VLX we need to widen the operation.
4569  bool Widen = !Subtarget->hasVLX() && !CmpVT.is512BitVector();
4570 
4571  auto tryFoldLoadOrBCast = [&](SDNode *Root, SDNode *P, SDValue &L,
4572  SDValue &Base, SDValue &Scale, SDValue &Index,
4573  SDValue &Disp, SDValue &Segment) {
4574  // If we need to widen, we can't fold the load.
4575  if (!Widen)
4576  if (tryFoldLoad(Root, P, L, Base, Scale, Index, Disp, Segment))
4577  return true;
4578 
4579  // If we didn't fold a load, try to match a broadcast. Widening is no
4580  // limitation for this, but only 32- and 64-bit element types are supported.
4581  if (CmpSVT != MVT::i32 && CmpSVT != MVT::i64)
4582  return false;
4583 
4584  // Look through single use bitcasts.
4585  if (L.getOpcode() == ISD::BITCAST && L.hasOneUse()) {
4586  P = L.getNode();
4587  L = L.getOperand(0);
4588  }
4589 
4590  if (L.getOpcode() != X86ISD::VBROADCAST_LOAD)
4591  return false;
4592 
4593  auto *MemIntr = cast<MemIntrinsicSDNode>(L);
4594  if (MemIntr->getMemoryVT().getSizeInBits() != CmpSVT.getSizeInBits())
4595  return false;
4596 
4597  return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment);
4598  };
4599 
4600  // We can only fold loads if the sources are unique.
4601  bool CanFoldLoads = Src0 != Src1;
4602 
4603  bool FoldedLoad = false;
4604  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4605  if (CanFoldLoads) {
4606  FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src1, Tmp0, Tmp1, Tmp2,
4607  Tmp3, Tmp4);
4608  if (!FoldedLoad) {
4609  // AND is commutative.
4610  FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src0, Tmp0, Tmp1,
4611  Tmp2, Tmp3, Tmp4);
4612  if (FoldedLoad)
4613  std::swap(Src0, Src1);
4614  }
4615  }
4616 
4617  bool FoldedBCast = FoldedLoad && Src1.getOpcode() == X86ISD::VBROADCAST_LOAD;
4618 
4619  bool IsMasked = InMask.getNode() != nullptr;
4620 
4621  SDLoc dl(Root);
4622 
4623  MVT ResVT = Setcc.getSimpleValueType();
4624  MVT MaskVT = ResVT;
4625  if (Widen) {
4626  // Widen the inputs using insert_subreg or copy_to_regclass.
4627  unsigned Scale = CmpVT.is128BitVector() ? 4 : 2;
4628  unsigned SubReg = CmpVT.is128BitVector() ? X86::sub_xmm : X86::sub_ymm;
4629  unsigned NumElts = CmpVT.getVectorNumElements() * Scale;
4630  CmpVT = MVT::getVectorVT(CmpSVT, NumElts);
4631  MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
4632  SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, dl,
4633  CmpVT), 0);
4634  Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0);
4635 
4636  if (!FoldedBCast)
4637  Src1 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src1);
4638 
4639  if (IsMasked) {
4640  // Widen the mask.
4641  unsigned RegClass = TLI->getRegClassFor(MaskVT)->getID();
4642  SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4643  InMask = SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4644  dl, MaskVT, InMask, RC), 0);
4645  }
4646  }
4647 
4648  bool IsTestN = CC == ISD::SETEQ;
4649  unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast,
4650  IsMasked);
4651 
4652  MachineSDNode *CNode;
4653  if (FoldedLoad) {
4654  SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other);
4655 
4656  if (IsMasked) {
4657  SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4658  Src1.getOperand(0) };
4659  CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4660  } else {
4661  SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4662  Src1.getOperand(0) };
4663  CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4664  }
4665 
4666  // Update the chain.
4667  ReplaceUses(Src1.getValue(1), SDValue(CNode, 1));
4668  // Record the mem-refs
4669  CurDAG->setNodeMemRefs(CNode, {cast<MemSDNode>(Src1)->getMemOperand()});
4670  } else {
4671  if (IsMasked)
4672  CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1);
4673  else
4674  CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, Src0, Src1);
4675  }
4676 
4677  // If we widened, we need to shrink the mask VT.
4678  if (Widen) {
4679  unsigned RegClass = TLI->getRegClassFor(ResVT)->getID();
4680  SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4681  CNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4682  dl, ResVT, SDValue(CNode, 0), RC);
4683  }
4684 
4685  ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0));
4686  CurDAG->RemoveDeadNode(Root);
4687  return true;
4688 }
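
// The net effect, sketched in informal DAG notation (illustrative, not an
// actual -debug dump; opcodes shown for v16i32):
//   (setcc (and X, Y), all-zeros, setne) --> (VPTESTMDZrr X, Y)
//   (setcc (and X, Y), all-zeros, seteq) --> (VPTESTNMDZrr X, Y)
// with a load or broadcast of one source folded into the rm/rmb forms when
// Src0 and Src1 are distinct, and the whole operation widened to 512 bits
// first when VLX is unavailable.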
4689 
4690 // Try to match the bitselect pattern (or (and A, B), (andn A, C)). Turn it
4691 // into vpternlog.
4692 bool X86DAGToDAGISel::tryMatchBitSelect(SDNode *N) {
4693  assert(N->getOpcode() == ISD::OR && "Unexpected opcode!");
4694 
4695  MVT NVT = N->getSimpleValueType(0);
4696 
4697  // Make sure we support VPTERNLOG.
4698  if (!NVT.isVector() || !Subtarget->hasAVX512())
4699  return false;
4700 
4701  // We need VLX for 128/256-bit.
4702  if (!(Subtarget->hasVLX() || NVT.is512BitVector()))
4703  return false;
4704 
4705  SDValue N0 = N->getOperand(0);
4706  SDValue N1 = N->getOperand(1);
4707 
4708  // Canonicalize AND to LHS.
4709  if (N1.getOpcode() == ISD::AND)
4710  std::swap(N0, N1);
4711 
4712  if (N0.getOpcode() != ISD::AND ||
4713  N1.getOpcode() != X86ISD::ANDNP ||
4714  !N0.hasOneUse() || !N1.hasOneUse())
4715  return false;
4716 
4717  // ANDN is not commutable, so use it to pin down A and C.
4718  SDValue A = N1.getOperand(0);
4719  SDValue C = N1.getOperand(1);
4720 
4721  // AND is commutable: if one operand matches A, the other operand is B.
4722  // Otherwise this isn't a match.
4723  SDValue B;
4724  if (N0.getOperand(0) == A)
4725  B = N0.getOperand(1);
4726  else if (N0.getOperand(1) == A)
4727  B = N0.getOperand(0);
4728  else
4729  return false;
4730 
4731  SDLoc dl(N);
4732  SDValue Imm = CurDAG->getTargetConstant(0xCA, dl, MVT::i8);
4733  SDValue Ternlog = CurDAG->getNode(X86ISD::VPTERNLOG, dl, NVT, A, B, C, Imm);
4734  ReplaceNode(N, Ternlog.getNode());
4735 
4736  return matchVPTERNLOG(Ternlog.getNode(), Ternlog.getNode(), Ternlog.getNode(),
4737  Ternlog.getNode(), A, B, C, 0xCA);
4738 }
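
// Why 0xCA: VPTERNLOG's imm8 is the truth table of the desired function
// evaluated on the canonical operand patterns A=0xF0, B=0xCC, C=0xAA, so for
// the bitselect (A & B) | (~A & C) the byte works out to 0xCA. A compile-time
// check of that arithmetic (illustrative only, not part of the matcher):
static_assert(((0xF0 & 0xCC) | (~0xF0 & 0xAA & 0xFF)) == 0xCA,
              "bitselect truth table should be 0xCA");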
4739 
4740 void X86DAGToDAGISel::Select(SDNode *Node) {
4741  MVT NVT = Node->getSimpleValueType(0);
4742  unsigned Opcode = Node->getOpcode();
4743  SDLoc dl(Node);
4744 
4745  if (Node->isMachineOpcode()) {
4746  LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
4747  Node->setNodeId(-1);
4748  return; // Already selected.
4749  }
4750 
4751  switch (Opcode) {
4752  default: break;
4753  case ISD::INTRINSIC_W_CHAIN: {
4754  unsigned IntNo = Node->getConstantOperandVal(1);
4755  switch (IntNo) {
4756  default: break;
4757  case Intrinsic::x86_encodekey128:
4758  case Intrinsic::x86_encodekey256: {
4759  if (!Subtarget->hasKL())
4760  break;
4761 
4762  unsigned Opcode;
4763  switch (IntNo) {
4764  default: llvm_unreachable("Impossible intrinsic");
4765  case Intrinsic::x86_encodekey128: Opcode = X86::ENCODEKEY128; break;
4766  case Intrinsic::x86_encodekey256: Opcode = X86::ENCODEKEY256; break;
4767  }
4768 
4769  SDValue Chain = Node->getOperand(0);
4770  Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM0, Node->getOperand(3),
4771  SDValue());
4772  if (Opcode == X86::ENCODEKEY256)
4773  Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM1, Node->getOperand(4),
4774  Chain.getValue(1));
4775 
4776  MachineSDNode *Res = CurDAG->getMachineNode(
4777  Opcode, dl, Node->getVTList(),
4778  {Node->getOperand(2), Chain, Chain.getValue(1)});
4779  ReplaceNode(Node, Res);
4780  return;
4781  }
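
// At the source level this corresponds to the Key Locker intrinsics, e.g.
// (assuming <immintrin.h> and a target with -mkl):
//   unsigned flags = _mm_encodekey128_u32(htype, key, handle_out);
// Operand 2 above is that control word, and operand 3 (plus operand 4 for
// the 256-bit form) is the key material copied to XMM0 (and XMM1).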
4782  case Intrinsic::x86_tileloadd64_internal:
4783  case Intrinsic::x86_tileloaddt164_internal: {
4784  if (!Subtarget->hasAMXTILE())
4785  break;
4786  unsigned Opc = IntNo == Intrinsic::x86_tileloadd64_internal
4787  ? X86::PTILELOADDV
4788  : X86::PTILELOADDT1V;
4789  // _tile_loadd_internal(row, col, buf, STRIDE)
4790  SDValue Base = Node->getOperand(4);
4791  SDValue Scale = getI8Imm(1, dl);
4792  SDValue Index = Node->getOperand(5);
4793  SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4794  SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4795  SDValue Chain = Node->getOperand(0);
4796  MachineSDNode *CNode;
4797  SDValue Ops[] = {Node->getOperand(2),
4798  Node->getOperand(3),
4799  Base,
4800  Scale,
4801  Index,
4802  Disp,
4803  Segment,
4804  Chain};
4805  CNode = CurDAG->getMachineNode(Opc, dl, {MVT::x86amx, MVT::Other}, Ops);
4806  ReplaceNode(Node, CNode);
4807  return;
4808  }
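
// Note how the memory operand is assembled above: the buffer pointer becomes
// the base and the stride becomes the index with scale 1 and displacement 0,
// so PTILELOADDV sees an ordinary base+index SIB address whose effective
// stride is carried entirely by the index register.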
4809  }
4810  break;
4811  }
4812  case ISD::INTRINSIC_VOID: {
4813  unsigned IntNo = Node->getConstantOperandVal(1);
4814  switch (IntNo) {
4815  default: break;
4816  case Intrinsic::x86_sse3_monitor:
4817  case Intrinsic::x86_monitorx:
4818  case Intrinsic::x86_clzero: {
4819  bool Use64BitPtr = Node->getOperand(2).getValueType() == MVT::i64;
4820 
4821  unsigned Opc = 0;
4822  switch (IntNo) {
4823  default: llvm_unreachable("Unexpected intrinsic!");
4824  case Intrinsic::x86_sse3_monitor:
4825  if (!Subtarget->hasSSE3())
4826  break;
4827  Opc = Use64BitPtr ? X86::MONITOR64rrr : X86::MONITOR32rrr;
4828  break;
4829  case Intrinsic::x86_monitorx:
4830  if (!Subtarget->hasMWAITX())
4831  break;
4832  Opc = Use64BitPtr ? X86::MONITORX64rrr : X86::MONITORX32rrr;
4833  break;
4834  case Intrinsic::x86_clzero:
4835  if (!Subtarget->hasCLZERO())
4836  break;
4837  Opc = Use64BitPtr ? X86::CLZERO64r : X86::CLZERO32r;
4838  break;
4839  }
4840 
4841  if (Opc) {
4842  unsigned PtrReg = Use64BitPtr ? X86::RAX : X86::EAX;
4843  SDValue Chain = CurDAG->getCopyToReg(Node->getOperand(0), dl, PtrReg,
4844  Node->getOperand(2), SDValue());
4845  SDValue InFlag = Chain.getValue(1);
4846 
4847  if (IntNo == Intrinsic::x86_sse3_monitor ||
4848  IntNo == Intrinsic::x86_monitorx) {
4849  // Copy the other two operands to ECX and EDX.
4850  Chain = CurDAG->getCopyToReg(Chain, dl, X86::ECX, Node->getOperand(3),
4851  InFlag);
4852  InFlag = Chain.getValue(1);
4853  Chain = CurDAG->getCopyToReg(Chain, dl, X86::EDX, Node->getOperand(4),
4854  InFlag);
4855  InFlag = Chain.getValue(1);
4856  }
4857 
4858  MachineSDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
4859  { Chain, InFlag});
4860  ReplaceNode(Node, CNode);
4861  return;
4862  }
4863 
4864  break;
4865  }
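
// Register conventions for the block above: the pointer is pinned to
// EAX/RAX, and MONITOR/MONITORX additionally take extensions in ECX and
// hints in EDX, matching e.g. _mm_monitor(p, ext, hints) from <pmmintrin.h>.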
4866  case Intrinsic::x86_tilestored64_internal: {
4867  unsigned Opc = X86::PTILESTOREDV;
4868  // _tile_stored_internal(row, col, buf, STRIDE, c)
4869  SDValue Base = Node->getOperand(4);
4870  SDValue Scale = getI8Imm(1, dl);
4871  SDValue Index = Node->getOperand(5);
4872  SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4873  SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4874  SDValue Chain = Node->getOperand(0);
4875  MachineSDNode *CNode;
4876  SDValue Ops[] = {Node->getOperand(2),
4877  Node->getOperand(3),
4878  Base,
4879  Scale,
4880  Index,
4881  Disp,
4882  Segment,
4883  Node->getOperand(6),
4884  Chain};
4885  CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4886  ReplaceNode(Node, CNode);
4887  return;
4888  }
4889  case Intrinsic::x86_tileloadd64:
4890  case Intrinsic::x86_tileloaddt164:
4891  case Intrinsic::x86_tilestored64: {
4892  if (!Subtarget->hasAMXTILE())
4893  break;
4894  unsigned Opc;
4895  switch (IntNo) {
4896  default: llvm_unreachable("Unexpected intrinsic!");
4897  case Intrinsic::x86_tileloadd64: Opc = X86::PTILELOADD; break;
4898  case Intrinsic::x86_tileloaddt164: Opc = X86::PTILELOADDT1; break;
4899  case Intrinsic::x86_tilestored64: Opc = X86::PTILESTORED; break;
4900  }
4901  // FIXME: Match displacement and scale.
4902  unsigned TIndex = Node->getConstantOperandVal(2);
4903  SDValue TReg = getI8Imm(TIndex, dl);
4904  SDValue Base = Node->getOperand(3);
4905  SDValue Scale = getI8Imm(1, dl);
4906  SDValue Index = Node->getOperand(4);
4907  SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4908  SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4909  SDValue Chain = Node->getOperand(0);
4910  MachineSDNode *CNode;
4911  if (Opc == X86::PTILESTORED) {
4912  SDValue Ops[] = { Base, Scale, Index, Disp, Segment, TReg, Chain };
4913  CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4914  } else {
4915  SDValue Ops[] = { TReg, Base, Scale, Index, Disp, Segment, Chain };
4916  CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4917  }
4918  ReplaceNode(Node, CNode);
4919  return;
4920  }
4921  }
4922  break;
4923  }
4924  case ISD::BRIND:
4925  case X86ISD::NT_BRIND: {
4926  if (Subtarget->isTargetNaCl())
4927  // NaCl has its own pass where jmp %r32 is converted to jmp %r64. We
4928  // leave the instruction alone.
4929  break;
4930  if (Subtarget->isTarget64BitILP32()) {
4931  // Converts a 32-bit register to a 64-bit, zero-extended version of
4932  // it. This is needed because x86-64 can do many things, but jmp %r32
4933  // ain't one of them.
4934  SDValue Target = Node->getOperand(1);
4935  assert(Target.getValueType() == MVT::i32 && "Unexpected VT!");
4936  SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, MVT::i64);
4937  SDValue Brind = CurDAG->getNode(Opcode, dl, MVT::Other,
4938  Node->getOperand(0), ZextTarget);
4939  ReplaceNode(Node, Brind.getNode());
4940  SelectCode(ZextTarget.getNode());
4941  SelectCode(Brind.getNode());
4942  return;
4943  }
4944  break;
4945  }
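
// On ILP32 targets the rewrite above amounts to (illustrative AT&T syntax):
//   movl %eax, %eax   # writing a 32-bit register implicitly zeros bits 63:32
//   jmpq *%rax
// because a near indirect jump through a 32-bit register is not encodable in
// 64-bit mode.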
4946  case X86ISD::GlobalBaseReg:
4947  ReplaceNode(Node, getGlobalBaseReg());
4948  return;
4949 
4950  case ISD::BITCAST:
4951  // Just drop all 128/256/512-bit bitcasts.
4952  if (NVT.is512BitVector() || NVT.is256BitVector() || NVT.is128BitVector() ||
4953  NVT == MVT::f128) {
4954  ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
4955  CurDAG->RemoveDeadNode(Node);
4956  return;
4957  }
4958  break;
4959 
4960  case ISD::SRL:
4961  if (matchBitExtract(Node))
4962  return;
4963  LLVM_FALLTHROUGH;
4964  case ISD::SRA:
4965  case ISD::SHL:
4966  if (tryShiftAmountMod(Node))
4967  return;
4968  break;
4969 
4970  case X86ISD::VPTERNLOG: {
4971  uint8_t Imm = cast<ConstantSDNode>(Node->getOperand(3))->getZExtValue();
4972  if (matchVPTERNLOG(Node, Node, Node, Node, Node->getOperand(0),
4973  Node->getOperand(1), Node->getOperand(2), Imm))
4974  return;
4975  break;
4976  }
4977 
4978  case X86ISD::ANDNP:
4979  if (tryVPTERNLOG(Node))
4980  return;
4981  break;
4982 
4983  case ISD::AND:
4984  if (NVT.isVector() && NVT.getVectorElementType() == MVT::i1) {
4985  // Try to form a masked VPTESTM. Operands can be in either order.
4986  SDValue N0 = Node->getOperand(0);
4987  SDValue N1 = Node->getOperand(1);
4988  if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() &&
4989  tryVPTESTM(Node, N0, N1))
4990  return;
4991  if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
4992  tryVPTESTM(Node, N1, N0))
4993  return;
4994  }
4995 
4996  if (MachineSDNode *NewNode = matchBEXTRFromAndImm(Node)) {
4997  ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
4998  CurDAG->RemoveDeadNode(Node);
4999  return;
5000  }
5001  if (matchBitExtract(Node))
5002  return;
5003  if (AndImmShrink && shrinkAndImmediate(Node))
5004  return;
5005 
5006  LLVM_FALLTHROUGH;
5007  case ISD::OR:
5008  case ISD::XOR:
5009  if (tryShrinkShlLogicImm(Node))
5010  return;
5011  if (Opcode == ISD::OR && tryMatchBitSelect(Node))
5012  return;
5013  if (tryVPTERNLOG(Node))
5014  return;
5015 
5016  LLVM_FALLTHROUGH;
5017  case ISD::ADD:
5018  case ISD::SUB: {
5019  // Try to avoid folding immediates with multiple uses for optsize.
5020  // This code tries to select the register form directly to avoid going
5021  // through the isel table, which might fold the immediate. We can't change
5022  // the patterns on the add/sub/and/or/xor with immediate patterns in the
5023  // tablegen files to check the immediate use count without making the
5024  // patterns unavailable to the fast-isel table.
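// A concrete example of the size win (illustrative): three users of
// (add X, 1234567) would each re-encode a 4-byte imm32 in the ADD32ri form,
// whereas materializing the constant once and selecting ADD32rr pays for the
// extra mov only once.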
5025  if (!CurDAG->shouldOptForSize())
5026  break;
5027 
5028  // Only handle i8/i16/i32/i64.
5029  if (NVT != MVT::i8 && NVT != MVT::i16 && NVT != MVT::i32 && NVT != MVT::i64)
5030  break;
5031 
5032  SDValue N0 = Node->getOperand(0);
5033  SDValue N1 = Node->getOperand(1);
5034 
5035  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
5036  if (!Cst)
5037  break;
5038 
5039  int64_t Val = Cst->getSExtValue();
5040 
5041  // Make sure it's an immediate that is considered foldable.
5042  // FIXME: Handle unsigned 32-bit immediates for 64-bit AND.
5043  if (!isInt<8>(Val) && !isInt<32>(Val))
5044  break;
5045 
5046  // If this can match to INC/DEC, let it go.
5047  if (Opcode == ISD::ADD && (Val == 1 || Val == -1))
5048  break;
5049 
5050  // Check if we should avoid folding this immediate.
5051  if (!shouldAvoidImmediateInstFormsForSize(N1.getNode()))
5052  break;
5053 
5054  // We should not fold the immediate, so we need the register form instead.
5055  unsigned ROpc, MOpc;
5056  switch (NVT.SimpleTy) {
5057  default: llvm_unreachable("Unexpected VT!");
5058  case MVT::i8:
5059  switch (Opcode) {
5060  default: llvm_unreachable("Unexpected opcode!");
5061  case ISD::ADD: ROpc = X86::ADD8rr; MOpc = X86::ADD8rm; break;
5062  case ISD::SUB: ROpc = X86::SUB8rr; MOpc = X86::SUB8rm; break;
5063  case ISD::AND: ROpc = X86::AND8rr; MOpc = X86::AND8rm; break;
5064  case ISD::OR: ROpc = X86::OR8rr; MOpc = X86::OR8rm; break;
5065  case ISD::XOR: ROpc = X86::XOR8rr; MOpc = X86::XOR8rm; break;
5066  }
5067  break;
5068  case MVT::i16:
5069  switch (Opcode) {
5070  default: llvm_unreachable("Unexpected opcode!");
5071  case ISD::ADD: ROpc = X86::ADD16rr; MOpc = X86::ADD16rm; break;
5072  case ISD::SUB: ROpc = X86::SUB16rr; MOpc = X86::SUB16rm; break;
5073  case ISD::AND: ROpc = X86::AND16rr; MOpc = X86::AND16rm; break;
5074  case ISD::OR: ROpc = X86::OR16rr; MOpc = X86::OR16rm; break;
5075  case ISD::XOR: ROpc = X86::XOR16rr; MOpc = X86::XOR16rm; break;
5076  }
5077  break;
5078  case MVT::i32:
5079  switch (Opcode) {
5080  default: llvm_unreachable("Unexpected opcode!");
5081  case ISD::ADD: ROpc = X86::ADD32rr; MOpc = X86::ADD32rm; break;
5082  case ISD::SUB: ROpc = X86::SUB32rr; MOpc = X86::SUB32rm; break;
5083  case ISD::AND: ROpc = X86::AND32rr; MOpc = X86::AND32rm; break;
5084  case ISD::OR: ROpc = X86::OR32rr; MOpc = X86::OR32rm; break;
5085  case ISD::XOR: ROpc = X86::XOR32rr; MOpc = X86::XOR32rm; break;
5086  }
5087  break;
5088  case MVT::i64:
5089  switch (Opcode) {
5090  default: llvm_unreachable("Unexpected opcode!");
5091  case ISD::ADD: ROpc = X86::ADD64rr; MOpc = X86::ADD64rm; break;
5092  case ISD::SUB: ROpc = X86::SUB64rr; MOpc = X86::SUB64rm; break;
5093  case ISD::AND: ROpc = X86::AND64rr; MOpc = X86::AND64rm; break;
5094  case ISD::OR: ROpc = X86::OR64rr; MOpc = X86::OR64rm; break;
5095  case ISD::XOR: ROpc = X86::XOR64rr; MOpc = X86::XOR64rm; break;
5096  }
5097  break;
5098  }
5099 
5100  // OK, this is an AND/OR/XOR/ADD/SUB with a constant.
5101 
5102  // If this is not a subtract, we can still try to fold a load.
5103  if (Opcode != ISD::SUB) {
5104  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
5105