//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsBool())
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore the following attributes because they don't affect the
  // call sequence.
  AttrBuilder CallerAttrs(F.getAttributes(), AttributeList::ReturnIndex);
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull})
    CallerAttrs.removeAttribute(Attr);

  if (CallerAttrs.hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.contains(Attribute::ZExt) ||
      CallerAttrs.contains(Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}
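
// Illustrative sketch (not part of the original file): a target's custom
// lowering typically uses this hook when deciding whether a node that
// expands to a libcall may be emitted as a tail call. `N`, `TLI`, and
// `Chain` stand for whatever node, lowering object, and chain are in scope.
//
// \code
//   SDValue TCChain;
//   bool IsTailCall = TLI.isInTailCallPosition(DAG, N, TCChain);
//   if (IsTailCall)
//     Chain = TCChain; // Reuse the tail call's input chain.
// \endcode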

bool TargetLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    MCRegister Reg = ArgLoc.getLocReg();
    // Only look at callee-saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the value used for the caller.
    // (We look for a CopyFromReg reading a virtual register that is used
    // for the function live-in value of register Reg.)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}
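
// Illustrative sketch (not part of the original file): backends typically
// call this while checking tail-call eligibility, comparing outgoing
// argument values against the caller's live-in registers:
//
// \code
//   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
//   if (!parametersInCSRMatch(MF.getRegInfo(), CallerPreserved, ArgLocs,
//                             OutVals))
//     return false; // A callee-saved argument register holds a new value.
// \endcode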

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
                                                     unsigned ArgIdx) {
  IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
  IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
  IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
  IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
  IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
  IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
  IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
  IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
  IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
  IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
  IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
  IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
  Alignment = Call->getParamStackAlign(ArgIdx);
  IndirectType = nullptr;
  assert(IsByVal + IsPreallocated + IsInAlloca <= 1 &&
         "multiple ABI attributes?");
  if (IsByVal) {
    IndirectType = Call->getParamByValType(ArgIdx);
    if (!Alignment)
      Alignment = Call->getParamAlign(ArgIdx);
  }
  if (IsPreallocated)
    IndirectType = Call->getParamPreallocatedType(ArgIdx);
  if (IsInAlloca)
    IndirectType = Call->getParamInAllocaType(ArgIdx);
}
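
// Illustrative sketch (not part of the original file): when building an
// ArgListTy by hand, each entry's ABI flags can be populated straight from
// the IR call site (CB is assumed to be a CallBase* in scope):
//
// \code
//   TargetLowering::ArgListEntry Entry;
//   Entry.Node = ArgNode;
//   Entry.Ty = CB->getArgOperand(0)->getType();
//   Entry.setAttributes(CB, /*ArgIdx=*/0);
//   Args.push_back(Entry);
// \endcode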

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops,
                            MakeLibCallOptions CallOptions,
                            const SDLoc &dl,
                            SDValue InChain) const {
  if (!InChain)
    InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    SDValue NewOp = Ops[i];
    Entry.Node = NewOp;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = shouldSignExtendTypeInLibCall(NewOp.getValueType(),
                                                 CallOptions.IsSExt);
    Entry.IsZExt = !Entry.IsSExt;

    if (CallOptions.IsSoften &&
        !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
      Entry.IsSExt = Entry.IsZExt = false;
    }
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
  bool zeroExtend = !signExtend;

  if (CallOptions.IsSoften &&
      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
    signExtend = zeroExtend = false;
  }

  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setNoReturn(CallOptions.DoesNotReturn)
      .setDiscardResult(!CallOptions.IsReturnValueUsed)
      .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
      .setSExtResult(signExtend)
      .setZExtResult(zeroExtend);
  return LowerCallTo(CLI);
}
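
// Illustrative sketch (not part of the original file): expanding an f128
// addition that the target cannot perform natively into a libcall. The
// returned pair is the call's result value and its output chain.
//
// \code
//   SDValue Ops[2] = {LHS, RHS};
//   TargetLowering::MakeLibCallOptions CallOptions;
//   std::pair<SDValue, SDValue> Res =
//       makeLibCall(DAG, RTLIB::ADD_F128, MVT::f128, Ops, CallOptions, dl);
//   SDValue Sum = Res.first; // Res.second is the new chain.
// \endcode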

bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
    return false;

  EVT VT = getOptimalMemOpType(Op, FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater or
    // equal to DstAlign (or zero).
    VT = MVT::i64;
    if (Op.isFixedDstAlign())
      while (Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
             !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign()))
        VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  uint64_t Size = Op.size();
  while (Size) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector load / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      bool Fast;
      if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(
              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
              MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}
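
// Illustrative sketch (not part of the original file): for a 15-byte memcpy
// with a fixed 8-byte destination alignment on a typical 64-bit target, this
// returns MemOps = {i64, i64} when overlapping accesses are allowed (the
// second store covers bytes 7..14), and {i64, i32, i16, i8} otherwise:
//
// \code
//   std::vector<EVT> MemOps;
//   bool OK = TLI.findOptimalMemOpLowering(
//       MemOps, /*Limit=*/8,
//       MemOp::Copy(/*Size=*/15, /*DstAlignCanChange=*/false, Align(8),
//                   Align(8), /*IsVolatile=*/false),
//       /*DstAS=*/0, /*SrcAS=*/0, F.getAttributes());
// \endcode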

/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS) const {
  SDValue Chain;
  return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
                             OldRHS, Chain);
}

void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS,
                                         SDValue &Chain,
                                         bool IsSignaling) const {
  // FIXME: Currently we cannot really respect all IEEE predicates due to libgcc
  // not supporting it. We can update this code when libgcc provides such
  // functions.

  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
         && "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETO:
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = O && UNE
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  EVT OpsVT[2] = { OldLHS.getValueType(),
                   OldRHS.getValueType() };
  CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
  auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
  NewLHS = Call.first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC) {
    assert(RetVT.isInteger());
    CCCode = getSetCCInverse(CCCode, RetVT);
  }

  if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
    // Update Chain.
    Chain = Call.second;
  } else {
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT);
    SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode);
    auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
    CCCode = getCmpLibcallCC(LC2);
    if (ShouldInvertCC)
      CCCode = getSetCCInverse(CCCode, RetVT);
    NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
                          Call2.second);
    NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl,
                         Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}
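
// Illustrative sketch (not part of the original file): softening
// (setcc f32 %a, %b, setueq) takes the two-libcall path (LC1 = UO_F32,
// LC2 = OEQ_F32) and produces, roughly:
//
// \code
//   uo = __unordsf2(a, b);   // nonzero if unordered
//   eq = __eqsf2(a, b);      // zero if equal
//   result = (uo != 0) | (eq == 0);
// \endcode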

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-PIC modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}
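
// Illustrative sketch (not part of the original file): a target can override
// this hook to force a particular entry kind; the class name and subtarget
// predicate shown here are hypothetical.
//
// \code
//   unsigned MyTargetLowering::getJumpTableEncoding() const {
//     if (Subtarget.useCompactJumpTables()) // hypothetical predicate
//       return MachineJumpTableInfo::EK_LabelDifference32;
//     return TargetLowering::getJumpTableEncoding();
//   }
// \endcode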

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI, MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO we will have to load it from
  // a GOT and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C || Op1C->isOpaque())
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && DemandedBits.isSubsetOf(C))
      return false;

    if (!C.isSubsetOf(DemandedBits)) {
      EVT VT = Op.getValueType();
      SDValue NewC = TLO.DAG.getConstant(DemandedBits & C, DL, VT);
      SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}
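
// Illustrative sketch (not part of the original file): a worked example of
// the transform above. If only the low byte of an AND result is demanded,
// the constant mask is reduced to its demanded subset:
//
// \code
//   (and i32 %x, 0xFF00FF) with DemandedBits = 0x0000FF
//     --> (and i32 %x, 0x0000FF)   // NewC = DemandedBits & C
// \endcode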

bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            TargetLoweringOpt &TLO) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO);
}

/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = Demanded.getActiveBits();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}
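
// Illustrative sketch (not part of the original file): on a target where
// truncation from i64 and zero-extension from i32 are free (x86-64 is the
// classic case), demanding only the low 16 bits of a 64-bit add gives:
//
// \code
//   (add i64 %x, %y) with Demanded = 0xFFFF
//     --> (any_extend i64 (add i32 (trunc %x), (trunc %y)))
// \endcode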

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}
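
// Illustrative sketch (not part of the original file): this entry point is
// what target combines usually call, e.g. inside a PerformDAGCombine hook:
//
// \code
//   // Only the low 8 bits of operand 0 affect this node's result.
//   APInt Demanded = APInt::getLowBitsSet(32, 8);
//   if (TLI.SimplifyDemandedBits(N->getOperand(0), Demanded, DCI))
//     return SDValue(N, 0); // Simplified; the worklist revisits N.
// \endcode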

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          KnownBits &Known,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (VT.isScalableVector()) {
    // Pretend we don't know anything for now.
    Known = KnownBits(DemandedBits.getBitWidth());
    return false;
  }

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
                              AssumeSingleUse);
}

// TODO: Can we merge SelectionDAG::GetDemandedBits into this?
// TODO: Under what circumstances can we create nodes? Constant folding?
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Ignore UNDEFs.
  if (Op.isUndef())
    return SDValue();

  // Not demanding any bits/elts from Op.
  if (DemandedBits == 0 || DemandedElts == 0)
    return DAG.getUNDEF(Op.getValueType());

  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned BitWidth = DemandedBits.getBitWidth();
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::BITCAST: {
    SDValue Src = peekThroughBitcasts(Op.getOperand(0));
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (SrcVT == DstVT)
      return Src;

    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
    unsigned NumDstEltBits = DstVT.getScalarSizeInBits();
    if (NumSrcEltBits == NumDstEltBits)
      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedBits, DemandedElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);

    // TODO - bigendian once we have test coverage.
    if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumDstEltBits / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned Offset = i * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
        if (!Sub.isNullValue()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    // TODO - bigendian once we have test coverage.
    if ((NumSrcEltBits % NumDstEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumSrcEltBits / NumDstEltBits;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * NumDstEltBits;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    break;
  }
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::SHL: {
    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      SDValue Op0 = Op.getOperand(0);
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return Op0;
    }
    break;
  }
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return Op0;
    }
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    // If none of the extended bits are demanded, eliminate the sextinreg.
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExBits = ExVT.getScalarSizeInBits();
    if (DemandedBits.getActiveBits() <= ExBits)
      return Op0;
    // If the input is already sign extended, just drop the extension.
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
    if (NumSignBits >= (BitWidth - ExBits + 1))
      return Op0;
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    // If we only want the lowest element and none of the extended bits, then
    // we can return the bitcasted source vector.
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (DemandedElts == 1 && DstVT.getSizeInBits() == SrcVT.getSizeInBits() &&
        DAG.getDataLayout().isLittleEndian() &&
        DemandedBits.getActiveBits() <= SrcVT.getScalarSizeInBits()) {
      return DAG.getBitcast(DstVT, Src);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    SDValue Vec = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    // If we don't demand the inserted subvector, return the base vector.
    if (DemandedSubElts == 0)
      return Vec;
    // If this simply widens the lowest subvector, see if we can do it earlier.
    if (Idx == 0 && Vec.isUndef()) {
      if (SDValue NewSub = SimplifyMultipleUseDemandedBits(
              Sub, DemandedBits, DemandedSubElts, DAG, Depth + 1))
        return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                           Op.getOperand(0), NewSub, Op.getOperand(2));
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      AllUndef = false;
      IdentityLHS &= (M == (int)i);
      IdentityRHS &= ((M - NumElts) == i);
    }

    if (AllUndef)
      return DAG.getUNDEF(Op.getValueType());
    if (IdentityLHS)
      return Op.getOperand(0);
    if (IdentityRHS)
      return Op.getOperand(1);
    break;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}
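
// Illustrative sketch (not part of the original file): unlike
// SimplifyDemandedBits, this helper never rewrites Op; it only returns a
// simpler existing value the caller may use instead. For example, with
// X = (and i32 Y, 0xFF) and only bits 0..7 demanded, it returns Y directly,
// leaving X (and its other uses) untouched:
//
// \code
//   SDValue V = TLI.SimplifyMultipleUseDemandedBits(
//       X, APInt::getLowBitsSet(32, 8), DAG, /*Depth=*/0);
//   // V == Y; the caller can fold its use of X into a use of Y.
// \endcode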

SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

SDValue TargetLowering::SimplifyMultipleUseDemandedVectorElts(
    SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG,
    unsigned Depth) const {
  APInt DemandedBits = APInt::getAllOnes(Op.getScalarValueSizeInBits());
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

/// Look at Op. At this point, we know that only the OriginalDemandedBits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning the
/// original and new nodes in Old and New. Otherwise, analyze the expression and
/// return a mask of Known bits for the expression (used to simplify the
/// caller). The Known bits may only be accurate for those bits in the
/// OriginalDemandedBits and OriginalDemandedElts.
bool TargetLowering::SimplifyDemandedBits(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth, bool AssumeSingleUse) const {
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  assert(Op.getScalarValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");

  // Don't know anything.
  Known = KnownBits(BitWidth);

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (Op.getValueType().isScalableVector())
    return false;

  unsigned NumElts = OriginalDemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  APInt DemandedBits = OriginalDemandedBits;
  APInt DemandedElts = OriginalDemandedElts;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Undef operand.
  if (Op.isUndef())
    return false;

  if (Op.getOpcode() == ISD::Constant) {
    // We know all of the bits for a constant!
    Known = KnownBits::makeConstant(cast<ConstantSDNode>(Op)->getAPIntValue());
    return false;
  }

  if (Op.getOpcode() == ISD::ConstantFP) {
    // We know all of the bits for a floating point constant!
    Known = KnownBits::makeConstant(
        cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt());
    return false;
  }

  // Other users may use these bits.
  EVT VT = Op.getValueType();
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
    if (Depth != 0) {
      // If not at the root, just compute the Known bits to
      // simplify things downstream.
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedBits/Elts to all bits.
    DemandedBits = APInt::getAllOnes(BitWidth);
    DemandedElts = APInt::getAllOnes(NumElts);
  } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
    // Not demanding any bits/elts from Op.
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
    // Limit search depth.
    return false;
  }
  KnownBits Known2;
  switch (Op.getOpcode()) {
  case ISD::TargetConstant:
    llvm_unreachable("Can't simplify this node");
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0])
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

    KnownBits SrcKnown;
    SDValue Src = Op.getOperand(0);
    unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
    APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
    if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
      return true;

    // Upper elements are undef, so only get the knownbits if we just demand
    // the bottom element.
    if (DemandedElts == 1)
      Known = SrcKnown.anyextOrTrunc(BitWidth);
    break;
  }
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded element.
    // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    return false; // Don't fall through, will infinitely loop.
  case ISD::LOAD: {
    auto *LD = cast<LoadSDNode>(Op);
    if (getTargetConstantFromLoad(LD)) {
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false; // Don't fall through, will infinitely loop.
    }
    if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT MemVT = LD->getMemoryVT();
      unsigned MemBits = MemVT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
      return false; // Don't fall through, will infinitely loop.
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();

    // If index isn't constant, assume we need all vector elements AND the
    // inserted element.
    APInt DemandedVecElts(DemandedElts);
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      DemandedVecElts.clearBit(Idx);

      // Inserted element is not required.
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);
    }

    KnownBits KnownScl;
    unsigned NumSclBits = Scl.getScalarValueSizeInBits();
    APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
    if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
      return true;

    Known = KnownScl.anyextOrTrunc(BitWidth);

    KnownBits KnownVec;
    if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
                             Depth + 1))
      return true;

    if (!!DemandedVecElts)
      Known = KnownBits::commonBits(Known, KnownVec);

    return false;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);

    KnownBits KnownSub, KnownSrc;
    if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO,
                             Depth + 1))
      return true;

    Known.Zero.setAllBits();
    Known.One.setAllBits();
    if (!!DemandedSubElts)
      Known = KnownBits::commonBits(Known, KnownSub);
    if (!!DemandedSrcElts)
      Known = KnownBits::commonBits(Known, KnownSrc);

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnes() || !DemandedSubElts.isAllOnes() ||
        !DemandedSrcElts.isAllOnes()) {
      SDValue NewSub = SimplifyMultipleUseDemandedBits(
          Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1);
      SDValue NewSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (NewSub || NewSrc) {
        NewSub = NewSub ? NewSub : Sub;
        NewSrc = NewSrc ? NewSrc : Src;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc, NewSub,
                                        Op.getOperand(2));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);

    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
                             Depth + 1))
      return true;

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) {
      SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (DemandedSrc) {
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc,
                                        Op.getOperand(1));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      APInt DemandedSubElts =
          DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                               Known2, TLO, Depth + 1))
        return true;
      // Known bits are shared by every demanded subvector element.
      if (!!DemandedSubElts)
        Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;
      int M = ShuffleMask[i];
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    if (!!DemandedLHS || !!DemandedRHS) {
      SDValue Op0 = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(1);

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      if (!!DemandedLHS) {
        if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known = KnownBits::commonBits(Known, Known2);
      }
      if (!!DemandedRHS) {
        if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known = KnownBits::commonBits(Known, Known2);
      }

      // Attempt to avoid multi-use ops if we don't need anything from them.
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::AND: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
      // Do not increment Depth here; that can cause an infinite loop.
      KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSKnown.Zero & DemandedBits) ==
          (~RHSC->getAPIntValue() & DemandedBits))
        return TLO.CombineTo(Op, Op0);

      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits,
                                 DemandedElts, TLO))
        return true;

      // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
      // constant, but if this 'and' is only clearing bits that were just set by
      // the xor, then this 'and' can be eliminated by shrinking the mask of
      // the xor. For example, for a 32-bit X:
      // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
      if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
          LHSKnown.One == ~RHSC->getAPIntValue()) {
        SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
        return TLO.CombineTo(Op, Xor);
      }
    }

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
      return TLO.CombineTo(Op, Op1);
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, DemandedElts,
                               TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known &= Known2;
    break;
  }
  case ISD::OR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known |= Known2;
    break;
  }
  case ISD::XOR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if (DemandedBits.isSubsetOf(Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // If all of the unknown bits are known to be zero on one side or the other
    // turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));

    ConstantSDNode *C = isConstOrConstSplat(Op1, DemandedElts);
    if (C) {
      // If one side is a constant, and all of the set bits in the constant are
      // also known set on the other side, turn this into an AND, as we know
      // the bits will be cleared.
      // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
      // NB: it is okay if more bits are known than are requested
      if (C->getAPIntValue() == Known2.One) {
        SDValue ANDC =
            TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC));
      }

      // If the RHS is a constant, see if we can change it. Don't alter a -1
      // constant because that's a 'not' op, and that is better for combining
      // and codegen.
      if (!C->isAllOnes() && DemandedBits.isSubsetOf(C->getAPIntValue())) {
        // We're flipping all demanded bits. Flip the undemanded bits too.
        SDValue New = TLO.DAG.getNOT(dl, Op0, VT);
        return TLO.CombineTo(Op, New);
      }
    }

    // If we can't turn this into a 'not', try to shrink the constant.
    if (!C || !C->isAllOnes())
      if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
        return true;

    Known ^= Known2;
    break;
  }
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return TLO.CombineTo(Op, Op0);

      // TODO: Should we check for other forms of sign-bit comparisons?
      // Examples: X <= -1, X >= 0
    }
    if (getBooleanContents(Op0.getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::SHL: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    EVT ShiftVT = Op1.getValueType();

    if (const APInt *SA =
            TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift. We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::SRL) {
        if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
            unsigned C1 = SA2->getZExtValue();
            unsigned Opc = ISD::SHL;
            int Diff = ShAmt - C1;
            if (Diff < 0) {
              Diff = -Diff;
              Opc = ISD::SRL;
            }
            SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
            return TLO.CombineTo(
                Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
          }
        }
      }

      // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
      // are not demanded. This will likely allow the anyext to be folded away.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::ANY_EXTEND) {
        SDValue InnerOp = Op0.getOperand(0);
        EVT InnerVT = InnerOp.getValueType();
        unsigned InnerBits = InnerVT.getScalarSizeInBits();
        if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
            isTypeDesirableForOp(ISD::SHL, InnerVT)) {
          EVT ShTy = getShiftAmountTy(InnerVT, DL);
          if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
            ShTy = InnerVT;
          SDValue NarrowShl =
              TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
                              TLO.DAG.getConstant(ShAmt, dl, ShTy));
          return TLO.CombineTo(
              Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
        }

        // Repeat the SHL optimization above in cases where an extension
        // intervenes: (shl (anyext (shr x, c1)), c2) to
        // (shl (anyext x), c2-c1). This requires that the bottom c1 bits
        // aren't demanded (as above) and that the shifted upper c1 bits of
        // x aren't demanded.
        // TODO - support non-uniform vector amounts.
        if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL &&
            InnerOp.hasOneUse()) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(InnerOp, DemandedElts)) {
            unsigned InnerShAmt = SA2->getZExtValue();
            if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
                DemandedBits.getActiveBits() <=
                    (InnerBits - InnerShAmt + ShAmt) &&
                DemandedBits.countTrailingZeros() >= ShAmt) {
              SDValue NewSA =
                  TLO.DAG.getConstant(ShAmt - InnerShAmt, dl, ShiftVT);
              SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                               InnerOp.getOperand(0));
              return TLO.CombineTo(
                  Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA));
            }
          }
        }
      }

      APInt InDemandedMask = DemandedBits.lshr(ShAmt);
      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero <<= ShAmt;
      Known.One <<= ShAmt;
      // Low bits known zero.
      Known.Zero.setLowBits(ShAmt);

      // Try shrinking the operation as long as the shift amount will still be
      // in range.
      if ((ShAmt < DemandedBits.getActiveBits()) &&
          ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
        return true;
    }

    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            TLO.DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return TLO.CombineTo(Op, Op0);
    }
    break;
  }
1547  case ISD::SRL: {
1548  SDValue Op0 = Op.getOperand(0);
1549  SDValue Op1 = Op.getOperand(1);
1550  EVT ShiftVT = Op1.getValueType();
1551 
1552  if (const APInt *SA =
1553  TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
1554  unsigned ShAmt = SA->getZExtValue();
1555  if (ShAmt == 0)
1556  return TLO.CombineTo(Op, Op0);
1557 
1558  // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
1559  // single shift. We can do this if the top bits (which are shifted out)
1560  // are never demanded.
1561  // TODO - support non-uniform vector amounts.
1562  if (Op0.getOpcode() == ISD::SHL) {
1563  if (!DemandedBits.intersects(APInt::getHighBitsSet(BitWidth, ShAmt))) {
1564  if (const APInt *SA2 =
1565  TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
1566  unsigned C1 = SA2->getZExtValue();
1567  unsigned Opc = ISD::SRL;
1568  int Diff = ShAmt - C1;
1569  if (Diff < 0) {
1570  Diff = -Diff;
1571  Opc = ISD::SHL;
1572  }
1573  SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
1574  return TLO.CombineTo(
1575  Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
1576  }
1577  }
1578  }
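  // e.g. on i8, ((x << 2) >>u 5) becomes (x >>u 3) when the top 5 bits are
  // not demanded, and ((x << 5) >>u 2) becomes (x << 3) when the top 2 bits
  // are not demanded.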
1579 
1580  APInt InDemandedMask = (DemandedBits << ShAmt);
1581 
1582  // If the shift is exact, then it does demand the low bits (and knows that
1583  // they are zero).
1584  if (Op->getFlags().hasExact())
1585  InDemandedMask.setLowBits(ShAmt);
1586 
1587  // Compute the new bits that are at the top now.
1588  if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1589  Depth + 1))
1590  return true;
1591  assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1592  Known.Zero.lshrInPlace(ShAmt);
1593  Known.One.lshrInPlace(ShAmt);
1594  // High bits known zero.
1595  Known.Zero.setHighBits(ShAmt);
1596  }
1597  break;
1598  }
1599  case ISD::SRA: {
1600  SDValue Op0 = Op.getOperand(0);
1601  SDValue Op1 = Op.getOperand(1);
1602  EVT ShiftVT = Op1.getValueType();
1603 
1604  // If we only want bits that already match the signbit then we don't need
1605  // to shift.
1606  unsigned NumHiDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
1607  if (TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1) >=
1608  NumHiDemandedBits)
1609  return TLO.CombineTo(Op, Op0);
1610 
 1611  // If this is an arithmetic shift right and only the low bit is demanded, we
 1612  // can always convert this into a logical shr, even if the shift amount is
1613  // variable. The low bit of the shift cannot be an input sign bit unless
1614  // the shift amount is >= the size of the datatype, which is undefined.
1615  if (DemandedBits.isOneValue())
1616  return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1));
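  // (Bit 0 of (sra x, c) and of (srl x, c) is bit c of x either way, so with
  // DemandedBits == 1 the logical shift is always a safe replacement.)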
1617 
1618  if (const APInt *SA =
1619  TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
1620  unsigned ShAmt = SA->getZExtValue();
1621  if (ShAmt == 0)
1622  return TLO.CombineTo(Op, Op0);
1623 
1624  APInt InDemandedMask = (DemandedBits << ShAmt);
1625 
1626  // If the shift is exact, then it does demand the low bits (and knows that
1627  // they are zero).
1628  if (Op->getFlags().hasExact())
1629  InDemandedMask.setLowBits(ShAmt);
1630 
1631  // If any of the demanded bits are produced by the sign extension, we also
1632  // demand the input sign bit.
1633  if (DemandedBits.countLeadingZeros() < ShAmt)
1634  InDemandedMask.setSignBit();
1635 
1636  if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1637  Depth + 1))
1638  return true;
1639  assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1640  Known.Zero.lshrInPlace(ShAmt);
1641  Known.One.lshrInPlace(ShAmt);
1642 
1643  // If the input sign bit is known to be zero, or if none of the top bits
1644  // are demanded, turn this into an unsigned shift right.
1645  if (Known.Zero[BitWidth - ShAmt - 1] ||
1646  DemandedBits.countLeadingZeros() >= ShAmt) {
1647  SDNodeFlags Flags;
1648  Flags.setExact(Op->getFlags().hasExact());
1649  return TLO.CombineTo(
1650  Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags));
1651  }
1652 
1653  int Log2 = DemandedBits.exactLogBase2();
1654  if (Log2 >= 0) {
1655  // The bit must come from the sign.
1656  SDValue NewSA = TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, ShiftVT);
1657  return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA));
1658  }
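  // e.g. on i8, (sra x, 7) with only bit 4 demanded: that bit is a copy of
  // the sign, so this folds to (srl x, 8 - 1 - 4) = (srl x, 3), which moves
  // the sign bit (bit 7) down to bit 4.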
1659 
1660  if (Known.One[BitWidth - ShAmt - 1])
1661  // New bits are known one.
1662  Known.One.setHighBits(ShAmt);
1663 
1664  // Attempt to avoid multi-use ops if we don't need anything from them.
1665  if (!InDemandedMask.isAllOnes() || !DemandedElts.isAllOnes()) {
1666  SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1667  Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);
1668  if (DemandedOp0) {
1669  SDValue NewOp = TLO.DAG.getNode(ISD::SRA, dl, VT, DemandedOp0, Op1);
1670  return TLO.CombineTo(Op, NewOp);
1671  }
1672  }
1673  }
1674  break;
1675  }
1676  case ISD::FSHL:
1677  case ISD::FSHR: {
1678  SDValue Op0 = Op.getOperand(0);
1679  SDValue Op1 = Op.getOperand(1);
1680  SDValue Op2 = Op.getOperand(2);
1681  bool IsFSHL = (Op.getOpcode() == ISD::FSHL);
1682 
1683  if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) {
1684  unsigned Amt = SA->getAPIntValue().urem(BitWidth);
1685 
1686  // For fshl, 0-shift returns the 1st arg.
1687  // For fshr, 0-shift returns the 2nd arg.
1688  if (Amt == 0) {
1689  if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts,
1690  Known, TLO, Depth + 1))
1691  return true;
1692  break;
1693  }
1694 
1695  // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt))
1696  // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt)
1697  APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt));
1698  APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt);
1699  if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
1700  Depth + 1))
1701  return true;
1702  if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
1703  Depth + 1))
1704  return true;
1705 
1706  Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt));
1707  Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt));
1708  Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
1709  Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
1710  Known.One |= Known2.One;
1711  Known.Zero |= Known2.Zero;
1712  }
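  // e.g. fshl(x, y, 3) on i8 is (x << 3) | (y >>u 5): x supplies result bits
  // 7..3 and y supplies result bits 2..0, matching the Demanded0/Demanded1
  // masks computed above.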
1713 
1714  // For pow-2 bitwidths we only demand the bottom modulo amt bits.
1715  if (isPowerOf2_32(BitWidth)) {
1716  APInt DemandedAmtBits(Op2.getScalarValueSizeInBits(), BitWidth - 1);
1717  if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
1718  Known2, TLO, Depth + 1))
1719  return true;
1720  }
1721  break;
1722  }
1723  case ISD::ROTL:
1724  case ISD::ROTR: {
1725  SDValue Op0 = Op.getOperand(0);
1726  SDValue Op1 = Op.getOperand(1);
1727 
 1728  // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
1729  if (BitWidth == TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1))
1730  return TLO.CombineTo(Op, Op0);
1731 
1732  // For pow-2 bitwidths we only demand the bottom modulo amt bits.
1733  if (isPowerOf2_32(BitWidth)) {
1734  APInt DemandedAmtBits(Op1.getScalarValueSizeInBits(), BitWidth - 1);
1735  if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,
1736  Depth + 1))
1737  return true;
1738  }
1739  break;
1740  }
1741  case ISD::UMIN: {
 1742  // Check if one arg is always less than (or equal to) the other arg.
1743  SDValue Op0 = Op.getOperand(0);
1744  SDValue Op1 = Op.getOperand(1);
1745  KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1);
1746  KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
1747  Known = KnownBits::umin(Known0, Known1);
1748  if (Optional<bool> IsULE = KnownBits::ule(Known0, Known1))
1749  return TLO.CombineTo(Op, IsULE.getValue() ? Op0 : Op1);
1750  if (Optional<bool> IsULT = KnownBits::ult(Known0, Known1))
1751  return TLO.CombineTo(Op, IsULT.getValue() ? Op0 : Op1);
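  // e.g. if known bits prove Op0 <= 0x0f (top nibble zero) and Op1 >= 0x10
  // (bit 4 known one), ule(Known0, Known1) is provably true and the umin
  // folds to Op0.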
1752  break;
1753  }
1754  case ISD::UMAX: {
 1755  // Check if one arg is always greater than (or equal to) the other arg.
1756  SDValue Op0 = Op.getOperand(0);
1757  SDValue Op1 = Op.getOperand(1);
1758  KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1);
1759  KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
1760  Known = KnownBits::umax(Known0, Known1);
1761  if (Optional<bool> IsUGE = KnownBits::uge(Known0, Known1))
1762  return TLO.CombineTo(Op, IsUGE.getValue() ? Op0 : Op1);
1763  if (Optional<bool> IsUGT = KnownBits::ugt(Known0, Known1))
1764  return TLO.CombineTo(Op, IsUGT.getValue() ? Op0 : Op1);
1765  break;
1766  }
1767  case ISD::BITREVERSE: {
1768  SDValue Src = Op.getOperand(0);
1769  APInt DemandedSrcBits = DemandedBits.reverseBits();
1770  if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
1771  Depth + 1))
1772  return true;
1773  Known.One = Known2.One.reverseBits();
1774  Known.Zero = Known2.Zero.reverseBits();
1775  break;
1776  }
1777  case ISD::BSWAP: {
1778  SDValue Src = Op.getOperand(0);
1779  APInt DemandedSrcBits = DemandedBits.byteSwap();
1780  if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
1781  Depth + 1))
1782  return true;
1783  Known.One = Known2.One.byteSwap();
1784  Known.Zero = Known2.Zero.byteSwap();
1785  break;
1786  }
1787  case ISD::CTPOP: {
1788  // If only 1 bit is demanded, replace with PARITY as long as we're before
1789  // op legalization.
1790  // FIXME: Limit to scalars for now.
1791  if (DemandedBits.isOneValue() && !TLO.LegalOps && !VT.isVector())
1792  return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::PARITY, dl, VT,
1793  Op.getOperand(0)));
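  // (Bit 0 of ctpop(x) is set iff x has an odd number of set bits, which is
  // exactly parity(x).)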
1794 
1795  Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
1796  break;
1797  }
1798  case ISD::SIGN_EXTEND_INREG: {
1799  SDValue Op0 = Op.getOperand(0);
1800  EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1801  unsigned ExVTBits = ExVT.getScalarSizeInBits();
1802 
1803  // If we only care about the highest bit, don't bother shifting right.
1804  if (DemandedBits.isSignMask()) {
1805  unsigned NumSignBits =
1806  TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
1807  bool AlreadySignExtended = NumSignBits >= BitWidth - ExVTBits + 1;
1808  // However if the input is already sign extended we expect the sign
1809  // extension to be dropped altogether later and do not simplify.
1810  if (!AlreadySignExtended) {
1811  // Compute the correct shift amount type, which must be getShiftAmountTy
1812  // for scalar types after legalization.
1813  EVT ShiftAmtTy = VT;
1814  if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
1815  ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);
1816 
1817  SDValue ShiftAmt =
1818  TLO.DAG.getConstant(BitWidth - ExVTBits, dl, ShiftAmtTy);
1819  return TLO.CombineTo(Op,
1820  TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt));
1821  }
1822  }
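  // e.g. for (sext_inreg x, i8) on i32 with only bit 31 demanded, bit 31 of
  // the result is bit 7 of x, so (shl x, 24) computes the same bit.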
1823 
1824  // If none of the extended bits are demanded, eliminate the sextinreg.
1825  if (DemandedBits.getActiveBits() <= ExVTBits)
1826  return TLO.CombineTo(Op, Op0);
1827 
1828  APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits);
1829 
1830  // Since the sign extended bits are demanded, we know that the sign
1831  // bit is demanded.
1832  InputDemandedBits.setBit(ExVTBits - 1);
1833 
1834  if (SimplifyDemandedBits(Op0, InputDemandedBits, Known, TLO, Depth + 1))
1835  return true;
1836  assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1837 
1838  // If the sign bit of the input is known set or clear, then we know the
1839  // top bits of the result.
1840 
1841  // If the input sign bit is known zero, convert this into a zero extension.
1842  if (Known.Zero[ExVTBits - 1])
1843  return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT));
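  // (With the sign bit of the narrow type known zero, sign- and
  // zero-extension fill the high bits with the same zeros.)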
1844 
1845  APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits);
1846  if (Known.One[ExVTBits - 1]) { // Input sign bit known set
1847  Known.One.setBitsFrom(ExVTBits);
1848  Known.Zero &= Mask;
1849  } else { // Input sign bit unknown
1850  Known.Zero &= Mask;
1851  Known.One &= Mask;
1852  }
1853  break;
1854  }
1855  case ISD::BUILD_PAIR: {
1856  EVT HalfVT = Op.getOperand(0).getValueType();
1857  unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();
1858 
1859  APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
1860  APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth);
1861 
1862  KnownBits KnownLo, KnownHi;
1863 
1864  if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1))
1865  return true;
1866 
1867  if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1))
1868  return true;
1869 
1870  Known.Zero = KnownLo.Zero.zext(BitWidth) |
1871  KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth);
1872 
1873  Known.One = KnownLo.One.zext(BitWidth) |
1874  KnownHi.One.zext(BitWidth).shl(HalfBitWidth);
1875  break;
1876  }
1877  case ISD::ZERO_EXTEND:
 1878  case ISD::ZERO_EXTEND_VECTOR_INREG: {
 1879  SDValue Src = Op.getOperand(0);
1880  EVT SrcVT = Src.getValueType();
1881  unsigned InBits = SrcVT.getScalarSizeInBits();
1882  unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
1883  bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG;
1884 
1885  // If none of the top bits are demanded, convert this into an any_extend.
1886  if (DemandedBits.getActiveBits() <= InBits) {
1887  // If we only need the non-extended bits of the bottom element
1888  // then we can just bitcast to the result.
1889  if (IsVecInReg && DemandedElts == 1 &&
1890  VT.getSizeInBits() == SrcVT.getSizeInBits() &&
 1891  TLO.DAG.getDataLayout().isLittleEndian())
 1892  return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
1893 
1894  unsigned Opc =
 1895  IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
 1896  if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
1897  return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
1898  }
1899 
1900  APInt InDemandedBits = DemandedBits.trunc(InBits);
1901  APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
1902  if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
1903  Depth + 1))
1904  return true;
1905  assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1906  assert(Known.getBitWidth() == InBits && "Src width has changed?");
1907  Known = Known.zext(BitWidth);
1908 
1909  // Attempt to avoid multi-use ops if we don't need anything from them.
1910  if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
1911  Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))
1912  return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc));
1913  break;
1914  }
1915  case ISD::SIGN_EXTEND:
 1916  case ISD::SIGN_EXTEND_VECTOR_INREG: {
 1917  SDValue Src = Op.getOperand(0);
1918  EVT SrcVT = Src.getValueType();
1919  unsigned InBits = SrcVT.getScalarSizeInBits();
1920  unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
1921  bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG;
1922 
1923  // If none of the top bits are demanded, convert this into an any_extend.
1924  if (DemandedBits.getActiveBits() <= InBits) {
1925  // If we only need the non-extended bits of the bottom element
1926  // then we can just bitcast to the result.
1927  if (IsVecInReg && DemandedElts == 1 &&
1928  VT.getSizeInBits() == SrcVT.getSizeInBits() &&
 1929  TLO.DAG.getDataLayout().isLittleEndian())
 1930  return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
1931 
1932  unsigned Opc =
 1933  IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
 1934  if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
1935  return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
1936  }
1937 
1938  APInt InDemandedBits = DemandedBits.trunc(InBits);
1939  APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
1940 
1941  // Since some of the sign extended bits are demanded, we know that the sign
1942  // bit is demanded.
1943  InDemandedBits.setBit(InBits - 1);
1944 
1945  if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
1946  Depth + 1))
1947  return true;
1948  assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1949  assert(Known.getBitWidth() == InBits && "Src width has changed?");
1950 
1951  // If the sign bit is known one, the top bits match.
1952  Known = Known.sext(BitWidth);
1953 
1954  // If the sign bit is known zero, convert this to a zero extend.
1955  if (Known.isNonNegative()) {
1956  unsigned Opc =
 1957  IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND;
 1958  if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
1959  return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
1960  }
1961 
1962  // Attempt to avoid multi-use ops if we don't need anything from them.
1963  if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
1964  Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))
1965  return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc));
1966  break;
1967  }
1968  case ISD::ANY_EXTEND:
 1969  case ISD::ANY_EXTEND_VECTOR_INREG: {
 1970  SDValue Src = Op.getOperand(0);
1971  EVT SrcVT = Src.getValueType();
1972  unsigned InBits = SrcVT.getScalarSizeInBits();
1973  unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
1974  bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG;
1975 
1976  // If we only need the bottom element then we can just bitcast.
1977  // TODO: Handle ANY_EXTEND?
1978  if (IsVecInReg && DemandedElts == 1 &&
1979  VT.getSizeInBits() == SrcVT.getSizeInBits() &&
 1980  TLO.DAG.getDataLayout().isLittleEndian())
 1981  return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
1982 
1983  APInt InDemandedBits = DemandedBits.trunc(InBits);
1984  APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
1985  if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
1986  Depth + 1))
1987  return true;
1988  assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1989  assert(Known.getBitWidth() == InBits && "Src width has changed?");
1990  Known = Known.anyext(BitWidth);
1991 
1992  // Attempt to avoid multi-use ops if we don't need anything from them.
1993  if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
1994  Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))
1995  return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc));
1996  break;
1997  }
1998  case ISD::TRUNCATE: {
1999  SDValue Src = Op.getOperand(0);
2000 
2001  // Simplify the input, using demanded bit information, and compute the known
2002  // zero/one bits live out.
2003  unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
2004  APInt TruncMask = DemandedBits.zext(OperandBitWidth);
2005  if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO,
2006  Depth + 1))
2007  return true;
2008  Known = Known.trunc(BitWidth);
2009 
2010  // Attempt to avoid multi-use ops if we don't need anything from them.
2011  if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2012  Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1))
2013  return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc));
2014 
2015  // If the input is only used by this truncate, see if we can shrink it based
2016  // on the known demanded bits.
2017  if (Src.getNode()->hasOneUse()) {
2018  switch (Src.getOpcode()) {
2019  default:
2020  break;
2021  case ISD::SRL:
2022  // Shrink SRL by a constant if none of the high bits shifted in are
2023  // demanded.
2024  if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT))
2025  // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
2026  // undesirable.
2027  break;
2028 
2029  const APInt *ShAmtC =
2030  TLO.DAG.getValidShiftAmountConstant(Src, DemandedElts);
2031  if (!ShAmtC || ShAmtC->uge(BitWidth))
2032  break;
2033  uint64_t ShVal = ShAmtC->getZExtValue();
2034 
2035  APInt HighBits =
2036  APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth);
2037  HighBits.lshrInPlace(ShVal);
2038  HighBits = HighBits.trunc(BitWidth);
2039 
2040  if (!(HighBits & DemandedBits)) {
2041  // None of the shifted in bits are needed. Add a truncate of the
2042  // shift input, then shift it.
2043  SDValue NewShAmt = TLO.DAG.getConstant(
2044  ShVal, dl, getShiftAmountTy(VT, DL, TLO.LegalTypes()));
2045  SDValue NewTrunc =
2046  TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0));
2047  return TLO.CombineTo(
2048  Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, NewShAmt));
2049  }
2050  break;
2051  }
2052  }
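  // e.g. (trunc:i32 (srl:i64 x, 8)) becomes (srl:i32 (trunc:i32 x), 8) when
  // result bits 31..24 are not demanded, since only those bits can receive
  // data from the upper half of x.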
2053 
2054  assert(!Known.hasConflict() && "Bits known to be one AND zero?");
2055  break;
2056  }
2057  case ISD::AssertZext: {
2058  // AssertZext demands all of the high bits, plus any of the low bits
2059  // demanded by its users.
2060  EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
 2061  APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits());
 2062  if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known,
2063  TLO, Depth + 1))
2064  return true;
2065  assert(!Known.hasConflict() && "Bits known to be one AND zero?");
2066 
2067  Known.Zero |= ~InMask;
2068  break;
2069  }
2070  case ISD::EXTRACT_VECTOR_ELT: {
2071  SDValue Src = Op.getOperand(0);
2072  SDValue Idx = Op.getOperand(1);
2073  ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
2074  unsigned EltBitWidth = Src.getScalarValueSizeInBits();
2075 
2076  if (SrcEltCnt.isScalable())
2077  return false;
2078 
2079  // Demand the bits from every vector element without a constant index.
2080  unsigned NumSrcElts = SrcEltCnt.getFixedValue();
2081  APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
2082  if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx))
2083  if (CIdx->getAPIntValue().ult(NumSrcElts))
2084  DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue());
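  // e.g. extracting element 2 of a v4i32 through a constant index demands
  // only source element 2; with a variable index all four source elements
  // are conservatively demanded.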
2085 
 2086  // If BitWidth > EltBitWidth the value is any-extended, so we do not know
 2087  // anything about the extended bits.
2088  APInt DemandedSrcBits = DemandedBits;
2089  if (BitWidth > EltBitWidth)
2090  DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth);
2091 
2092  if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,
2093  Depth + 1))
2094  return true;
2095 
2096  // Attempt to avoid multi-use ops if we don't need anything from them.
2097  if (!DemandedSrcBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) {
2098  if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2099  Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) {
2100  SDValue NewOp =
2101  TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx);
2102  return TLO.CombineTo(Op, NewOp);
2103  }
2104  }
2105 
2106  Known = Known2;
2107  if (BitWidth > EltBitWidth)
2108  Known = Known.anyext(BitWidth);
2109  break;
2110  }
2111  case ISD::BITCAST: {
2112  SDValue Src = Op.getOperand(0);
2113  EVT SrcVT = Src.getValueType();
2114  unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
2115 
2116  // If this is an FP->Int bitcast and if the sign bit is the only
2117  // thing demanded, turn this into a FGETSIGN.
2118  if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() &&
2119  DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) &&
2120  SrcVT.isFloatingPoint()) {
2121  bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT);
 2122  bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
 2123  if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 &&
2124  SrcVT != MVT::f128) {
2125  // Cannot eliminate/lower SHL for f128 yet.
2126  EVT Ty = OpVTLegal ? VT : MVT::i32;
2127  // Make a FGETSIGN + SHL to move the sign bit into the appropriate
2128  // place. We expect the SHL to be eliminated by other optimizations.
2129  SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src);
2130  unsigned OpVTSizeInBits = Op.getValueSizeInBits();
2131  if (!OpVTLegal && OpVTSizeInBits > 32)
2132  Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign);
2133  unsigned ShVal = Op.getValueSizeInBits() - 1;
2134  SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT);
2135  return TLO.CombineTo(Op,
2136  TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt));
2137  }
2138  }
2139 
2140  // Bitcast from a vector using SimplifyDemanded Bits/VectorElts.
2141  // Demand the elt/bit if any of the original elts/bits are demanded.
2142  // TODO - bigendian once we have test coverage.
2143  if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0 &&
2144  TLO.DAG.getDataLayout().isLittleEndian()) {
2145  unsigned Scale = BitWidth / NumSrcEltBits;
2146  unsigned NumSrcElts = SrcVT.getVectorNumElements();
2147  APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
2148  APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
2149  for (unsigned i = 0; i != Scale; ++i) {
2150  unsigned Offset = i * NumSrcEltBits;
2151  APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
2152  if (!Sub.isNullValue()) {
2153  DemandedSrcBits |= Sub;
2154  for (unsigned j = 0; j != NumElts; ++j)
2155  if (DemandedElts[j])
2156  DemandedSrcElts.setBit((j * Scale) + i);
2157  }
2158  }
2159 
2160  APInt KnownSrcUndef, KnownSrcZero;
2161  if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2162  KnownSrcZero, TLO, Depth + 1))
2163  return true;
2164 
2165  KnownBits KnownSrcBits;
2166  if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2167  KnownSrcBits, TLO, Depth + 1))
2168  return true;
2169  } else if ((NumSrcEltBits % BitWidth) == 0 &&
2170  TLO.DAG.getDataLayout().isLittleEndian()) {
2171  unsigned Scale = NumSrcEltBits / BitWidth;
2172  unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
2173  APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
2174  APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
2175  for (unsigned i = 0; i != NumElts; ++i)
2176  if (DemandedElts[i]) {
2177  unsigned Offset = (i % Scale) * BitWidth;
2178  DemandedSrcBits.insertBits(DemandedBits, Offset);
2179  DemandedSrcElts.setBit(i / Scale);
2180  }
2181 
2182  if (SrcVT.isVector()) {
2183  APInt KnownSrcUndef, KnownSrcZero;
2184  if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2185  KnownSrcZero, TLO, Depth + 1))
2186  return true;
2187  }
2188 
2189  KnownBits KnownSrcBits;
2190  if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2191  KnownSrcBits, TLO, Depth + 1))
2192  return true;
2193  }
2194 
2195  // If this is a bitcast, let computeKnownBits handle it. Only do this on a
2196  // recursive call where Known may be useful to the caller.
2197  if (Depth > 0) {
2198  Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
2199  return false;
2200  }
2201  break;
2202  }
2203  case ISD::ADD:
2204  case ISD::MUL:
2205  case ISD::SUB: {
2206  // Add, Sub, and Mul don't demand any bits in positions beyond that
2207  // of the highest bit demanded of them.
2208  SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
2209  SDNodeFlags Flags = Op.getNode()->getFlags();
2210  unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros();
2211  APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
2212  if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO,
2213  Depth + 1) ||
2214  SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO,
2215  Depth + 1) ||
2216  // See if the operation should be performed at a smaller bit width.
2217  ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) {
2218  if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
2219  // Disable the nsw and nuw flags. We can no longer guarantee that we
2220  // won't wrap after simplification.
2221  Flags.setNoSignedWrap(false);
2222  Flags.setNoUnsignedWrap(false);
2223  SDValue NewOp =
2224  TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);
2225  return TLO.CombineTo(Op, NewOp);
2226  }
2227  return true;
2228  }
2229 
2230  // Attempt to avoid multi-use ops if we don't need anything from them.
2231  if (!LoMask.isAllOnes() || !DemandedElts.isAllOnes()) {
2232  SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2233  Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1);
2234  SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2235  Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1);
2236  if (DemandedOp0 || DemandedOp1) {
2237  Flags.setNoSignedWrap(false);
2238  Flags.setNoUnsignedWrap(false);
2239  Op0 = DemandedOp0 ? DemandedOp0 : Op0;
2240  Op1 = DemandedOp1 ? DemandedOp1 : Op1;
2241  SDValue NewOp =
2242  TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);
2243  return TLO.CombineTo(Op, NewOp);
2244  }
2245  }
2246 
2247  // If we have a constant operand, we may be able to turn it into -1 if we
2248  // do not demand the high bits. This can make the constant smaller to
2249  // encode, allow more general folding, or match specialized instruction
 2250  // patterns (e.g. 'blsr' on x86). Don't bother changing 1 to -1 because that
2251  // is probably not useful (and could be detrimental).
 2252  ConstantSDNode *C = isConstOrConstSplat(Op1, DemandedElts);
 2253  APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ);
2254  if (C && !C->isAllOnes() && !C->isOne() &&
2255  (C->getAPIntValue() | HighMask).isAllOnes()) {
2256  SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT);
2257  // Disable the nsw and nuw flags. We can no longer guarantee that we
2258  // won't wrap after simplification.
2259  Flags.setNoSignedWrap(false);
2260  Flags.setNoUnsignedWrap(false);
2261  SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags);
2262  return TLO.CombineTo(Op, NewOp);
2263  }
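  // e.g. (add x, 255) on i32 with only the low 8 bits demanded becomes
  // (add x, -1), since 255 and -1 agree modulo 2^8; the all-ones immediate
  // can then match patterns like x86's blsr (x & (x - 1)).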
2264 
 2265  LLVM_FALLTHROUGH;
 2266  }
2267  default:
2268  if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
2269  if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts,
2270  Known, TLO, Depth))
2271  return true;
2272  break;
2273  }
2274 
2275  // Just use computeKnownBits to compute output bits.
2276  Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
2277  break;
2278  }
2279 
2280  // If we know the value of all of the demanded bits, return this as a
2281  // constant.
2282  if (DemandedBits.isSubsetOf(Known.Zero | Known.One)) {
2283  // Avoid folding to a constant if any OpaqueConstant is involved.
2284  const SDNode *N = Op.getNode();
2285  for (SDNode *Op :
 2286  llvm::make_range(SDNodeIterator::begin(N), SDNodeIterator::end(N))) {
 2287  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
2288  if (C->isOpaque())
2289  return false;
2290  }
2291  if (VT.isInteger())
2292  return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT));
2293  if (VT.isFloatingPoint())
2294  return TLO.CombineTo(
2295  Op,
2296  TLO.DAG.getConstantFP(
2297  APFloat(TLO.DAG.EVTToAPFloatSemantics(VT), Known.One), dl, VT));
2298  }
2299 
2300  return false;
2301 }
2302 
 2303 bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op,
 2304  const APInt &DemandedElts,
2305  APInt &KnownUndef,
2306  APInt &KnownZero,
2307  DAGCombinerInfo &DCI) const {
2308  SelectionDAG &DAG = DCI.DAG;
2309  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
2310  !DCI.isBeforeLegalizeOps());
2311 
2312  bool Simplified =
2313  SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO);
2314  if (Simplified) {
2315  DCI.AddToWorklist(Op.getNode());
2316  DCI.CommitTargetLoweringOpt(TLO);
2317  }
2318 
2319  return Simplified;
2320 }
2321 
2322 /// Given a vector binary operation and known undefined elements for each input
2323 /// operand, compute whether each element of the output is undefined.
 2324 static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG,
 2325  const APInt &UndefOp0,
2326  const APInt &UndefOp1) {
2327  EVT VT = BO.getValueType();
2328  assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() &&
2329  "Vector binop only");
2330 
2331  EVT EltVT = VT.getVectorElementType();
2332  unsigned NumElts = VT.getVectorNumElements();
2333  assert(UndefOp0.getBitWidth() == NumElts &&
2334  UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis");
2335 
2336  auto getUndefOrConstantElt = [&](SDValue V, unsigned Index,
2337  const APInt &UndefVals) {
2338  if (UndefVals[Index])
2339  return DAG.getUNDEF(EltVT);
2340 
2341  if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
2342  // Try hard to make sure that the getNode() call is not creating temporary
2343  // nodes. Ignore opaque integers because they do not constant fold.
2344  SDValue Elt = BV->getOperand(Index);
2345  auto *C = dyn_cast<ConstantSDNode>(Elt);
2346  if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque()))
2347  return Elt;
2348  }
2349 
2350  return SDValue();
2351  };
2352 
2353  APInt KnownUndef = APInt::getZero(NumElts);
2354  for (unsigned i = 0; i != NumElts; ++i) {
2355  // If both inputs for this element are either constant or undef and match
2356  // the element type, compute the constant/undef result for this element of
2357  // the vector.
2358  // TODO: Ideally we would use FoldConstantArithmetic() here, but that does
2359  // not handle FP constants. The code within getNode() should be refactored
2360  // to avoid the danger of creating a bogus temporary node here.
2361  SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0);
2362  SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1);
2363  if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT)
2364  if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef())
2365  KnownUndef.setBit(i);
2366  }
2367  return KnownUndef;
2368 }
2369 
 2370 bool TargetLowering::SimplifyDemandedVectorElts(
 2371  SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef,
2372  APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth,
2373  bool AssumeSingleUse) const {
2374  EVT VT = Op.getValueType();
2375  unsigned Opcode = Op.getOpcode();
2376  APInt DemandedElts = OriginalDemandedElts;
2377  unsigned NumElts = DemandedElts.getBitWidth();
2378  assert(VT.isVector() && "Expected vector op");
2379 
2380  KnownUndef = KnownZero = APInt::getZero(NumElts);
2381 
2382  // TODO: For now we assume we know nothing about scalable vectors.
2383  if (VT.isScalableVector())
2384  return false;
2385 
2386  assert(VT.getVectorNumElements() == NumElts &&
2387  "Mask size mismatches value type element count!");
2388 
2389  // Undef operand.
2390  if (Op.isUndef()) {
2391  KnownUndef.setAllBits();
2392  return false;
2393  }
2394 
2395  // If Op has other users, assume that all elements are needed.
2396  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse)
2397  DemandedElts.setAllBits();
2398 
2399  // Not demanding any elements from Op.
2400  if (DemandedElts == 0) {
2401  KnownUndef.setAllBits();
2402  return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
2403  }
2404 
2405  // Limit search depth.
 2406  if (Depth >= SelectionDAG::MaxRecursionDepth)
 2407  return false;
2408 
2409  SDLoc DL(Op);
2410  unsigned EltSizeInBits = VT.getScalarSizeInBits();
2411 
2412  // Helper for demanding the specified elements and all the bits of both binary
2413  // operands.
2414  auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) {
2415  SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts,
2416  TLO.DAG, Depth + 1);
2417  SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts,
2418  TLO.DAG, Depth + 1);
2419  if (NewOp0 || NewOp1) {
2420  SDValue NewOp = TLO.DAG.getNode(
2421  Opcode, SDLoc(Op), VT, NewOp0 ? NewOp0 : Op0, NewOp1 ? NewOp1 : Op1);
2422  return TLO.CombineTo(Op, NewOp);
2423  }
2424  return false;
2425  };
2426 
2427  switch (Opcode) {
2428  case ISD::SCALAR_TO_VECTOR: {
2429  if (!DemandedElts[0]) {
2430  KnownUndef.setAllBits();
2431  return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
2432  }
2433  SDValue ScalarSrc = Op.getOperand(0);
2434  if (ScalarSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
2435  SDValue Src = ScalarSrc.getOperand(0);
2436  SDValue Idx = ScalarSrc.getOperand(1);
2437  EVT SrcVT = Src.getValueType();
2438 
2439  ElementCount SrcEltCnt = SrcVT.getVectorElementCount();
2440 
2441  if (SrcEltCnt.isScalable())
2442  return false;
2443 
2444  unsigned NumSrcElts = SrcEltCnt.getFixedValue();
2445  if (isNullConstant(Idx)) {
2446  APInt SrcDemandedElts = APInt::getOneBitSet(NumSrcElts, 0);
2447  APInt SrcUndef = KnownUndef.zextOrTrunc(NumSrcElts);
2448  APInt SrcZero = KnownZero.zextOrTrunc(NumSrcElts);
2449  if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
2450  TLO, Depth + 1))
2451  return true;
2452  }
2453  }
2454  KnownUndef.setHighBits(NumElts - 1);
2455  break;
2456  }
2457  case ISD::BITCAST: {
2458  SDValue Src = Op.getOperand(0);
2459  EVT SrcVT = Src.getValueType();
2460 
2461  // We only handle vectors here.
2462  // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits?
2463  if (!SrcVT.isVector())
2464  break;
2465 
2466  // Fast handling of 'identity' bitcasts.
2467  unsigned NumSrcElts = SrcVT.getVectorNumElements();
2468  if (NumSrcElts == NumElts)
2469  return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
2470  KnownZero, TLO, Depth + 1);
2471 
2472  APInt SrcDemandedElts, SrcZero, SrcUndef;
2473 
 2474  // Bitcast from 'large element' src vector to 'small element' vector: we
2475  // must demand a source element if any DemandedElt maps to it.
2476  if ((NumElts % NumSrcElts) == 0) {
2477  unsigned Scale = NumElts / NumSrcElts;
2478  SrcDemandedElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
2479  if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
2480  TLO, Depth + 1))
2481  return true;
2482 
2483  // Try calling SimplifyDemandedBits, converting demanded elts to the bits
2484  // of the large element.
2485  // TODO - bigendian once we have test coverage.
2486  if (TLO.DAG.getDataLayout().isLittleEndian()) {
2487  unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits();
2488  APInt SrcDemandedBits = APInt::getZero(SrcEltSizeInBits);
2489  for (unsigned i = 0; i != NumElts; ++i)
2490  if (DemandedElts[i]) {
2491  unsigned Ofs = (i % Scale) * EltSizeInBits;
2492  SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits);
2493  }
2494 
2495  KnownBits Known;
2496  if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,
2497  TLO, Depth + 1))
2498  return true;
2499  }
2500 
 2501  // If the src element is zero/undef then all the output elements it covers
 2502  // will be as well; only demanded elements are guaranteed to be correct.
2503  for (unsigned i = 0; i != NumSrcElts; ++i) {
2504  if (SrcDemandedElts[i]) {
2505  if (SrcZero[i])
2506  KnownZero.setBits(i * Scale, (i + 1) * Scale);
2507  if (SrcUndef[i])
2508  KnownUndef.setBits(i * Scale, (i + 1) * Scale);
2509  }
2510  }
2511  }
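  // e.g. for (v4i32 bitcast v2i64), demanding i32 elements {2,3} demands
  // i64 source element 1, and a zero/undef source element marks both i32
  // halves it covers.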
2512 
 2513  // Bitcast from 'small element' src vector to 'large element' vector: we
2514  // demand all smaller source elements covered by the larger demanded element
2515  // of this vector.
2516  if ((NumSrcElts % NumElts) == 0) {
2517  unsigned Scale = NumSrcElts / NumElts;
2518  SrcDemandedElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
2519  if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
2520  TLO, Depth + 1))
2521  return true;
2522 
2523  // If all the src elements covering an output element are zero/undef, then
2524  // the output element will be as well, assuming it was demanded.
2525  for (unsigned i = 0; i != NumElts; ++i) {
2526  if (DemandedElts[i]) {
2527  if (SrcZero.extractBits(Scale, i * Scale).isAllOnes())
2528  KnownZero.setBit(i);
2529  if (SrcUndef.extractBits(Scale, i * Scale).isAllOnes())
2530  KnownUndef.setBit(i);
2531  }
2532  }
2533  }
2534  break;
2535  }
2536  case ISD::BUILD_VECTOR: {
2537  // Check all elements and simplify any unused elements with UNDEF.
2538  if (!DemandedElts.isAllOnes()) {
2539  // Don't simplify BROADCASTS.
2540  if (llvm::any_of(Op->op_values(),
2541  [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) {
2542  SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end());
2543  bool Updated = false;
2544  for (unsigned i = 0; i != NumElts; ++i) {
2545  if (!DemandedElts[i] && !Ops[i].isUndef()) {
2546  Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType());
2547  KnownUndef.setBit(i);
2548  Updated = true;
2549  }
2550  }
2551  if (Updated)
2552  return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops));
2553  }
2554  }
2555  for (unsigned i = 0; i != NumElts; ++i) {
2556  SDValue SrcOp = Op.getOperand(i);
2557  if (SrcOp.isUndef()) {
2558  KnownUndef.setBit(i);
2559  } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&
 2560  (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) {
 2561  KnownZero.setBit(i);
2562  }
2563  }
2564  break;
2565  }
2566  case ISD::CONCAT_VECTORS: {
2567  EVT SubVT = Op.getOperand(0).getValueType();
2568  unsigned NumSubVecs = Op.getNumOperands();
2569  unsigned NumSubElts = SubVT.getVectorNumElements();
2570  for (unsigned i = 0; i != NumSubVecs; ++i) {
2571  SDValue SubOp = Op.getOperand(i);
2572  APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts);
2573  APInt SubUndef, SubZero;
2574  if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
2575  Depth + 1))
2576  return true;
2577  KnownUndef.insertBits(SubUndef, i * NumSubElts);
2578  KnownZero.insertBits(SubZero, i * NumSubElts);
2579  }
2580  break;
2581  }
2582  case ISD::INSERT_SUBVECTOR: {
 2583  // Demand any elements from the subvector, and the remainder from the src
 2584  // it's inserted into.
2585  SDValue Src = Op.getOperand(0);
2586  SDValue Sub = Op.getOperand(1);
2587  uint64_t Idx = Op.getConstantOperandVal(2);
2588  unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2589  APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2590  APInt DemandedSrcElts = DemandedElts;
2591  DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
2592 
2593  APInt SubUndef, SubZero;
2594  if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
2595  Depth + 1))
2596  return true;
2597 
2598  // If none of the src operand elements are demanded, replace it with undef.
2599  if (!DemandedSrcElts && !Src.isUndef())
2600  return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
2601  TLO.DAG.getUNDEF(VT), Sub,
2602  Op.getOperand(2)));
2603 
2604  if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero,
2605  TLO, Depth + 1))
2606  return true;
2607  KnownUndef.insertBits(SubUndef, Idx);
2608  KnownZero.insertBits(SubZero, Idx);
2609 
2610  // Attempt to avoid multi-use ops if we don't need anything from them.
2611  if (!DemandedSrcElts.isAllOnes() || !DemandedSubElts.isAllOnes()) {
2612  SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
2613  Src, DemandedSrcElts, TLO.DAG, Depth + 1);
2614  SDValue NewSub = SimplifyMultipleUseDemandedVectorElts(
2615  Sub, DemandedSubElts, TLO.DAG, Depth + 1);
2616  if (NewSrc || NewSub) {
2617  NewSrc = NewSrc ? NewSrc : Src;
2618  NewSub = NewSub ? NewSub : Sub;
2619  SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc,
2620  NewSub, Op.getOperand(2));
2621  return TLO.CombineTo(Op, NewOp);
2622  }
2623  }
2624  break;
2625  }
2626  case ISD::EXTRACT_SUBVECTOR: {
2627  // Offset the demanded elts by the subvector index.
2628  SDValue Src = Op.getOperand(0);
2629  if (Src.getValueType().isScalableVector())
2630  break;
2631  uint64_t Idx = Op.getConstantOperandVal(1);
2632  unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2633  APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2634 
2635  APInt SrcUndef, SrcZero;
2636  if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
2637  Depth + 1))
2638  return true;
2639  KnownUndef = SrcUndef.extractBits(NumElts, Idx);
2640  KnownZero = SrcZero.extractBits(NumElts, Idx);
2641 
2642  // Attempt to avoid multi-use ops if we don't need anything from them.
2643  if (!DemandedElts.isAllOnes()) {
2644  SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
2645  Src, DemandedSrcElts, TLO.DAG, Depth + 1);
2646  if (NewSrc) {
2647  SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc,
2648  Op.getOperand(1));
2649  return TLO.CombineTo(Op, NewOp);
2650  }
2651  }
2652  break;
2653  }
2654  case ISD::INSERT_VECTOR_ELT: {
2655  SDValue Vec = Op.getOperand(0);
2656  SDValue Scl = Op.getOperand(1);
2657  auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2658 
2659  // For a legal, constant insertion index, if we don't need this insertion
2660  // then strip it, else remove it from the demanded elts.
2661  if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
2662  unsigned Idx = CIdx->getZExtValue();
2663  if (!DemandedElts[Idx])
2664  return TLO.CombineTo(Op, Vec);
2665 
2666  APInt DemandedVecElts(DemandedElts);
2667  DemandedVecElts.clearBit(Idx);
2668  if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
2669  KnownZero, TLO, Depth + 1))
2670  return true;
2671 
2672  KnownUndef.setBitVal(Idx, Scl.isUndef());
2673 
2674  KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl));
2675  break;
2676  }
2677 
2678  APInt VecUndef, VecZero;
2679  if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
2680  Depth + 1))
2681  return true;
2682  // Without knowing the insertion index we can't set KnownUndef/KnownZero.
2683  break;
2684  }
2685  case ISD::VSELECT: {
2686  // Try to transform the select condition based on the current demanded
2687  // elements.
2688  // TODO: If a condition element is undef, we can choose from one arm of the
2689  // select (and if one arm is undef, then we can propagate that to the
2690  // result).
2691  // TODO - add support for constant vselect masks (see IR version of this).
2692  APInt UnusedUndef, UnusedZero;
2693  if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef,
2694  UnusedZero, TLO, Depth + 1))
2695  return true;
2696 
2697  // See if we can simplify either vselect operand.
2698  APInt DemandedLHS(DemandedElts);
2699  APInt DemandedRHS(DemandedElts);
2700  APInt UndefLHS, ZeroLHS;
2701  APInt UndefRHS, ZeroRHS;
2702  if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS,
2703  ZeroLHS, TLO, Depth + 1))
2704  return true;
2705  if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS,
2706  ZeroRHS, TLO, Depth + 1))
2707  return true;
2708 
2709  KnownUndef = UndefLHS & UndefRHS;
2710  KnownZero = ZeroLHS & ZeroRHS;
2711  break;
2712  }
2713  case ISD::VECTOR_SHUFFLE: {
2714  ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();
2715 
 2716  // Collect demanded elements from shuffle operands.
2717  APInt DemandedLHS(NumElts, 0);
2718  APInt DemandedRHS(NumElts, 0);
2719  for (unsigned i = 0; i != NumElts; ++i) {
2720  int M = ShuffleMask[i];
2721  if (M < 0 || !DemandedElts[i])
2722  continue;
2723  assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
2724  if (M < (int)NumElts)
2725  DemandedLHS.setBit(M);
2726  else
2727  DemandedRHS.setBit(M - NumElts);
2728  }
2729 
2730  // See if we can simplify either shuffle operand.
2731  APInt UndefLHS, ZeroLHS;
2732  APInt UndefRHS, ZeroRHS;
2733  if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS,
2734  ZeroLHS, TLO, Depth + 1))
2735  return true;
2736  if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS,
2737  ZeroRHS, TLO, Depth + 1))
2738  return true;
2739 
2740  // Simplify mask using undef elements from LHS/RHS.
2741  bool Updated = false;
2742  bool IdentityLHS = true, IdentityRHS = true;
2743  SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end());
2744  for (unsigned i = 0; i != NumElts; ++i) {
2745  int &M = NewMask[i];
2746  if (M < 0)
2747  continue;
2748  if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) ||
2749  (M >= (int)NumElts && UndefRHS[M - NumElts])) {
2750  Updated = true;
2751  M = -1;
2752  }
2753  IdentityLHS &= (M < 0) || (M == (int)i);
2754  IdentityRHS &= (M < 0) || ((M - NumElts) == i);
2755  }
2756 
 2757  // Update legal shuffle masks based on demanded elements if it won't reduce
 2758  // the mask to the identity, which can cause premature removal of the shuffle.
2759  if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) {
2760  SDValue LegalShuffle =
2761  buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1),
2762  NewMask, TLO.DAG);
2763  if (LegalShuffle)
2764  return TLO.CombineTo(Op, LegalShuffle);
2765  }
2766 
2767  // Propagate undef/zero elements from LHS/RHS.
2768  for (unsigned i = 0; i != NumElts; ++i) {
2769  int M = ShuffleMask[i];
2770  if (M < 0) {
2771  KnownUndef.setBit(i);
2772  } else if (M < (int)NumElts) {
2773  if (UndefLHS[M])
2774  KnownUndef.setBit(i);
2775  if (ZeroLHS[M])
2776  KnownZero.setBit(i);
2777  } else {
2778  if (UndefRHS[M - NumElts])
2779  KnownUndef.setBit(i);
2780  if (ZeroRHS[M - NumElts])
2781  KnownZero.setBit(i);
2782  }
2783  }
2784  break;
2785  }
 2786  case ISD::ANY_EXTEND_VECTOR_INREG:
 2787  case ISD::SIGN_EXTEND_VECTOR_INREG:
 2788  case ISD::ZERO_EXTEND_VECTOR_INREG: {
 2789  APInt SrcUndef, SrcZero;
2790  SDValue Src = Op.getOperand(0);
2791  unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2792  APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts);
2793  if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
2794  Depth + 1))
2795  return true;
2796  KnownZero = SrcZero.zextOrTrunc(NumElts);
2797  KnownUndef = SrcUndef.zextOrTrunc(NumElts);
2798 
2799  if (Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG &&
2800  Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
2801  DemandedSrcElts == 1 && TLO.DAG.getDataLayout().isLittleEndian()) {
2802  // aext - if we just need the bottom element then we can bitcast.
2803  return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
2804  }
2805 
2806  if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) {
2807  // zext(undef) upper bits are guaranteed to be zero.
2808  if (DemandedElts.isSubsetOf(KnownUndef))
2809  return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
2810  KnownUndef.clearAllBits();
2811  }
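  // (If every demanded element is zero-extended from an undef source
  // element, the node folds to the zero vector: the extended high bits must
  // be zero and the undef low bits may be chosen to be zero.)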
2812  break;
2813  }
2814 
2815  // TODO: There are more binop opcodes that could be handled here - MIN,
2816  // MAX, saturated math, etc.
2817  case ISD::OR:
2818  case ISD::XOR:
2819  case ISD::ADD:
2820  case ISD::SUB:
2821  case ISD::FADD:
2822  case ISD::FSUB:
2823  case ISD::FMUL:
2824  case ISD::FDIV:
2825  case ISD::FREM: {
2826  SDValue Op0 = Op.getOperand(0);
2827  SDValue Op1 = Op.getOperand(1);
2828 
2829  APInt UndefRHS, ZeroRHS;
2830  if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
2831  Depth + 1))
2832  return true;
2833  APInt UndefLHS, ZeroLHS;
2834  if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
2835  Depth + 1))
2836  return true;
2837 
2838  KnownZero = ZeroLHS & ZeroRHS;
2839  KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS);
2840 
2841  // Attempt to avoid multi-use ops if we don't need anything from them.
2842  // TODO - use KnownUndef to relax the demandedelts?
2843  if (!DemandedElts.isAllOnes())
2844  if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
2845  return true;
2846  break;
2847  }
2848  case ISD::SHL:
2849  case ISD::SRL:
2850  case ISD::SRA:
2851  case ISD::ROTL:
2852  case ISD::ROTR: {
2853  SDValue Op0 = Op.getOperand(0);
2854  SDValue Op1 = Op.getOperand(1);
2855 
2856  APInt UndefRHS, ZeroRHS;
2857  if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
2858  Depth + 1))
2859  return true;
2860  APInt UndefLHS, ZeroLHS;
2861  if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
2862  Depth + 1))
2863  return true;
2864 
2865  KnownZero = ZeroLHS;
2866  KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop?
2867 
2868  // Attempt to avoid multi-use ops if we don't need anything from them.
2869  // TODO - use KnownUndef to relax the demandedelts?
2870  if (!DemandedElts.isAllOnes())
2871  if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
2872  return true;
2873  break;
2874  }
2875  case ISD::MUL:
2876  case ISD::AND: {
2877  SDValue Op0 = Op.getOperand(0);
2878  SDValue Op1 = Op.getOperand(1);
2879 
2880  APInt SrcUndef, SrcZero;
2881  if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO,
2882  Depth + 1))
2883  return true;
2884  if (SimplifyDemandedVectorElts(Op0, DemandedElts, KnownUndef, KnownZero,
2885  TLO, Depth + 1))
2886  return true;
2887 
2888  // If either side has a zero element, then the result element is zero, even
2889  // if the other is an UNDEF.
2890  // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros
2891  // and then handle 'and' nodes with the rest of the binop opcodes.
2892  KnownZero |= SrcZero;
2893  KnownUndef &= SrcUndef;
2894  KnownUndef &= ~KnownZero;
2895 
2896  // Attempt to avoid multi-use ops if we don't need anything from them.
2897  // TODO - use KnownUndef to relax the demandedelts?
2898  if (!DemandedElts.isAllOnes())
2899  if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
2900  return true;
2901  break;
2902  }
2903  case ISD::TRUNCATE:
2904  case ISD::SIGN_EXTEND:
2905  case ISD::ZERO_EXTEND:
2906  if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
2907  KnownZero, TLO, Depth + 1))
2908  return true;
2909 
2910  if (Op.getOpcode() == ISD::ZERO_EXTEND) {
2911  // zext(undef) upper bits are guaranteed to be zero.
2912  if (DemandedElts.isSubsetOf(KnownUndef))
2913  return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
2914  KnownUndef.clearAllBits();
2915  }
2916  break;
2917  default: {
2918  if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
2919  if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef,
2920  KnownZero, TLO, Depth))
2921  return true;
2922  } else {
2923  KnownBits Known;
2924  APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
2925  if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known,
2926  TLO, Depth, AssumeSingleUse))
2927  return true;
2928  }
2929  break;
2930  }
2931  }
2932  assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero");
2933 
2934  // Constant fold all undef cases.
2935  // TODO: Handle zero cases as well.
2936  if (DemandedElts.isSubsetOf(KnownUndef))
2937  return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
2938 
2939  return false;
2940 }
2941 
 2942 /// Determine which of the bits specified in Mask are known to be either zero
 2943 /// or one and return them in Known.
 2944 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
 2945  KnownBits &Known,
2946  const APInt &DemandedElts,
2947  const SelectionDAG &DAG,
2948  unsigned Depth) const {
2949  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2950  Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2951  Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2952  Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2953  "Should use MaskedValueIsZero if you don't know whether Op"
2954  " is a target node!");
2955  Known.resetAll();
2956 }
2957 
 2958 void TargetLowering::computeKnownBitsForTargetInstr(
 2959  GISelKnownBits &Analysis, Register R, KnownBits &Known,
2960  const APInt &DemandedElts, const MachineRegisterInfo &MRI,
2961  unsigned Depth) const {
2962  Known.resetAll();
2963 }
2964 
 2965 void TargetLowering::computeKnownBitsForFrameIndex(
 2966  const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const {
2967  // The low bits are known zero if the pointer is aligned.
2968  Known.Zero.setLowBits(Log2(MF.getFrameInfo().getObjectAlign(FrameIdx)));
2969 }
2970 
 2971 Align TargetLowering::computeKnownAlignForTargetInstr(
 2972  GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI,
2973  unsigned Depth) const {
2974  return Align(1);
2975 }
2976 
2977 /// This method can be implemented by targets that want to expose additional
2978 /// information about sign bits to the DAG Combiner.
 2979 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
 2980  const APInt &,
2981  const SelectionDAG &,
2982  unsigned Depth) const {
2983  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2984  Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2985  Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2986  Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2987  "Should use ComputeNumSignBits if you don't know whether Op"
2988  " is a target node!");
2989  return 1;
2990 }
2991 
 2992 unsigned TargetLowering::computeNumSignBitsForTargetInstr(
 2993  GISelKnownBits &Analysis, Register R, const APInt &DemandedElts,
2994  const MachineRegisterInfo &MRI, unsigned Depth) const {
2995  return 1;
2996 }
2997 
 2998 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
 2999  SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
3000  TargetLoweringOpt &TLO, unsigned Depth) const {
3001  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3002  Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3003  Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3004  Op.getOpcode() == ISD::INTRINSIC_VOID) &&
3005  "Should use SimplifyDemandedVectorElts if you don't know whether Op"
3006  " is a target node!");
3007  return false;
3008 }
3009 
 3010 bool TargetLowering::SimplifyDemandedBitsForTargetNode(
 3011  SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
3012  KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const {
3013  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3014  Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3015  Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3016  Op.getOpcode() == ISD::INTRINSIC_VOID) &&
3017  "Should use SimplifyDemandedBits if you don't know whether Op"
3018  " is a target node!");
3019  computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);
3020  return false;
3021 }
3022 
 3023 SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
 3024  SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
3025  SelectionDAG &DAG, unsigned Depth) const {
3026  assert(
3027  (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3028  Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3029  Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3030  Op.getOpcode() == ISD::INTRINSIC_VOID) &&
3031  "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
3032  " is a target node!");
3033  return SDValue();
3034 }
3035 
3036 SDValue
 3037 TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
 3038  SDValue N1, MutableArrayRef<int> Mask,
 3039  SelectionDAG &DAG) const {
3040  bool LegalMask = isShuffleMaskLegal(Mask, VT);
3041  if (!LegalMask) {
3042  std::swap(N0, N1);
 3043  ShuffleVectorSDNode::commuteMask(Mask);
 3044  LegalMask = isShuffleMaskLegal(Mask, VT);
3045  }
3046 
3047  if (!LegalMask)
3048  return SDValue();
3049 
3050  return DAG.getVectorShuffle(VT, DL, N0, N1, Mask);
3051 }
3052 
 3053 const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode *) const {
 3054  return nullptr;
3055 }
3056 
 3057 bool TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
 3058  SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
3059  bool PoisonOnly, unsigned Depth) const {
3060  assert(
3061  (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3062  Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3063  Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3064  Op.getOpcode() == ISD::INTRINSIC_VOID) &&
3065  "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op"
3066  " is a target node!");
3067  return false;
3068 }
3069 
3070 bool TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
3071  const SelectionDAG &DAG,
3072  bool SNaN,
3073  unsigned Depth) const {
3074  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3075  Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3076  Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3077  Op.getOpcode() == ISD::INTRINSIC_VOID) &&
3078  "Should use isKnownNeverNaN if you don't know whether Op"
3079  " is a target node!");
3080  return false;
3081 }
3082 
3083 // FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must
3084 // work with truncating build vectors and vectors with elements of less than
3085 // 8 bits.
3086 bool TargetLowering::isConstTrueVal(const SDNode *N) const {
3087  if (!N)
3088  return false;
3089 
3090  APInt CVal;
3091  if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
3092  CVal = CN->getAPIntValue();
3093  } else if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
3094  auto *CN = BV->getConstantSplatNode();
3095  if (!CN)
3096  return false;
3097 
3098  // If this is a truncating build vector, truncate the splat value.
3099  // Otherwise, we may fail to match the expected values below.
3100  unsigned BVEltWidth = BV->getValueType(0).getScalarSizeInBits();
3101  CVal = CN->getAPIntValue();
3102  if (BVEltWidth < CVal.getBitWidth())
3103  CVal = CVal.trunc(BVEltWidth);
3104  } else {
3105  return false;
3106  }
3107 
3108  switch (getBooleanContents(N->getValueType(0))) {
3109  case UndefinedBooleanContent:
3110  return CVal[0];
3111  case ZeroOrOneBooleanContent:
3112  return CVal.isOneValue();
3113  case ZeroOrNegativeOneBooleanContent:
3114  return CVal.isAllOnes();
3115  }
3116 
3117  llvm_unreachable("Invalid boolean contents");
3118 }
3119 
3120 bool TargetLowering::isConstFalseVal(const SDNode *N) const {
3121  if (!N)
3122  return false;
3123 
3124  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
3125  if (!CN) {
3126  const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
3127  if (!BV)
3128  return false;
3129 
3130  // Only interested in constant splats; we don't care about undef
3131  // elements when identifying boolean constants, and getConstantSplatNode
3132  // returns NULL if all ops are undef.
3133  CN = BV->getConstantSplatNode();
3134  if (!CN)
3135  return false;
3136  }
3137 
3138  if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
3139  return !CN->getAPIntValue()[0];
3140 
3141  return CN->isZero();
3142 }
3143 
3144 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
3145  bool SExt) const {
3146  if (VT == MVT::i1)
3147  return N->isOne();
3148 
3149  TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
3150  switch (Cnt) {
3151  case TargetLowering::ZeroOrOneBooleanContent:
3152  // An extended value of 1 is always true, unless its original type is i1,
3153  // in which case it will be sign extended to -1.
3154  return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
3155  case TargetLowering::UndefinedBooleanContent:
3156  case TargetLowering::ZeroOrNegativeOneBooleanContent:
3157  return N->isAllOnes() && SExt;
3158  }
3159  llvm_unreachable("Unexpected enumeration.");
3160 }
3161 
3162 /// This helper function of SimplifySetCC tries to optimize the comparison when
3163 /// either operand of the SetCC node is a bitwise-and instruction.
3164 SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
3165  ISD::CondCode Cond, const SDLoc &DL,
3166  DAGCombinerInfo &DCI) const {
3167  // Match these patterns in any of their permutations:
3168  // (X & Y) == Y
3169  // (X & Y) != Y
3170  if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
3171  std::swap(N0, N1);
3172 
3173  EVT OpVT = N0.getValueType();
3174  if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
3175  (Cond != ISD::SETEQ && Cond != ISD::SETNE))
3176  return SDValue();
3177 
3178  SDValue X, Y;
3179  if (N0.getOperand(0) == N1) {
3180  X = N0.getOperand(1);
3181  Y = N0.getOperand(0);
3182  } else if (N0.getOperand(1) == N1) {
3183  X = N0.getOperand(0);
3184  Y = N0.getOperand(1);
3185  } else {
3186  return SDValue();
3187  }
3188 
3189  SelectionDAG &DAG = DCI.DAG;
3190  SDValue Zero = DAG.getConstant(0, DL, OpVT);
3191  if (DAG.isKnownToBeAPowerOfTwo(Y)) {
3192  // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set.
3193  // Note that where Y is variable and is known to have at most one bit set
3194  // (for example, if it is Z & 1) we cannot do this; the expressions are not
3195  // equivalent when Y == 0.
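// Illustration (added): with Y == 8, a constant power of two,
//   (X & 8) == 8  -->  (X & 8) != 0
//   (X & 8) != 8  -->  (X & 8) == 0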
3196  assert(OpVT.isInteger());
3197  Cond = ISD::getSetCCInverse(Cond, OpVT);
3198  if (DCI.isBeforeLegalizeOps() ||
3199  isCondCodeLegal(Cond, N0.getSimpleValueType()))
3200  return DAG.getSetCC(DL, VT, N0, Zero, Cond);
3201  } else if (N0.hasOneUse() && hasAndNotCompare(Y)) {
3202  // If the target supports an 'and-not' or 'and-complement' logic operation,
3203  // try to use that to make a comparison operation more efficient.
3204  // But don't do this transform if the mask is a single bit because there are
3205  // more efficient ways to deal with that case (for example, 'bt' on x86 or
3206  // 'rlwinm' on PPC).
3207 
3208  // Bail out if the compare operand that we want to turn into a zero is
3209  // already a zero (otherwise, infinite loop).
3210  auto *YConst = dyn_cast<ConstantSDNode>(Y);
3211  if (YConst && YConst->isZero())
3212  return SDValue();
3213 
3214  // Transform this into: ~X & Y == 0.
3215  SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT);
3216  SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y);
3217  return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond);
3218  }
3219 
3220  return SDValue();
3221 }
3222 
3223 /// There are multiple IR patterns that could be checking whether a certain
3224 /// truncation of a signed number would be lossy or not. The pattern that is
3225 /// best at the IR level may not lower optimally. Thus, we want to unfold it.
3226 /// We are looking for the following pattern: (KeptBits is a constant)
3227 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
3228 /// KeptBits won't be bitwidth(x); that would be constant-folded to true/false.
3229 /// KeptBits also can't be 1; that would have been folded to %x dstcond 0.
3230 /// We will unfold it into the natural trunc+sext pattern:
3231 /// ((%x << C) a>> C) dstcond %x
3232 /// Where C = bitwidth(x) - KeptBits and C u< bitwidth(x)
3233 SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
3234  EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI,
3235  const SDLoc &DL) const {
3236  // We must be comparing with a constant.
3237  ConstantSDNode *C1;
3238  if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
3239  return SDValue();
3240 
3241  // N0 should be: add %x, (1 << (KeptBits-1))
3242  if (N0->getOpcode() != ISD::ADD)
3243  return SDValue();
3244 
3245  // And we must be 'add'ing a constant.
3246  ConstantSDNode *C01;
3247  if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1))))
3248  return SDValue();
3249 
3250  SDValue X = N0->getOperand(0);
3251  EVT XVT = X.getValueType();
3252 
3253  // Validate constants ...
3254 
3255  APInt I1 = C1->getAPIntValue();
3256 
3257  ISD::CondCode NewCond;
3258  if (Cond == ISD::CondCode::SETULT) {
3259  NewCond = ISD::CondCode::SETEQ;
3260  } else if (Cond == ISD::CondCode::SETULE) {
3261  NewCond = ISD::CondCode::SETEQ;
3262  // But need to 'canonicalize' the constant.
3263  I1 += 1;
3264  } else if (Cond == ISD::CondCode::SETUGT) {
3265  NewCond = ISD::CondCode::SETNE;
3266  // But need to 'canonicalize' the constant.
3267  I1 += 1;
3268  } else if (Cond == ISD::CondCode::SETUGE) {
3269  NewCond = ISD::CondCode::SETNE;
3270  } else
3271  return SDValue();
3272 
3273  APInt I01 = C01->getAPIntValue();
3274 
3275  auto checkConstants = [&I1, &I01]() -> bool {
3276  // Both of them must be powers of two, and the constant from the setcc must be bigger.
3277  return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2();
3278  };
3279 
3280  if (checkConstants()) {
3281  // Great, e.g. got icmp ult i16 (add i16 %x, 128), 256
3282  } else {
3283  // What if we invert constants? (and the target predicate)
3284  I1.negate();
3285  I01.negate();
3286  assert(XVT.isInteger());
3287  NewCond = getSetCCInverse(NewCond, XVT);
3288  if (!checkConstants())
3289  return SDValue();
3290  // Great, e.g. got icmp uge i16 (add i16 %x, -128), -256
3291  }
3292 
3293  // They are powers of two, so which bit is set?
3294  const unsigned KeptBits = I1.logBase2();
3295  const unsigned KeptBitsMinusOne = I01.logBase2();
3296 
3297  // Magic!
3298  if (KeptBits != (KeptBitsMinusOne + 1))
3299  return SDValue();
3300  assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable");
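// Worked instance (added, illustrative): for i16 %x with KeptBits == 8,
//   icmp ult i16 (add %x, 128), 256
// gives MaskedBits = 16 - 8 = 8 and unfolds to ((%x << 8) a>> 8) == %x.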
3301 
3302  // We don't want to do this in every single case.
3303  SelectionDAG &DAG = DCI.DAG;
3304  if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck(
3305  XVT, KeptBits))
3306  return SDValue();
3307 
3308  const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits;
3309  assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable");
3310 
3311  // Unfold into: ((%x << C) a>> C) cond %x
3312  // Where 'cond' will be either 'eq' or 'ne'.
3313  SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT);
3314  SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt);
3315  SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt);
3316  SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond);
3317 
3318  return T2;
3319 }
3320 
3321 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0
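// Illustration (added): with C == 0x8000 and a variable shift amount Y,
//   (X & (0x8000 l>> Y)) != 0  -->  ((X << Y) & 0x8000) != 0
// so the variable operand is shifted instead of the constant mask.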
3322 SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
3323  EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
3324  DAGCombinerInfo &DCI, const SDLoc &DL) const {
3325  assert(isConstOrConstSplat(N1C) &&
3326  isConstOrConstSplat(N1C)->getAPIntValue().isNullValue() &&
3327  "Should be a comparison with 0.");
3328  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3329  "Valid only for [in]equality comparisons.");
3330 
3331  unsigned NewShiftOpcode;
3332  SDValue X, C, Y;
3333 
3334  SelectionDAG &DAG = DCI.DAG;
3335  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3336 
3337  // Look for '(C l>>/<< Y)'.
3338  auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) {
3339  // The shift should be one-use.
3340  if (!V.hasOneUse())
3341  return false;
3342  unsigned OldShiftOpcode = V.getOpcode();
3343  switch (OldShiftOpcode) {
3344  case ISD::SHL:
3345  NewShiftOpcode = ISD::SRL;
3346  break;
3347  case ISD::SRL:
3348  NewShiftOpcode = ISD::SHL;
3349  break;
3350  default:
3351  return false; // must be a logical shift.
3352  }
3353  // We should be shifting a constant.
3354  // FIXME: best to use isConstantOrConstantVector().
3355  C = V.getOperand(0);
3356  ConstantSDNode *CC =
3357  isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
3358  if (!CC)
3359  return false;
3360  Y = V.getOperand(1);
3361 
3362  ConstantSDNode *XC =
3363  isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
3364  return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
3365  X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG);
3366  };
3367 
3368  // The LHS of the comparison should be a one-use 'and'.
3369  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
3370  return SDValue();
3371 
3372  X = N0.getOperand(0);
3373  SDValue Mask = N0.getOperand(1);
3374 
3375  // 'and' is commutative!
3376  if (!Match(Mask)) {
3377  std::swap(X, Mask);
3378  if (!Match(Mask))
3379  return SDValue();
3380  }
3381 
3382  EVT VT = X.getValueType();
3383 
3384  // Produce:
3385  // ((X 'OppositeShiftOpcode' Y) & C) Cond 0
3386  SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y);
3387  SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C);
3388  SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond);
3389  return T2;
3390 }
3391 
3392 /// Try to fold an equality comparison with a {add/sub/xor} binary operation as
3393 /// the 1st operand (N0). Callers are expected to swap the N0/N1 parameters to
3394 /// handle the commuted versions of these patterns.
3395 SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1,
3396  ISD::CondCode Cond, const SDLoc &DL,
3397  DAGCombinerInfo &DCI) const {
3398  unsigned BOpcode = N0.getOpcode();
3399  assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) &&
3400  "Unexpected binop");
3401  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode");
3402 
3403  // (X + Y) == X --> Y == 0
3404  // (X - Y) == X --> Y == 0
3405  // (X ^ Y) == X --> Y == 0
3406  SelectionDAG &DAG = DCI.DAG;
3407  EVT OpVT = N0.getValueType();
3408  SDValue X = N0.getOperand(0);
3409  SDValue Y = N0.getOperand(1);
3410  if (X == N1)
3411  return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond);
3412 
3413  if (Y != N1)
3414  return SDValue();
3415 
3416  // (X + Y) == Y --> X == 0
3417  // (X ^ Y) == Y --> X == 0
3418  if (BOpcode == ISD::ADD || BOpcode == ISD::XOR)
3419  return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond);
3420 
3421  // The shift would not be valid if the operands are boolean (i1).
3422  if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1)
3423  return SDValue();
3424 
3425  // (X - Y) == Y --> X == Y << 1
3426  EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(),
3427  !DCI.isBeforeLegalize());
3428  SDValue One = DAG.getConstant(1, DL, ShiftVT);
3429  SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One);
3430  if (!DCI.isCalledByLegalizer())
3431  DCI.AddToWorklist(YShl1.getNode());
3432  return DAG.getSetCC(DL, VT, X, YShl1, Cond);
3433 }
3434 
3435 static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT,
3436  SDValue N0, const APInt &C1,
3437  ISD::CondCode Cond, const SDLoc &dl,
3438  SelectionDAG &DAG) {
3439  // Look through truncs that don't change the value of a ctpop.
3440  // FIXME: Add vector support? Need to be careful with setcc result type below.
3441  SDValue CTPOP = N0;
3442  if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && !VT.isVector() &&
3443  N0.getOperand(0).getOpcode() == ISD::CTPOP)
3444  CTPOP = N0.getOperand(0);
3445 
3446  if (CTPOP.getOpcode() != ISD::CTPOP || !CTPOP.hasOneUse())
3447  return SDValue();
3448 
3449  EVT CTVT = CTPOP.getValueType();
3450  SDValue CTOp = CTPOP.getOperand(0);
3451 
3452  // If this is a vector CTPOP, keep the CTPOP if it is legal.
3453  // TODO: Should we check if CTPOP is legal(or custom) for scalars?
3454  if (VT.isVector() && TLI.isOperationLegal(ISD::CTPOP, CTVT))
3455  return SDValue();
3456 
3457  // (ctpop x) u< 2 -> (x & x-1) == 0
3458  // (ctpop x) u> 1 -> (x & x-1) != 0
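// Rationale (added): x & (x - 1) clears the lowest set bit of x, so it is
// zero iff x has at most one bit set; each extra pass in the loop below
// tolerates one more set bit.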
3459  if (Cond == ISD::SETULT || Cond == ISD::SETUGT) {
3460  unsigned CostLimit = TLI.getCustomCtpopCost(CTVT, Cond);
3461  if (C1.ugt(CostLimit + (Cond == ISD::SETULT)))
3462  return SDValue();
3463  if (C1 == 0 && (Cond == ISD::SETULT))
3464  return SDValue(); // This is handled elsewhere.
3465 
3466  unsigned Passes = C1.getLimitedValue() - (Cond == ISD::SETULT);
3467 
3468  SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
3469  SDValue Result = CTOp;
3470  for (unsigned i = 0; i < Passes; i++) {
3471  SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, Result, NegOne);
3472  Result = DAG.getNode(ISD::AND, dl, CTVT, Result, Add);
3473  }
3474  ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
3475  return DAG.getSetCC(dl, VT, Result, DAG.getConstant(0, dl, CTVT), CC);
3476  }
3477 
3478  // If ctpop is not supported, expand the ctpop-against-1 comparison into a power-of-2 test.
3479  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && C1 == 1) {
3480  // For scalars, keep CTPOP if it is legal or custom.
3481  if (!VT.isVector() && TLI.isOperationLegalOrCustom(ISD::CTPOP, CTVT))
3482  return SDValue();
3483  // This is based on X86's custom lowering for CTPOP which produces more
3484  // instructions than the expansion here.
3485 
3486  // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0)
3487  // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0)
3488  SDValue Zero = DAG.getConstant(0, dl, CTVT);
3489  SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
3490  assert(CTVT.isInteger());
3491  ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT);
3492  SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne);
3493  SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add);
3494  SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond);
3495  SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond);
3496  unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR;
3497  return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS);
3498  }
3499 
3500  return SDValue();
3501 }
3502 
3503 /// Try to simplify a setcc built with the specified operands and cc. If it is
3504 /// unable to simplify it, return a null SDValue.
3505 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
3506  ISD::CondCode Cond, bool foldBooleans,
3507  DAGCombinerInfo &DCI,
3508  const SDLoc &dl) const {
3509  SelectionDAG &DAG = DCI.DAG;
3510  const DataLayout &Layout = DAG.getDataLayout();
3511  EVT OpVT = N0.getValueType();
3512 
3513  // Constant fold or commute setcc.
3514  if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl))
3515  return Fold;
3516 
3517  // Ensure that the constant occurs on the RHS and fold constant comparisons.
3518  // TODO: Handle non-splat vector constants. All undef causes trouble.
3519  // FIXME: We can't yet fold constant scalable vector splats, so avoid an
3520  // infinite loop here when we encounter one.
3521  ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
3522  if (isConstOrConstSplat(N0) &&
3523  (!OpVT.isScalableVector() || !isConstOrConstSplat(N1)) &&
3524  (DCI.isBeforeLegalizeOps() ||
3525  isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
3526  return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
3527 
3528  // If we have a subtract with the same 2 non-constant operands as this setcc
3529  // -- but in reverse order -- then try to commute the operands of this setcc
3530  // to match. A matching pair of setcc (cmp) and sub may be combined into 1
3531  // instruction on some targets.
3532  if (!isConstOrConstSplat(N0) && !isConstOrConstSplat(N1) &&
3533  (DCI.isBeforeLegalizeOps() ||
3534  isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) &&
3535  DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N1, N0}) &&
3536  !DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N0, N1}))
3537  return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
3538 
3539  if (auto *N1C = isConstOrConstSplat(N1)) {
3540  const APInt &C1 = N1C->getAPIntValue();
3541 
3542  // Optimize some CTPOP cases.
3543  if (SDValue V = simplifySetCCWithCTPOP(*this, VT, N0, C1, Cond, dl, DAG))
3544  return V;
3545 
3546  // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
3547  // equality comparison, then we're just comparing whether X itself is
3548  // zero.
3549  if (N0.getOpcode() == ISD::SRL && (C1.isNullValue() || C1.isOneValue()) &&
3550  N0.getOperand(0).getOpcode() == ISD::CTLZ &&
3551  isPowerOf2_32(N0.getScalarValueSizeInBits())) {
3552  if (ConstantSDNode *ShAmt = isConstOrConstSplat(N0.getOperand(1))) {
3553  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3554  ShAmt->getAPIntValue() == Log2_32(N0.getScalarValueSizeInBits())) {
3555  if ((C1 == 0) == (Cond == ISD::SETEQ)) {
3556  // (srl (ctlz x), 5) == 0 -> X != 0
3557  // (srl (ctlz x), 5) != 1 -> X != 0
3558  Cond = ISD::SETNE;
3559  } else {
3560  // (srl (ctlz x), 5) != 0 -> X == 0
3561  // (srl (ctlz x), 5) == 1 -> X == 0
3562  Cond = ISD::SETEQ;
3563  }
3564  SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
3565  return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), Zero,
3566  Cond);
3567  }
3568  }
3569  }
3570  }
3571 
3572  // FIXME: Support vectors.
3573  if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
3574  const APInt &C1 = N1C->getAPIntValue();
3575 
3576  // (zext x) == C --> x == (trunc C)
3577  // (sext x) == C --> x == (trunc C)
3578  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3579  DCI.isBeforeLegalize() && N0->hasOneUse()) {
3580  unsigned MinBits = N0.getValueSizeInBits();
3581  SDValue PreExt;
3582  bool Signed = false;
3583  if (N0->getOpcode() == ISD::ZERO_EXTEND) {
3584  // ZExt
3585  MinBits = N0->getOperand(0).getValueSizeInBits();
3586  PreExt = N0->getOperand(0);
3587  } else if (N0->getOpcode() == ISD::AND) {
3588  // DAGCombine turns costly ZExts into ANDs
3589  if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
3590  if ((C->getAPIntValue()+1).isPowerOf2()) {
3591  MinBits = C->getAPIntValue().countTrailingOnes();
3592  PreExt = N0->getOperand(0);
3593  }
3594  } else if (N0->getOpcode() == ISD::SIGN_EXTEND) {
3595  // SExt
3596  MinBits = N0->getOperand(0).getValueSizeInBits();
3597  PreExt = N0->getOperand(0);
3598  Signed = true;
3599  } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
3600  // ZEXTLOAD / SEXTLOAD
3601  if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
3602  MinBits = LN0->getMemoryVT().getSizeInBits();
3603  PreExt = N0;
3604  } else if (LN0->getExtensionType() == ISD::SEXTLOAD) {
3605  Signed = true;
3606  MinBits = LN0->getMemoryVT().getSizeInBits();
3607  PreExt = N0;
3608  }
3609  }
3610 
3611  // Figure out how many bits we need to preserve this constant.
3612  unsigned ReqdBits = Signed ?
3613  C1.getBitWidth() - C1.getNumSignBits() + 1 :
3614  C1.getActiveBits();
3615 
3616  // Make sure we're not losing bits from the constant.
3617  if (MinBits > 0 &&
3618  MinBits < C1.getBitWidth() &&
3619  MinBits >= ReqdBits) {
3620  EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
3621  if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
3622  // Will get folded away.
3623  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt);
3624  if (MinBits == 1 && C1 == 1)
3625  // Invert the condition.
3626  return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1),
3627  Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
3628  SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
3629  return DAG.getSetCC(dl, VT, Trunc, C, Cond);
3630  }
3631 
3632  // If truncating the setcc operands is not desirable, we can still
3633  // simplify the expression in some cases:
3634  // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc)
3635  // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc))
3636  // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc))
3637  // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc)
3638  // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc))
3639  // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc)
3640  SDValue TopSetCC = N0->getOperand(0);
3641  unsigned N0Opc = N0->getOpcode();
3642  bool SExt = (N0Opc == ISD::SIGN_EXTEND);
3643  if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
3644  TopSetCC.getOpcode() == ISD::SETCC &&
3645  (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
3646  (isConstFalseVal(N1C) ||
3647  isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {
3648 
3649  bool Inverse = (N1C->isZero() && Cond == ISD::SETEQ) ||
3650  (!N1C->isZero() && Cond == ISD::SETNE);
3651 
3652  if (!Inverse)
3653  return TopSetCC;
3654 
3655  ISD::CondCode InvCond = ISD::getSetCCInverse(
3656  cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
3657  TopSetCC.getOperand(0).getValueType());
3658  return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
3659  TopSetCC.getOperand(1),
3660  InvCond);
3661  }
3662  }
3663  }
3664 
3665  // If the LHS is '(and load, const)', the RHS is 0, the test is for
3666  // equality or unsigned, and all 1 bits of the const are in the same
3667  // partial word, see if we can shorten the load.
3668  if (DCI.isBeforeLegalize() &&
3669  !ISD::isSignedIntSetCC(Cond) &&
3670  N0.getOpcode() == ISD::AND && C1 == 0 &&
3671  N0.getNode()->hasOneUse() &&
3672  isa<LoadSDNode>(N0.getOperand(0)) &&
3673  N0.getOperand(0).getNode()->hasOneUse() &&
3674  isa<ConstantSDNode>(N0.getOperand(1))) {
3675  LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
3676  APInt bestMask;
3677  unsigned bestWidth = 0, bestOffset = 0;
3678  if (Lod->isSimple() && Lod->isUnindexed()) {
3679  unsigned origWidth = N0.getValueSizeInBits();
3680  unsigned maskWidth = origWidth;
3681  // We can narrow (e.g.) 16-bit extending loads on a 32-bit target to
3682  // 8 bits, but we have to be careful...
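// Illustration (added, little-endian case): for
//   (and (load i16 p), 0xFF00) == 0
// the mask lives entirely in the high byte, so the test narrows to
//   (and (load i8 p+1), 0xFF) == 0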
3683  if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
3684  origWidth = Lod->getMemoryVT().getSizeInBits();
3685  const APInt &Mask = N0.getConstantOperandAPInt(1);
3686  for (unsigned width = origWidth / 2; width>=8; width /= 2) {
3687  APInt newMask = APInt::getLowBitsSet(maskWidth, width);
3688  for (unsigned offset=0; offset<origWidth/width; offset++) {
3689  if (Mask.isSubsetOf(newMask)) {
3690  if (Layout.isLittleEndian())
3691  bestOffset = (uint64_t)offset * (width/8);
3692  else
3693  bestOffset = (origWidth/width - offset - 1) * (width/8);
3694  bestMask = Mask.lshr(offset * (width/8) * 8);
3695  bestWidth = width;
3696  break;
3697  }
3698  newMask <<= width;
3699  }
3700  }
3701  }
3702  if (bestWidth) {
3703  EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth);
3704  if (newVT.isRound() &&
3705  shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) {
3706  SDValue Ptr = Lod->getBasePtr();
3707  if (bestOffset != 0)
3708  Ptr =
3709  DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(bestOffset), dl);
3710  SDValue NewLoad =
3711  DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
3712  Lod->getPointerInfo().getWithOffset(bestOffset),
3713  Lod->getOriginalAlign());
3714  return DAG.getSetCC(dl, VT,
3715  DAG.getNode(ISD::AND, dl, newVT, NewLoad,
3716  DAG.getConstant(bestMask.trunc(bestWidth),
3717  dl, newVT)),
3718  DAG.getConstant(0LL, dl, newVT), Cond);
3719  }
3720  }
3721  }
3722 
3723  // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
3724  if (N0.getOpcode() == ISD::ZERO_EXTEND) {
3725  unsigned InSize = N0.getOperand(0).getValueSizeInBits();
3726 
3727  // If the comparison constant has bits in the upper part, the
3728  // zero-extended value could never match.
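// Illustration (added): (zext i8 %x to i32) ==/!= 0x1FF can never/always
// hold, because the constant has set bits above bit 7 while the zext's
// upper bits are zero.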
3729  if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
3730  C1.getBitWidth() - InSize))) {
3731  switch (Cond) {
3732  case ISD::SETUGT:
3733  case ISD::SETUGE:
3734  case ISD::SETEQ:
3735  return DAG.getConstant(0, dl, VT);
3736  case ISD::SETULT:
3737  case ISD::SETULE:
3738  case ISD::SETNE:
3739  return DAG.getConstant(1, dl, VT);
3740  case ISD::SETGT:
3741  case ISD::SETGE:
3742  // True if the sign bit of C1 is set.
3743  return DAG.getConstant(C1.isNegative(), dl, VT);
3744  case ISD::SETLT:
3745  case ISD::SETLE:
3746  // True if the sign bit of C1 isn't set.
3747  return DAG.getConstant(C1.isNonNegative(), dl, VT);
3748  default:
3749  break;
3750  }
3751  }
3752 
3753  // Otherwise, we can perform the comparison with the low bits.
3754  switch (Cond) {
3755  case ISD::SETEQ:
3756  case ISD::SETNE:
3757  case ISD::SETUGT:
3758  case ISD::SETUGE:
3759  case ISD::SETULT:
3760  case ISD::SETULE: {
3761  EVT newVT = N0.getOperand(0).getValueType();
3762  if (DCI.isBeforeLegalizeOps() ||
3763  (isOperationLegal(ISD::SETCC, newVT) &&
3764  isCondCodeLegal(Cond, newVT.getSimpleVT()))) {
3765  EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT);
3766  SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);
3767 
3768  SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
3769  NewConst, Cond);
3770  return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
3771  }
3772  break;
3773  }
3774  default:
3775  break; // TODO: Be more careful with signed comparisons.
3776  }
3777  } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
3778  (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3779  !isSExtCheaperThanZExt(cast<VTSDNode>(N0.getOperand(1))->getVT(),
3780  OpVT)) {
3781  EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
3782  unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
3783  EVT ExtDstTy = N0.getValueType();
3784  unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
3785 
3786  // If the constant doesn't fit into the number of bits for the source of
3787  // the sign extension, it is impossible for both sides to be equal.
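// Illustration (added): a value sign-extended in-register from i8 lies in
// [-128, 127], so an equality comparison against 200 folds to a constant.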
3788  if (C1.getMinSignedBits() > ExtSrcTyBits)
3789  return DAG.getBoolConstant(Cond == ISD::SETNE, dl, VT, OpVT);
3790 
3791  assert(ExtDstTy == N0.getOperand(0).getValueType() &&
3792  ExtDstTy != ExtSrcTy && "Unexpected types!");
3793  APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
3794  SDValue ZextOp = DAG.getNode(ISD::AND, dl, ExtDstTy, N0.getOperand(0),
3795  DAG.getConstant(Imm, dl, ExtDstTy));
3796  if (!DCI.isCalledByLegalizer())
3797  DCI.AddToWorklist(ZextOp.getNode());
3798  // Otherwise, make this a use of a zext.
3799  return DAG.getSetCC(dl, VT, ZextOp,
3800  DAG.getConstant(C1 & Imm, dl, ExtDstTy), Cond);
3801  } else if ((N1C->isZero() || N1C->isOne()) &&
3802  (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3803  // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC
3804  if (N0.getOpcode() == ISD::SETCC &&
3805  isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) &&
3806  (N0.getValueType() == MVT::i1 ||
3807  getBooleanContents(N0.getOperand(0).getValueType()) ==
3808  ZeroOrOneBooleanContent)) {
3809  bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne());
3810  if (TrueWhenTrue)
3811  return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
3812  // Invert the condition.
3813  ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
3814  CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType());
3815  if (DCI.isBeforeLegalizeOps() ||
3816  isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
3817  return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
3818  }
3819 
3820  if ((N0.getOpcode() == ISD::XOR ||
3821  (N0.getOpcode() == ISD::AND &&
3822  N0.getOperand(0).getOpcode() == ISD::XOR &&
3823  N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
3824  isOneConstant(N0.getOperand(1))) {
3825  // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
3826  // can only do this if the top bits are known zero.
3827  unsigned BitWidth = N0.getValueSizeInBits();
3828  if (DAG.MaskedValueIsZero(N0,
3829  APInt::getHighBitsSet(BitWidth,
3830  BitWidth-1))) {
3831  // Okay, get the un-inverted input value.
3832  SDValue Val;
3833  if (N0.getOpcode() == ISD::XOR) {
3834  Val = N0.getOperand(0);
3835  } else {
3836  assert(N0.getOpcode() == ISD::AND &&
3837  N0.getOperand(0).getOpcode() == ISD::XOR);
3838  // ((X^1)&1)^1 -> X & 1
3839  Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
3840  N0.getOperand(0).getOperand(0),
3841  N0.getOperand(1));
3842  }
3843 
3844  return DAG.getSetCC(dl, VT, Val, N1,
3845  Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
3846  }
3847  } else if (N1C->isOne()) {
3848  SDValue Op0 = N0;
3849  if (Op0.getOpcode() == ISD::TRUNCATE)
3850  Op0 = Op0.getOperand(0);
3851 
3852  if ((Op0.getOpcode() == ISD::XOR) &&
3853  Op0.getOperand(0).getOpcode() == ISD::SETCC &&
3854  Op0.getOperand(1).getOpcode() == ISD::SETCC) {
3855  SDValue XorLHS = Op0.getOperand(0);
3856  SDValue XorRHS = Op0.getOperand(1);
3857  // Ensure that the input setccs return an i1 type or 0/1 value.
3858  if (Op0.getValueType() == MVT::i1 ||
3859  (getBooleanContents(XorLHS.getOperand(0).getValueType()) ==
3860  ZeroOrOneBooleanContent &&
3861  getBooleanContents(XorRHS.getOperand(0).getValueType()) ==
3862  ZeroOrOneBooleanContent)) {
3863  // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc)
3864  Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ;
3865  return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond);
3866  }
3867  }
3868  if (Op0.getOpcode() == ISD::AND && isOneConstant(Op0.getOperand(1))) {
3869  // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
3870  if (Op0.getValueType().bitsGT(VT))
3871  Op0 = DAG.getNode(ISD::AND, dl, VT,
3872  DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)),
3873  DAG.getConstant(1, dl, VT));
3874  else if (Op0.getValueType().bitsLT(VT))
3875  Op0 = DAG.getNode(ISD::AND, dl, VT,
3876  DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)),
3877  DAG.getConstant(1, dl, VT));
3878 
3879  return DAG.getSetCC(dl, VT, Op0,
3880  DAG.getConstant(0, dl, Op0.getValueType()),
3881  Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
3882  }
3883  if (Op0.getOpcode() == ISD::AssertZext &&
3884  cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1)
3885  return DAG.getSetCC(dl, VT, Op0,
3886  DAG.getConstant(0, dl, Op0.getValueType()),
3887  Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
3888  }
3889  }
3890 
3891  // Given:
3892  // icmp eq/ne (urem %x, %y), 0
3893  // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
3894  // icmp eq/ne %x, 0
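// Rationale (added): such an %x is 0 or a power of two, whose divisors are
// all powers of two; %y with two or more bits set is not a power of two,
// so the remainder is 0 exactly when %x is 0.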
3895  if (N0.getOpcode() == ISD::UREM && N1C->isZero() &&
3896  (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3897  KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0));
3898  KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1));
3899  if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
3900  return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond);
3901  }
3902 
3903  // Fold set_cc seteq (ashr X, BW-1), -1 -> set_cc setlt X, 0
3904  // and set_cc setne (ashr X, BW-1), -1 -> set_cc setge X, 0
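// Rationale (added): ashr X, BW-1 replicates the sign bit across all bits,
// so the result equals -1 exactly when X is negative.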
3905  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3906  N0.getOpcode() == ISD::SRA && isa<ConstantSDNode>(N0.getOperand(1)) &&
3907  N0.getConstantOperandAPInt(1) == OpVT.getScalarSizeInBits() - 1 &&
3908  N1C && N1C->isAllOnes()) {
3909  return DAG.getSetCC(dl, VT, N0.getOperand(0),
3910  DAG.getConstant(0, dl, OpVT),
3911  Cond == ISD::SETEQ ? ISD::SETLT : ISD::SETGE);
3912  }
3913 
3914  if (SDValue V =
3915  optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl))
3916  return V;
3917  }
3918 
3919  // These simplifications apply to splat vectors as well.
3920  // TODO: Handle more splat vector cases.
3921  if (auto *N1C = isConstOrConstSplat(N1)) {
3922  const APInt &C1 = N1C->getAPIntValue();
3923 
3924  APInt MinVal, MaxVal;
3925  unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits();
3926  if (ISD::isSignedIntSetCC(Cond)) {
3927  MinVal = APInt::getSignedMinValue(OperandBitSize);
3928  MaxVal = APInt::getSignedMaxValue(OperandBitSize);
3929  } else {
3930  MinVal = APInt::getMinValue(OperandBitSize);
3931  MaxVal = APInt::getMaxValue(OperandBitSize);
3932  }
3933 
3934  // Canonicalize GE/LE comparisons to use GT/LT comparisons.
3935  if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
3936  // X >= MIN --> true
3937  if (C1 == MinVal)
3938  return DAG.getBoolConstant(true, dl, VT, OpVT);
3939 
3940  if (!VT.isVector()) { // TODO: Support this for vectors.
3941  // X >= C0 --> X > (C0 - 1)
3942  APInt C = C1 - 1;
3943  ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
3944  if ((DCI.isBeforeLegalizeOps() ||
3945  isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
3946  (!N1C->isOpaque() || (C.getBitWidth() <= 64 &&
3947  isLegalICmpImmediate(C.getSExtValue())))) {
3948  return DAG.getSetCC(dl, VT, N0,
3949  DAG.getConstant(C, dl, N1.getValueType()),
3950  NewCC);
3951  }
3952  }
3953  }
3954 
3955  if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
3956  // X <= MAX --> true
3957  if (C1 == MaxVal)
3958  return DAG.getBoolConstant(true, dl, VT, OpVT);
3959 
3960  // X <= C0 --> X < (C0 + 1)
3961  if (!VT.isVector()) { // TODO: Support this for vectors.
3962  APInt C = C1 + 1;
3963  ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
3964  if ((DCI.isBeforeLegalizeOps() ||
3965  isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
3966  (!N1C->isOpaque() || (C.getBitWidth() <= 64 &&
3967  isLegalICmpImmediate(C.getSExtValue())))) {
3968  return DAG.getSetCC(dl, VT, N0,
3969  DAG.getConstant(C, dl, N1.getValueType()),
3970  NewCC);
3971  }
3972  }
3973  }
3974 
3975  if (Cond == ISD::SETLT || Cond == ISD::SETULT) {
3976  if (C1 == MinVal)
3977  return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false
3978 
3979  // TODO: Support this for vectors after legalize ops.
3980  if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
3981  // Canonicalize setlt X, Max --> setne X, Max
3982  if (C1 == MaxVal)
3983  return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
3984 
3985  // If we have setult X, 1, turn it into seteq X, 0
3986  if (C1 == MinVal+1)
3987  return DAG.getSetCC(dl, VT, N0,
3988  DAG.getConstant(MinVal, dl, N0.getValueType()),
3989  ISD::SETEQ);
3990  }
3991  }
3992 
3993  if (Cond == ISD::SETGT || Cond == ISD::SETUGT) {
3994  if (C1 == MaxVal)
3995  return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false
3996 
3997  // TODO: Support this for vectors after legalize ops.
3998  if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
3999  // Canonicalize setgt X, Min --> setne X, Min
4000  if (C1 == MinVal)
4001  return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
4002 
4003  // If we have setugt X, Max-1, turn it into seteq X, Max
4004  if (C1 == MaxVal-1)
4005  return DAG.getSetCC(dl, VT, N0,
4006  DAG.getConstant(MaxVal, dl, N0.getValueType()),
4007  ISD::SETEQ);
4008  }
4009  }
4010 
4011  if (Cond == ISD::SETEQ || Cond == ISD::SETNE) {
4012  // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0
4013  if (C1.isZero())
4014  if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
4015  VT, N0, N1, Cond, DCI, dl))
4016  return CC;
4017 
4018  // For all/any comparisons, replace or(x,shl(y,bw/2)) with and/or(x,y).
4019  // For example, when high 32-bits of i64 X are known clear:
4020  // all bits clear: (X | (Y<<32)) == 0 --> (X | Y) == 0
4021  // all bits set: (X | (Y<<32)) == -1 --> (X & Y) == -1
4022  bool CmpZero = N1C->getAPIntValue().isNullValue();
4023  bool CmpNegOne = N1C->getAPIntValue().isAllOnes();
4024  if ((CmpZero || CmpNegOne) && N0.hasOneUse()) {
4025  // Match or(lo,shl(hi,bw/2)) pattern.
4026  auto IsConcat = [&](SDValue V, SDValue &Lo, SDValue &Hi) {
4027  unsigned EltBits = V.getScalarValueSizeInBits();
4028  if (V.getOpcode() != ISD::OR || (EltBits % 2) != 0)
4029  return false;
4030  SDValue LHS = V.getOperand(0);
4031  SDValue RHS = V.getOperand(1);
4032  APInt HiBits = APInt::getHighBitsSet(EltBits, EltBits / 2);
4033  // The unshifted element must have zero upper bits.
4034  if (RHS.getOpcode() == ISD::SHL &&
4035  isa<ConstantSDNode>(RHS.getOperand(1)) &&
4036  RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
4037  DAG.MaskedValueIsZero(LHS, HiBits)) {
4038  Lo = LHS;
4039  Hi = RHS.getOperand(0);
4040  return true;
4041  }
4042  if (LHS.getOpcode() == ISD::SHL &&
4043  isa<ConstantSDNode>(LHS.getOperand(1)) &&
4044  LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
4045  DAG.MaskedValueIsZero(RHS, HiBits)) {
4046  Lo = RHS;
4047  Hi = LHS.getOperand(0);
4048  return true;
4049  }
4050  return false;
4051  };
4052 
4053  auto MergeConcat = [&](SDValue Lo, SDValue Hi) {
4054  unsigned EltBits = N0.getScalarValueSizeInBits();
4055  unsigned HalfBits = EltBits / 2;
4056  APInt HiBits = APInt::getHighBitsSet(EltBits, HalfBits);
4057  SDValue LoBits = DAG.getConstant(~HiBits, dl, OpVT);
4058  SDValue HiMask = DAG.getNode(ISD::AND, dl, OpVT, Hi, LoBits);
4059  SDValue NewN0 =
4060  DAG.getNode(CmpZero ? ISD::OR : ISD::AND, dl, OpVT, Lo, HiMask);
4061  SDValue NewN1 = CmpZero ? DAG.getConstant(0, dl, OpVT) : LoBits;
4062  return DAG.getSetCC(dl, VT, NewN0, NewN1, Cond);
4063  };
4064 
4065  SDValue Lo, Hi;
4066  if (IsConcat(N0, Lo, Hi))
4067  return MergeConcat(Lo, Hi);
4068 
4069  if (N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR) {
4070  SDValue Lo0, Lo1, Hi0, Hi1;
4071  if (IsConcat(N0.getOperand(0), Lo0, Hi0) &&
4072  IsConcat(N0.getOperand(1), Lo1, Hi1)) {
4073  return MergeConcat(DAG.getNode(N0.getOpcode(), dl, OpVT, Lo0, Lo1),
4074  DAG.getNode(N0.getOpcode(), dl, OpVT, Hi0, Hi1));
4075  }
4076  }
4077  }
4078  }
4079 
4080  // If we have "setcc X, C0", check to see if we can shrink the immediate
4081  // by changing cc.
4082  // TODO: Support this for vectors after legalize ops.
4083  if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
4084  // SETUGT X, SINTMAX -> SETLT X, 0
4085  // SETUGE X, SINTMIN -> SETLT X, 0
4086  if ((Cond == ISD::SETUGT && C1.isMaxSignedValue()) ||
4087  (Cond == ISD::SETUGE && C1.isMinSignedValue()))
4088  return DAG.getSetCC(dl, VT, N0,
4089  DAG.getConstant(0, dl, N1.getValueType()),
4090  ISD::SETLT);
4091 
4092  // SETULT X, SINTMIN -> SETGT X, -1
4093  // SETULE X, SINTMAX -> SETGT X, -1
4094  if ((Cond == ISD::SETULT && C1.isMinSignedValue()) ||
4095  (Cond == ISD::SETULE && C1.isMaxSignedValue()))
4096  return DAG.getSetCC(dl, VT, N0,
4097  DAG.getAllOnesConstant(dl, N1.getValueType()),
4098  ISD::SETGT);
4099  }
4100  }
4101 
4102  // Back to non-vector simplifications.
4103  // TODO: Can we do these for vector splats?
4104  if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
4105  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4106  const APInt &C1 = N1C->getAPIntValue();
4107  EVT ShValTy = N0.getValueType();
4108 
4109  // Fold bit comparisons when we can. This would produce an
4110  // incorrect value when boolean false is negative one, unless
4111  // the bitsize is 1, in which case the false value is the same
4112  // in practice regardless of the representation.
4113  if ((VT.getSizeInBits() == 1 ||
4114  getBooleanContents(N0.getValueType()) == ZeroOrOneBooleanContent) &&
4115  (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
4116  (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) &&
4117  N0.getOpcode() == ISD::AND) {
4118  if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
4119  EVT ShiftTy =
4120  getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize());
4121  if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3
4122  // Perform the xform if the AND RHS is a single bit.
4123  unsigned ShCt = AndRHS->getAPIntValue().logBase2();
4124  if (AndRHS->getAPIntValue().isPowerOf2() &&
4125  !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) {
4126  return DAG.getNode(ISD::TRUNCATE, dl, VT,
4127  DAG.getNode(ISD::SRL, dl, ShValTy, N0,
4128  DAG.getConstant(ShCt, dl, ShiftTy)));
4129  }
4130  } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
4131  // (X & 8) == 8 --> (X & 8) >> 3
4132  // Perform the xform if C1 is a single bit.
4133  unsigned ShCt = C1.logBase2();
4134  if (C1.isPowerOf2() &&
4135  !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) {
4136  return DAG.getNode(ISD::TRUNCATE, dl, VT,
4137  DAG.getNode(ISD::SRL, dl, ShValTy, N0,
4138  DAG.getConstant(ShCt, dl, ShiftTy)));
4139  }
4140  }
4141  }
4142  }
4143 
4144  if (C1.getMinSignedBits() <= 64 &&
4145  !isLegalICmpImmediate(C1.getSExtValue())) {
4146  EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize());
4147  // (X & -256) == 256 -> (X >> 8) == 1
4148  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
4149  N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
4150  if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
4151  const APInt &AndRHSC = AndRHS->getAPIntValue();
4152  if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
4153  unsigned ShiftBits = AndRHSC.countTrailingZeros();
4154  if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) {
4155  SDValue Shift =
4156  DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0),
4157  DAG.getConstant(ShiftBits, dl, ShiftTy));
4158  SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy);
4159  return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
4160  }
4161  }
4162  }
4163  } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
4164  Cond == ISD::SETULE || Cond == ISD::SETUGT) {
4165  bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
4166  // X < 0x100000000 -> (X >> 32) < 1
4167  // X >= 0x100000000 -> (X >> 32) >= 1
4168  // X <= 0x0ffffffff -> (X >> 32) < 1
4169  // X > 0x0ffffffff -> (X >> 32) >= 1
4170  unsigned ShiftBits;
4171  APInt NewC = C1;
4172  ISD::CondCode NewCond = Cond;
4173  if (AdjOne) {
4174  ShiftBits = C1.countTrailingOnes();
4175  NewC = NewC + 1;
4176  NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
4177  } else {
4178  ShiftBits = C1.countTrailingZeros();
4179  }
4180  NewC.lshrInPlace(ShiftBits);
4181  if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
4182  isLegalICmpImmediate(NewC.getSExtValue()) &&
4183  !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) {
4184  SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0,
4185  DAG.getConstant(ShiftBits, dl, ShiftTy));
4186  SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy);
4187  return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
4188  }
4189  }
4190  }
4191  }
4192 
4193  if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) {
4194  auto *CFP = cast<ConstantFPSDNode>(N1);
4195  assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value");
4196 
4197  // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the
4198  // constant if knowing that the operand is non-nan is enough. We prefer to
4199  // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
4200  // materialize 0.0.
4201  if (Cond == ISD::SETO || Cond == ISD::SETUO)
4202  return DAG.getSetCC(dl, VT, N0, N0, Cond);
4203 
4204  // setcc (fneg x), C -> setcc swap(pred) x, -C
4205  if (N0.getOpcode() == ISD::FNEG) {
4206  ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond);
4207  if (DCI.isBeforeLegalizeOps() ||
4208  isCondCodeLegal(SwapCond, N0.getSimpleValueType())) {
4209  SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1);
4210  return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond);
4211  }
4212  }
4213 
4214  // If the condition is not legal, see if we can find an equivalent one
4215  // which is legal.
4216  if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
4217  // If the comparison was an awkward floating-point == or != and one of
4218  // the comparison operands is infinity or negative infinity, convert the
4219  // condition to a less-awkward <= or >=.
4220  if (CFP->getValueAPF().isInfinity()) {
4221  bool IsNegInf = CFP->getValueAPF().isNegative();
4222  ISD::CondCode NewCond = ISD::SETCC_INVALID;
4223  switch (Cond) {
4224  case ISD::SETOEQ: NewCond = IsNegInf ? ISD::SETOLE : ISD::SETOGE; break;
4225  case ISD::SETUEQ: NewCond = IsNegInf ? ISD::SETULE : ISD::SETUGE; break;
4226  case ISD::SETUNE: NewCond = IsNegInf ? ISD::SETUGT : ISD::SETULT; break;
4227  case ISD::SETONE: NewCond = IsNegInf ? ISD::SETOGT : ISD::SETOLT; break;
4228  default: break;
4229  }
4230  if (NewCond != ISD::SETCC_INVALID &&
4231  isCondCodeLegal(NewCond, N0.getSimpleValueType()))
4232  return DAG.getSetCC(dl, VT, N0, N1, NewCond);
4233  }
4234  }
4235  }
4236 
4237  if (N0 == N1) {
4238  // The sext(setcc()) => setcc() optimization relies on the appropriate
4239  // constant being emitted.
4240  assert(!N0.getValueType().isInteger() &&
4241  "Integer types should be handled by FoldSetCC");
4242 
4243  bool EqTrue = ISD::isTrueWhenEqual(Cond);
4244  unsigned UOF = ISD::getUnorderedFlavor(Cond);
4245  if (UOF == 2) // FP operators that are undefined on NaNs.
4246  return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
4247  if (UOF == unsigned(EqTrue))
4248  return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
4249  // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO
4250  // if it is not already.
4251  ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
4252  if (NewCond != Cond &&
4253  (DCI.isBeforeLegalizeOps() ||
4254  isCondCodeLegal(NewCond, N0.getSimpleValueType())))
4255  return DAG.getSetCC(dl, VT, N0, N1, NewCond);
4256  }
4257 
4258  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
4259  N0.getValueType().isInteger()) {
4260  if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
4261  N0.getOpcode() == ISD::XOR) {
4262  // Simplify (X+Y) == (X+Z) --> Y == Z
4263  if (N0.getOpcode() == N1.getOpcode()) {
4264  if (N0.getOperand(0) == N1.getOperand(0))
4265  return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
4266  if (N0.getOperand(1) == N1.getOperand(1))
4267  return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
4268  if (isCommutativeBinOp(N0.getOpcode())) {
4269  // If X op Y == Y op X, try other combinations.
4270  if (N0.getOperand(0) == N1.getOperand(1))
4271  return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
4272  Cond);
4273  if (N0.getOperand(1) == N1.getOperand(0))
4274  return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
4275  Cond);
4276  }
4277  }
4278 
4279  // If RHS is a legal immediate value for a compare instruction, we need
4280  // to be careful about increasing register pressure needlessly.
4281  bool LegalRHSImm = false;
4282 
4283  if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
4284  if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
4285  // Turn (X+C1) == C2 --> X == C2-C1
4286  if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
4287  return DAG.getSetCC(dl, VT, N0.getOperand(0),
4288  DAG.getConstant(RHSC->getAPIntValue()-
4289  LHSR->getAPIntValue(),
4290  dl, N0.getValueType()), Cond);
4291  }
4292 
4293  // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
4294  if (N0.getOpcode() == ISD::XOR)
4295  // If we know that all of the inverted bits are zero, don't bother
4296  // performing the inversion.
4297  if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
4298  return
4299  DAG.getSetCC(dl, VT, N0.getOperand(0),
4300  DAG.getConstant(LHSR->getAPIntValue() ^
4301  RHSC->getAPIntValue(),
4302  dl, N0.getValueType()),
4303  Cond);
4304  }
4305 
4306  // Turn (C1-X) == C2 --> X == C1-C2
4307  if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
4308  if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
4309  return
4310  DAG.getSetCC(dl, VT, N0.getOperand(1),
4311  DAG.getConstant(SUBC->getAPIntValue() -
4312  RHSC->getAPIntValue(),
4313  dl, N0.getValueType()),
4314  Cond);
4315  }
4316  }
4317 
4318  // Could RHSC fold directly into a compare?
4319  if (RHSC->getValueType(0).getSizeInBits() <= 64)
4320  LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
4321  }
4322 
4323  // (X+Y) == X --> Y == 0 and similar folds.
4324  // Don't do this if X is an immediate that can fold into a cmp
4325  // instruction and X+Y has other uses. It could be an induction variable
4326  // chain, and the transform would increase register pressure.
4327  if (!LegalRHSImm || N0.hasOneUse())
4328  if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI))
4329  return V;
4330  }
4331 
4332  if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
4333  N1.getOpcode() == ISD::XOR)
4334  if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI))
4335  return V;
4336 
4337  if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI))
4338  return V;
4339  }
4340 
4341  // Fold remainder of division by a constant.
4342  if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) &&
4343  N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
4344  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
4345 
4346  // When division is cheap or optimizing for minimum size,
4347  // fall through to DIVREM creation by skipping this fold.
4348  if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttr(Attribute::MinSize)) {
4349  if (N0.getOpcode() == ISD::UREM) {
4350  if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl))
4351  return Folded;
4352  } else if (N0.getOpcode() == ISD::SREM) {
4353  if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl))
4354  return Folded;
4355  }
4356  }
4357  }
4358 
4359  // Fold away ALL boolean setcc's.
4360  if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) {
4361  SDValue Temp;
4362  switch (Cond) {
4363  default: llvm_unreachable("Unknown integer setcc!");
4364  case ISD::SETEQ: // X == Y -> ~(X^Y)
4365  Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1);
4366  N0 = DAG.getNOT(dl, Temp, OpVT);
4367  if (!DCI.isCalledByLegalizer())
4368  DCI.AddToWorklist(Temp.getNode());
4369  break;
4370  case ISD::SETNE: // X != Y --> (X^Y)
4371  N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1);
4372  break;
4373  case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y
4374  case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y
4375  Temp = DAG.getNOT(dl, N0, OpVT);
4376  N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp);
4377  if (!DCI.isCalledByLegalizer())
4378  DCI.AddToWorklist(Temp.getNode());
4379  break;
4380  case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X
4381  case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X
4382  Temp = DAG.getNOT(dl, N1, OpVT);
4383  N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp);
4384  if (!DCI.isCalledByLegalizer())
4385  DCI.AddToWorklist(Temp.getNode());
4386  break;
4387  case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y
4388  case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y
4389  Temp = DAG.getNOT(dl, N0, OpVT);
4390  N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp);
4391  if (!DCI.isCalledByLegalizer())
4392  DCI.AddToWorklist(Temp.getNode());
4393  break;
4394  case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X
4395  case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X
4396  Temp = DAG.getNOT(dl, N1, OpVT);
4397  N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp);
4398  break;
4399  }
4400  if (VT.getScalarType() != MVT::i1) {
4401  if (!DCI.isCalledByLegalizer())
4402  DCI.AddToWorklist(N0.getNode());
4403  // FIXME: If running after legalize, we probably can't do this.
4404  ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT));
4405  N0 = DAG.getNode(ExtendCode, dl, VT, N0);
4406  }
4407  return N0;
4408  }
4409 
4410  // Could not fold it.
4411  return SDValue();
4412 }
4413 
4414 /// Returns true (and the GlobalValue and the offset) if the node is a
4415 /// GlobalAddress + offset.
4416 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA,
4417  int64_t &Offset) const {
4418 
4419  SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode();
4420 
4421  if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) {
4422  GA = GASD->getGlobal();
4423  Offset += GASD->getOffset();
4424  return true;
4425  }
4426 
4427  if (N->getOpcode() == ISD::ADD) {
4428  SDValue N1 = N->getOperand(0);
4429  SDValue N2 = N->getOperand(1);
4430  if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
4431  if (auto *V = dyn_cast<ConstantSDNode>(N2)) {
4432  Offset += V->getSExtValue();
4433  return true;
4434  }
4435  } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
4436  if (auto *V = dyn_cast<ConstantSDNode>(N1)) {
4437  Offset += V->getSExtValue();
4438  return true;
4439  }
4440  }
4441  }
4442 
4443  return false;
4444 }
4445 
4447