LLVM  4.0.0
SelectionDAG.cpp
Go to the documentation of this file.
1 //===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the SelectionDAG class.
11 //
12 //===----------------------------------------------------------------------===//
13 
15 #include "SDNodeDbgValue.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/ADT/SetVector.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/DebugInfo.h"
32 #include "llvm/IR/DerivedTypes.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/GlobalAlias.h"
35 #include "llvm/IR/GlobalVariable.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/Support/Debug.h"
41 #include "llvm/Support/Mutex.h"
50 #include <algorithm>
51 #include <cmath>
52 #include <utility>
53 
54 using namespace llvm;
55 
56 /// makeVTList - Return an instance of the SDVTList struct initialized with the
57 /// specified members.
58 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
59  SDVTList Res = {VTs, NumVTs};
60  return Res;
61 }
62 
63 // Default null implementations of the callbacks.
66 
67 //===----------------------------------------------------------------------===//
68 // ConstantFPSDNode Class
69 //===----------------------------------------------------------------------===//
70 
71 /// isExactlyValue - We don't rely on operator== working on double values, as
72 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
73 /// As such, this method can be used to do an exact bit-for-bit comparison of
74 /// two floating point values.
76  return getValueAPF().bitwiseIsEqual(V);
77 }
78 
80  const APFloat& Val) {
81  assert(VT.isFloatingPoint() && "Can only convert between FP types");
82 
83  // convert modifies in place, so make a copy.
84  APFloat Val2 = APFloat(Val);
85  bool losesInfo;
86  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
88  &losesInfo);
89  return !losesInfo;
90 }
91 
92 //===----------------------------------------------------------------------===//
93 // ISD Namespace
94 //===----------------------------------------------------------------------===//
95 
96 bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
97  auto *BV = dyn_cast<BuildVectorSDNode>(N);
98  if (!BV)
99  return false;
100 
101  APInt SplatUndef;
102  unsigned SplatBitSize;
103  bool HasUndefs;
104  EVT EltVT = N->getValueType(0).getVectorElementType();
105  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs) &&
106  EltVT.getSizeInBits() >= SplatBitSize;
107 }
108 
109 // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
110 // specializations of the more general isConstantSplatVector()?
111 
113  // Look through a bit convert.
114  while (N->getOpcode() == ISD::BITCAST)
115  N = N->getOperand(0).getNode();
116 
117  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
118 
119  unsigned i = 0, e = N->getNumOperands();
120 
121  // Skip over all of the undef values.
122  while (i != e && N->getOperand(i).isUndef())
123  ++i;
124 
125  // Do not accept an all-undef vector.
126  if (i == e) return false;
127 
128  // Do not accept build_vectors that aren't all constants or which have non-~0
129  // elements. We have to be a bit careful here, as the type of the constant
130  // may not be the same as the type of the vector elements due to type
131  // legalization (the elements are promoted to a legal type for the target and
132  // a vector of a type may be legal when the base element type is not).
133  // We only want to check enough bits to cover the vector elements, because
134  // we care if the resultant vector is all ones, not whether the individual
135  // constants are.
136  SDValue NotZero = N->getOperand(i);
137  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
138  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
139  if (CN->getAPIntValue().countTrailingOnes() < EltSize)
140  return false;
141  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
142  if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
143  return false;
144  } else
145  return false;
146 
147  // Okay, we have at least one ~0 value, check to see if the rest match or are
148  // undefs. Even with the above element type twiddling, this should be OK, as
149  // the same type legalization should have applied to all the elements.
150  for (++i; i != e; ++i)
151  if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
152  return false;
153  return true;
154 }
155 
157  // Look through a bit convert.
158  while (N->getOpcode() == ISD::BITCAST)
159  N = N->getOperand(0).getNode();
160 
161  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
162 
163  bool IsAllUndef = true;
164  for (const SDValue &Op : N->op_values()) {
165  if (Op.isUndef())
166  continue;
167  IsAllUndef = false;
168  // Do not accept build_vectors that aren't all constants or which have non-0
169  // elements. We have to be a bit careful here, as the type of the constant
170  // may not be the same as the type of the vector elements due to type
171  // legalization (the elements are promoted to a legal type for the target
172  // and a vector of a type may be legal when the base element type is not).
173  // We only want to check enough bits to cover the vector elements, because
174  // we care if the resultant vector is all zeros, not whether the individual
175  // constants are.
176  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
177  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
178  if (CN->getAPIntValue().countTrailingZeros() < EltSize)
179  return false;
180  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
181  if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
182  return false;
183  } else
184  return false;
185  }
186 
187  // Do not accept an all-undef vector.
188  if (IsAllUndef)
189  return false;
190  return true;
191 }
192 
194  if (N->getOpcode() != ISD::BUILD_VECTOR)
195  return false;
196 
197  for (const SDValue &Op : N->op_values()) {
198  if (Op.isUndef())
199  continue;
200  if (!isa<ConstantSDNode>(Op))
201  return false;
202  }
203  return true;
204 }
205 
207  if (N->getOpcode() != ISD::BUILD_VECTOR)
208  return false;
209 
210  for (const SDValue &Op : N->op_values()) {
211  if (Op.isUndef())
212  continue;
213  if (!isa<ConstantFPSDNode>(Op))
214  return false;
215  }
216  return true;
217 }
218 
220  // Return false if the node has no operands.
221  // This is "logically inconsistent" with the definition of "all" but
222  // is probably the desired behavior.
223  if (N->getNumOperands() == 0)
224  return false;
225 
226  for (const SDValue &Op : N->op_values())
227  if (!Op.isUndef())
228  return false;
229 
230  return true;
231 }
232 
234  switch (ExtType) {
235  case ISD::EXTLOAD:
236  return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
237  case ISD::SEXTLOAD:
238  return ISD::SIGN_EXTEND;
239  case ISD::ZEXTLOAD:
240  return ISD::ZERO_EXTEND;
241  default:
242  break;
243  }
244 
245  llvm_unreachable("Invalid LoadExtType");
246 }
247 
249  // To perform this operation, we just need to swap the L and G bits of the
250  // operation.
251  unsigned OldL = (Operation >> 2) & 1;
252  unsigned OldG = (Operation >> 1) & 1;
253  return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
254  (OldL << 1) | // New G bit
255  (OldG << 2)); // New L bit.
256 }
257 
259  unsigned Operation = Op;
260  if (isInteger)
261  Operation ^= 7; // Flip L, G, E bits, but not U.
262  else
263  Operation ^= 15; // Flip all of the condition bits.
264 
265  if (Operation > ISD::SETTRUE2)
266  Operation &= ~8; // Don't let N and U bits get set.
267 
268  return ISD::CondCode(Operation);
269 }
270 
271 
272 /// For an integer comparison, return 1 if the comparison is a signed operation
273 /// and 2 if the result is an unsigned comparison. Return zero if the operation
274 /// does not depend on the sign of the input (setne and seteq).
275 static int isSignedOp(ISD::CondCode Opcode) {
276  switch (Opcode) {
277  default: llvm_unreachable("Illegal integer setcc operation!");
278  case ISD::SETEQ:
279  case ISD::SETNE: return 0;
280  case ISD::SETLT:
281  case ISD::SETLE:
282  case ISD::SETGT:
283  case ISD::SETGE: return 1;
284  case ISD::SETULT:
285  case ISD::SETULE:
286  case ISD::SETUGT:
287  case ISD::SETUGE: return 2;
288  }
289 }
290 
292  bool isInteger) {
293  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
294  // Cannot fold a signed integer setcc with an unsigned integer setcc.
295  return ISD::SETCC_INVALID;
296 
297  unsigned Op = Op1 | Op2; // Combine all of the condition bits.
298 
299  // If the N and U bits get set then the resultant comparison DOES suddenly
300  // care about orderedness, and is true when ordered.
301  if (Op > ISD::SETTRUE2)
302  Op &= ~16; // Clear the U bit if the N bit is set.
303 
304  // Canonicalize illegal integer setcc's.
305  if (isInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT
306  Op = ISD::SETNE;
307 
308  return ISD::CondCode(Op);
309 }
310 
312  bool isInteger) {
313  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
314  // Cannot fold a signed setcc with an unsigned setcc.
315  return ISD::SETCC_INVALID;
316 
317  // Combine all of the condition bits.
318  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
319 
320  // Canonicalize illegal integer setcc's.
321  if (isInteger) {
322  switch (Result) {
323  default: break;
324  case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT
325  case ISD::SETOEQ: // SETEQ & SETU[LG]E
326  case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE
327  case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE
328  case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE
329  }
330  }
331 
332  return Result;
333 }
334 
335 //===----------------------------------------------------------------------===//
336 // SDNode Profile Support
337 //===----------------------------------------------------------------------===//
338 
339 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
340 ///
341 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
342  ID.AddInteger(OpC);
343 }
344 
345 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
346 /// solely with their pointer.
348  ID.AddPointer(VTList.VTs);
349 }
350 
351 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
352 ///
354  ArrayRef<SDValue> Ops) {
355  for (auto& Op : Ops) {
356  ID.AddPointer(Op.getNode());
357  ID.AddInteger(Op.getResNo());
358  }
359 }
360 
361 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
362 ///
364  ArrayRef<SDUse> Ops) {
365  for (auto& Op : Ops) {
366  ID.AddPointer(Op.getNode());
367  ID.AddInteger(Op.getResNo());
368  }
369 }
370 
371 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
372  SDVTList VTList, ArrayRef<SDValue> OpList) {
373  AddNodeIDOpcode(ID, OpC);
374  AddNodeIDValueTypes(ID, VTList);
375  AddNodeIDOperands(ID, OpList);
376 }
377 
378 /// If this is an SDNode with special info, add this info to the NodeID data.
379 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
380  switch (N->getOpcode()) {
382  case ISD::ExternalSymbol:
383  case ISD::MCSymbol:
384  llvm_unreachable("Should only be used on nodes with operands");
385  default: break; // Normal nodes don't need extra info.
386  case ISD::TargetConstant:
387  case ISD::Constant: {
388  const ConstantSDNode *C = cast<ConstantSDNode>(N);
390  ID.AddBoolean(C->isOpaque());
391  break;
392  }
394  case ISD::ConstantFP: {
395  ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
396  break;
397  }
399  case ISD::GlobalAddress:
401  case ISD::GlobalTLSAddress: {
402  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
403  ID.AddPointer(GA->getGlobal());
404  ID.AddInteger(GA->getOffset());
405  ID.AddInteger(GA->getTargetFlags());
406  break;
407  }
408  case ISD::BasicBlock:
409  ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
410  break;
411  case ISD::Register:
412  ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
413  break;
414  case ISD::RegisterMask:
415  ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
416  break;
417  case ISD::SRCVALUE:
418  ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
419  break;
420  case ISD::FrameIndex:
422  ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
423  break;
424  case ISD::JumpTable:
426  ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
427  ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
428  break;
429  case ISD::ConstantPool:
431  const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
432  ID.AddInteger(CP->getAlignment());
433  ID.AddInteger(CP->getOffset());
434  if (CP->isMachineConstantPoolEntry())
436  else
437  ID.AddPointer(CP->getConstVal());
438  ID.AddInteger(CP->getTargetFlags());
439  break;
440  }
441  case ISD::TargetIndex: {
442  const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
443  ID.AddInteger(TI->getIndex());
444  ID.AddInteger(TI->getOffset());
445  ID.AddInteger(TI->getTargetFlags());
446  break;
447  }
448  case ISD::LOAD: {
449  const LoadSDNode *LD = cast<LoadSDNode>(N);
450  ID.AddInteger(LD->getMemoryVT().getRawBits());
451  ID.AddInteger(LD->getRawSubclassData());
453  break;
454  }
455  case ISD::STORE: {
456  const StoreSDNode *ST = cast<StoreSDNode>(N);
457  ID.AddInteger(ST->getMemoryVT().getRawBits());
458  ID.AddInteger(ST->getRawSubclassData());
460  break;
461  }
464  case ISD::ATOMIC_SWAP:
468  case ISD::ATOMIC_LOAD_OR:
475  case ISD::ATOMIC_LOAD:
476  case ISD::ATOMIC_STORE: {
477  const AtomicSDNode *AT = cast<AtomicSDNode>(N);
478  ID.AddInteger(AT->getMemoryVT().getRawBits());
479  ID.AddInteger(AT->getRawSubclassData());
481  break;
482  }
483  case ISD::PREFETCH: {
484  const MemSDNode *PF = cast<MemSDNode>(N);
486  break;
487  }
488  case ISD::VECTOR_SHUFFLE: {
489  const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
490  for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
491  i != e; ++i)
492  ID.AddInteger(SVN->getMaskElt(i));
493  break;
494  }
496  case ISD::BlockAddress: {
497  const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
498  ID.AddPointer(BA->getBlockAddress());
499  ID.AddInteger(BA->getOffset());
500  ID.AddInteger(BA->getTargetFlags());
501  break;
502  }
503  } // end switch (N->getOpcode())
504 
505  // Target specific memory nodes could also have address spaces to check.
506  if (N->isTargetMemoryOpcode())
507  ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
508 }
509 
510 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
511 /// data.
512 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
513  AddNodeIDOpcode(ID, N->getOpcode());
514  // Add the return value info.
515  AddNodeIDValueTypes(ID, N->getVTList());
516  // Add the operand info.
517  AddNodeIDOperands(ID, N->ops());
518 
519  // Handle SDNode leafs with special info.
520  AddNodeIDCustom(ID, N);
521 }
522 
523 //===----------------------------------------------------------------------===//
524 // SelectionDAG Class
525 //===----------------------------------------------------------------------===//
526 
527 /// doNotCSE - Return true if CSE should not be performed for this node.
528 static bool doNotCSE(SDNode *N) {
529  if (N->getValueType(0) == MVT::Glue)
530  return true; // Never CSE anything that produces a flag.
531 
532  switch (N->getOpcode()) {
533  default: break;
534  case ISD::HANDLENODE:
535  case ISD::EH_LABEL:
536  return true; // Never CSE these nodes.
537  }
538 
539  // Check that remaining values produced are not flags.
540  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
541  if (N->getValueType(i) == MVT::Glue)
542  return true; // Never CSE anything that produces a flag.
543 
544  return false;
545 }
546 
547 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
548 /// SelectionDAG.
550  // Create a dummy node (which is not added to allnodes), that adds a reference
551  // to the root node, preventing it from being deleted.
553 
554  SmallVector<SDNode*, 128> DeadNodes;
555 
556  // Add all obviously-dead nodes to the DeadNodes worklist.
557  for (SDNode &Node : allnodes())
558  if (Node.use_empty())
559  DeadNodes.push_back(&Node);
560 
561  RemoveDeadNodes(DeadNodes);
562 
563  // If the root changed (e.g. it was a dead load, update the root).
564  setRoot(Dummy.getValue());
565 }
566 
567 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
568 /// given list, and any nodes that become unreachable as a result.
570 
571  // Process the worklist, deleting the nodes and adding their uses to the
572  // worklist.
573  while (!DeadNodes.empty()) {
574  SDNode *N = DeadNodes.pop_back_val();
575 
576  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
577  DUL->NodeDeleted(N, nullptr);
578 
579  // Take the node out of the appropriate CSE map.
580  RemoveNodeFromCSEMaps(N);
581 
582  // Next, brutally remove the operand list. This is safe to do, as there are
583  // no cycles in the graph.
584  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
585  SDUse &Use = *I++;
586  SDNode *Operand = Use.getNode();
587  Use.set(SDValue());
588 
589  // Now that we removed this operand, see if there are no uses of it left.
590  if (Operand->use_empty())
591  DeadNodes.push_back(Operand);
592  }
593 
594  DeallocateNode(N);
595  }
596 }
597 
599  SmallVector<SDNode*, 16> DeadNodes(1, N);
600 
601  // Create a dummy node that adds a reference to the root node, preventing
602  // it from being deleted. (This matters if the root is an operand of the
603  // dead node.)
605 
606  RemoveDeadNodes(DeadNodes);
607 }
608 
610  // First take this out of the appropriate CSE map.
611  RemoveNodeFromCSEMaps(N);
612 
613  // Finally, remove uses due to operands of this node, remove from the
614  // AllNodes list, and delete the node.
615  DeleteNodeNotInCSEMaps(N);
616 }
617 
618 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
619  assert(N->getIterator() != AllNodes.begin() &&
620  "Cannot delete the entry node!");
621  assert(N->use_empty() && "Cannot delete a node that is not dead!");
622 
623  // Drop all of the operands and decrement used node's use counts.
624  N->DropOperands();
625 
626  DeallocateNode(N);
627 }
628 
629 void SDDbgInfo::erase(const SDNode *Node) {
630  DbgValMapType::iterator I = DbgValMap.find(Node);
631  if (I == DbgValMap.end())
632  return;
633  for (auto &Val: I->second)
634  Val->setIsInvalidated();
635  DbgValMap.erase(I);
636 }
637 
638 void SelectionDAG::DeallocateNode(SDNode *N) {
639  // If we have operands, deallocate them.
640  removeOperands(N);
641 
642  // Set the opcode to DELETED_NODE to help catch bugs when node
643  // memory is reallocated.
644  N->NodeType = ISD::DELETED_NODE;
645 
646  NodeAllocator.Deallocate(AllNodes.remove(N));
647 
648  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
649  // them and forget about that node.
650  DbgInfo->erase(N);
651 }
652 
653 #ifndef NDEBUG
654 /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
655 static void VerifySDNode(SDNode *N) {
656  switch (N->getOpcode()) {
657  default:
658  break;
659  case ISD::BUILD_PAIR: {
660  EVT VT = N->getValueType(0);
661  assert(N->getNumValues() == 1 && "Too many results!");
662  assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
663  "Wrong return type!");
664  assert(N->getNumOperands() == 2 && "Wrong number of operands!");
666  "Mismatched operand types!");
667  assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
668  "Wrong operand type!");
669  assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
670  "Wrong return type size");
671  break;
672  }
673  case ISD::BUILD_VECTOR: {
674  assert(N->getNumValues() == 1 && "Too many results!");
675  assert(N->getValueType(0).isVector() && "Wrong return type!");
677  "Wrong number of operands!");
678  EVT EltVT = N->getValueType(0).getVectorElementType();
679  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
680  assert((I->getValueType() == EltVT ||
681  (EltVT.isInteger() && I->getValueType().isInteger() &&
682  EltVT.bitsLE(I->getValueType()))) &&
683  "Wrong operand type!");
684  assert(I->getValueType() == N->getOperand(0).getValueType() &&
685  "Operands must all have the same type");
686  }
687  break;
688  }
689  }
690 }
691 #endif // NDEBUG
692 
693 /// \brief Insert a newly allocated node into the DAG.
694 ///
695 /// Handles insertion into the all nodes list and CSE map, as well as
696 /// verification and other common operations when a new node is allocated.
697 void SelectionDAG::InsertNode(SDNode *N) {
698  AllNodes.push_back(N);
699 #ifndef NDEBUG
700  N->PersistentId = NextPersistentId++;
701  VerifySDNode(N);
702 #endif
703 }
704 
705 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
706 /// correspond to it. This is useful when we're about to delete or repurpose
707 /// the node. We don't want future request for structurally identical nodes
708 /// to return N anymore.
709 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
710  bool Erased = false;
711  switch (N->getOpcode()) {
712  case ISD::HANDLENODE: return false; // noop.
713  case ISD::CONDCODE:
714  assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
715  "Cond code doesn't exist!");
716  Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
717  CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
718  break;
719  case ISD::ExternalSymbol:
720  Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
721  break;
723  ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
724  Erased = TargetExternalSymbols.erase(
725  std::pair<std::string,unsigned char>(ESN->getSymbol(),
726  ESN->getTargetFlags()));
727  break;
728  }
729  case ISD::MCSymbol: {
730  auto *MCSN = cast<MCSymbolSDNode>(N);
731  Erased = MCSymbols.erase(MCSN->getMCSymbol());
732  break;
733  }
734  case ISD::VALUETYPE: {
735  EVT VT = cast<VTSDNode>(N)->getVT();
736  if (VT.isExtended()) {
737  Erased = ExtendedValueTypeNodes.erase(VT);
738  } else {
739  Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
740  ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
741  }
742  break;
743  }
744  default:
745  // Remove it from the CSE Map.
746  assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
747  assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
748  Erased = CSEMap.RemoveNode(N);
749  break;
750  }
751 #ifndef NDEBUG
752  // Verify that the node was actually in one of the CSE maps, unless it has a
753  // flag result (which cannot be CSE'd) or is one of the special cases that are
754  // not subject to CSE.
755  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
756  !N->isMachineOpcode() && !doNotCSE(N)) {
757  N->dump(this);
758  dbgs() << "\n";
759  llvm_unreachable("Node is not in map!");
760  }
761 #endif
762  return Erased;
763 }
764 
765 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
766 /// maps and modified in place. Add it back to the CSE maps, unless an identical
767 /// node already exists, in which case transfer all its users to the existing
768 /// node. This transfer can potentially trigger recursive merging.
769 ///
770 void
771 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
772  // For node types that aren't CSE'd, just act as if no identical node
773  // already exists.
774  if (!doNotCSE(N)) {
775  SDNode *Existing = CSEMap.GetOrInsertNode(N);
776  if (Existing != N) {
777  // If there was already an existing matching node, use ReplaceAllUsesWith
778  // to replace the dead one with the existing one. This can cause
779  // recursive merging of other unrelated nodes down the line.
780  ReplaceAllUsesWith(N, Existing);
781 
782  // N is now dead. Inform the listeners and delete it.
783  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
784  DUL->NodeDeleted(N, Existing);
785  DeleteNodeNotInCSEMaps(N);
786  return;
787  }
788  }
789 
790  // If the node doesn't already exist, we updated it. Inform listeners.
791  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
792  DUL->NodeUpdated(N);
793 }
794 
795 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
796 /// were replaced with those specified. If this node is never memoized,
797 /// return null, otherwise return a pointer to the slot it would take. If a
798 /// node already exists with these operands, the slot will be non-null.
799 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
800  void *&InsertPos) {
801  if (doNotCSE(N))
802  return nullptr;
803 
804  SDValue Ops[] = { Op };
806  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
807  AddNodeIDCustom(ID, N);
808  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
809  if (Node)
810  if (const SDNodeFlags *Flags = N->getFlags())
811  Node->intersectFlagsWith(Flags);
812  return Node;
813 }
814 
815 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
816 /// were replaced with those specified. If this node is never memoized,
817 /// return null, otherwise return a pointer to the slot it would take. If a
818 /// node already exists with these operands, the slot will be non-null.
819 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
820  SDValue Op1, SDValue Op2,
821  void *&InsertPos) {
822  if (doNotCSE(N))
823  return nullptr;
824 
825  SDValue Ops[] = { Op1, Op2 };
827  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
828  AddNodeIDCustom(ID, N);
829  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
830  if (Node)
831  if (const SDNodeFlags *Flags = N->getFlags())
832  Node->intersectFlagsWith(Flags);
833  return Node;
834 }
835 
836 
837 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
838 /// were replaced with those specified. If this node is never memoized,
839 /// return null, otherwise return a pointer to the slot it would take. If a
840 /// node already exists with these operands, the slot will be non-null.
841 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
842  void *&InsertPos) {
843  if (doNotCSE(N))
844  return nullptr;
845 
847  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
848  AddNodeIDCustom(ID, N);
849  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
850  if (Node)
851  if (const SDNodeFlags *Flags = N->getFlags())
852  Node->intersectFlagsWith(Flags);
853  return Node;
854 }
855 
856 unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
857  Type *Ty = VT == MVT::iPTR ?
859  VT.getTypeForEVT(*getContext());
860 
861  return getDataLayout().getABITypeAlignment(Ty);
862 }
863 
864 // EntryNode could meaningfully have debug info if we can find it...
865 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
866  : TM(tm), TSI(nullptr), TLI(nullptr), OptLevel(OL),
867  EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
869  UpdateListeners(nullptr) {
870  InsertNode(&EntryNode);
871  DbgInfo = new SDDbgInfo();
872 }
873 
875  MF = &mf;
878  Context = &mf.getFunction()->getContext();
879 }
880 
882  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
883  allnodes_clear();
884  OperandRecycler.clear(OperandAllocator);
885  delete DbgInfo;
886 }
887 
888 void SelectionDAG::allnodes_clear() {
889  assert(&*AllNodes.begin() == &EntryNode);
890  AllNodes.remove(AllNodes.begin());
891  while (!AllNodes.empty())
892  DeallocateNode(&AllNodes.front());
893 #ifndef NDEBUG
894  NextPersistentId = 0;
895 #endif
896 }
897 
898 SDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, const SDLoc &DL,
899  SDVTList VTs, SDValue N1, SDValue N2,
900  const SDNodeFlags *Flags) {
901  SDValue Ops[] = {N1, N2};
902 
903  if (isBinOpWithFlags(Opcode)) {
904  // If no flags were passed in, use a default flags object.
905  SDNodeFlags F;
906  if (Flags == nullptr)
907  Flags = &F;
908 
909  auto *FN = newSDNode<BinaryWithFlagsSDNode>(Opcode, DL.getIROrder(),
910  DL.getDebugLoc(), VTs, *Flags);
911  createOperands(FN, Ops);
912 
913  return FN;
914  }
915 
916  auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
917  createOperands(N, Ops);
918  return N;
919 }
920 
921 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
922  void *&InsertPos) {
923  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
924  if (N) {
925  switch (N->getOpcode()) {
926  default: break;
927  case ISD::Constant:
928  case ISD::ConstantFP:
929  llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
930  "debug location. Use another overload.");
931  }
932  }
933  return N;
934 }
935 
936 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
937  const SDLoc &DL, void *&InsertPos) {
938  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
939  if (N) {
940  switch (N->getOpcode()) {
941  case ISD::Constant:
942  case ISD::ConstantFP:
943  // Erase debug location from the node if the node is used at several
944  // different places. Do not propagate one location to all uses as it
945  // will cause a worse single stepping debugging experience.
946  if (N->getDebugLoc() != DL.getDebugLoc())
947  N->setDebugLoc(DebugLoc());
948  break;
949  default:
950  // When the node's point of use is located earlier in the instruction
951  // sequence than its prior point of use, update its debug info to the
952  // earlier location.
953  if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
954  N->setDebugLoc(DL.getDebugLoc());
955  break;
956  }
957  }
958  return N;
959 }
960 
962  allnodes_clear();
963  OperandRecycler.clear(OperandAllocator);
964  OperandAllocator.Reset();
965  CSEMap.clear();
966 
967  ExtendedValueTypeNodes.clear();
968  ExternalSymbols.clear();
969  TargetExternalSymbols.clear();
970  MCSymbols.clear();
971  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
972  static_cast<CondCodeSDNode*>(nullptr));
973  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
974  static_cast<SDNode*>(nullptr));
975 
976  EntryNode.UseList = nullptr;
977  InsertNode(&EntryNode);
978  Root = getEntryNode();
979  DbgInfo->clear();
980 }
981 
983  return VT.bitsGT(Op.getValueType()) ?
984  getNode(ISD::ANY_EXTEND, DL, VT, Op) :
985  getNode(ISD::TRUNCATE, DL, VT, Op);
986 }
987 
989  return VT.bitsGT(Op.getValueType()) ?
990  getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
991  getNode(ISD::TRUNCATE, DL, VT, Op);
992 }
993 
995  return VT.bitsGT(Op.getValueType()) ?
996  getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
997  getNode(ISD::TRUNCATE, DL, VT, Op);
998 }
999 
1001  EVT OpVT) {
1002  if (VT.bitsLE(Op.getValueType()))
1003  return getNode(ISD::TRUNCATE, SL, VT, Op);
1004 
1006  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1007 }
1008 
// NOTE(review): signature line lost in extraction; presumably
// SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) —
// TODO confirm.
// Zero-extend "in register": keep Op's type but clear every bit above VT's
// width, implemented as an AND with a low-bits mask.
1010  assert(!VT.isVector() &&
1011  "getZeroExtendInReg should use the vector element type instead of "
1012  "the vector type!");
// Same width already — nothing to mask.
1013  if (Op.getValueType() == VT) return Op;
1014  unsigned BitWidth = Op.getScalarValueSizeInBits();
// Mask with the low VT.getSizeInBits() bits set; AND clears the rest.
1015  APInt Imm = APInt::getLowBitsSet(BitWidth,
1016  VT.getSizeInBits());
1017  return getNode(ISD::AND, DL, Op.getValueType(), Op,
1018  getConstant(Imm, DL, Op.getValueType()));
1019 }
1020 
1022  EVT VT) {
1023  assert(VT.isVector() && "This DAG node is restricted to vector types.");
1024  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
1025  "The sizes of the input and result must match in order to perform the "
1026  "extend in-register.");
1028  "The destination vector type must have fewer lanes than the input.");
1029  return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
1030 }
1031 
1033  EVT VT) {
1034  assert(VT.isVector() && "This DAG node is restricted to vector types.");
1035  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
1036  "The sizes of the input and result must match in order to perform the "
1037  "extend in-register.");
1039  "The destination vector type must have fewer lanes than the input.");
1040  return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
1041 }
1042 
1044  EVT VT) {
1045  assert(VT.isVector() && "This DAG node is restricted to vector types.");
1046  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
1047  "The sizes of the input and result must match in order to perform the "
1048  "extend in-register.");
1050  "The destination vector type must have fewer lanes than the input.");
1051  return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
1052 }
1053 
1054 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1055 ///
1057  EVT EltVT = VT.getScalarType();
1058  SDValue NegOne =
1060  return getNode(ISD::XOR, DL, VT, Val, NegOne);
1061 }
1062 
1064  EVT EltVT = VT.getScalarType();
1065  SDValue TrueValue;
1066  switch (TLI->getBooleanContents(VT)) {
1069  TrueValue = getConstant(1, DL, VT);
1070  break;
1072  TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
1073  VT);
1074  break;
1075  }
1076  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1077 }
1078 
1079 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
1080  bool isT, bool isO) {
1081  EVT EltVT = VT.getScalarType();
1082  assert((EltVT.getSizeInBits() >= 64 ||
1083  (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1084  "getConstant with a uint64_t value that doesn't fit in the type!");
1085  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1086 }
1087 
1089  bool isT, bool isO) {
1090  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1091 }
1092 
1094  EVT VT, bool isT, bool isO) {
1095  assert(VT.isInteger() && "Cannot create FP integer constant!");
1096 
1097  EVT EltVT = VT.getScalarType();
1098  const ConstantInt *Elt = &Val;
1099 
1100  // In some cases the vector type is legal but the element type is illegal and
1101  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
1102  // inserted value (the type does not need to match the vector element type).
1103  // Any extra bits introduced will be truncated away.
1104  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1106  EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1107  APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1108  Elt = ConstantInt::get(*getContext(), NewVal);
1109  }
1110  // In other cases the element type is illegal and needs to be expanded, for
1111  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1112  // the value into n parts and use a vector type with n-times the elements.
1113  // Then bitcast to the type requested.
1114  // Legalizing constants too early makes the DAGCombiner's job harder so we
1115  // only legalize if the DAG tells us we must produce legal types.
1116  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1117  TLI->getTypeAction(*getContext(), EltVT) ==
1119  const APInt &NewVal = Elt->getValue();
1120  EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1121  unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1122  unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1123  EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1124 
1125  // Check the temporary vector is the correct size. If this fails then
1126  // getTypeToTransformTo() probably returned a type whose size (in bits)
1127  // isn't a power-of-2 factor of the requested type size.
1128  assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1129 
1130  SmallVector<SDValue, 2> EltParts;
1131  for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
1132  EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
1133  .zextOrTrunc(ViaEltSizeInBits), DL,
1134  ViaEltVT, isT, isO));
1135  }
1136 
1137  // EltParts is currently in little endian order. If we actually want
1138  // big-endian order then reverse it now.
1139  if (getDataLayout().isBigEndian())
1140  std::reverse(EltParts.begin(), EltParts.end());
1141 
1142  // The elements must be reversed when the element order is different
1143  // to the endianness of the elements (because the BITCAST is itself a
1144  // vector shuffle in this situation). However, we do not need any code to
1145  // perform this reversal because getConstant() is producing a vector
1146  // splat.
1147  // This situation occurs in MIPS MSA.
1148 
1150  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1151  Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
1152  return getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1153  }
1154 
1155  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1156  "APInt size does not match type size!");
1157  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1159  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1160  ID.AddPointer(Elt);
1161  ID.AddBoolean(isO);
1162  void *IP = nullptr;
1163  SDNode *N = nullptr;
1164  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1165  if (!VT.isVector())
1166  return SDValue(N, 0);
1167 
1168  if (!N) {
1169  N = newSDNode<ConstantSDNode>(isT, isO, Elt, DL.getDebugLoc(), EltVT);
1170  CSEMap.InsertNode(N, IP);
1171  InsertNode(N);
1172  }
1173 
1174  SDValue Result(N, 0);
1175  if (VT.isVector())
1176  Result = getSplatBuildVector(VT, DL, Result);
1177  return Result;
1178 }
1179 
1181  bool isTarget) {
1182  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1183 }
1184 
1186  bool isTarget) {
1187  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1188 }
1189 
1191  EVT VT, bool isTarget) {
1192  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1193 
1194  EVT EltVT = VT.getScalarType();
1195 
1196  // Do the map lookup using the actual bit pattern for the floating point
1197  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1198  // we don't have issues with SNANs.
1199  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1201  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1202  ID.AddPointer(&V);
1203  void *IP = nullptr;
1204  SDNode *N = nullptr;
1205  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1206  if (!VT.isVector())
1207  return SDValue(N, 0);
1208 
1209  if (!N) {
1210  N = newSDNode<ConstantFPSDNode>(isTarget, &V, DL.getDebugLoc(), EltVT);
1211  CSEMap.InsertNode(N, IP);
1212  InsertNode(N);
1213  }
1214 
1215  SDValue Result(N, 0);
1216  if (VT.isVector())
1217  Result = getSplatBuildVector(VT, DL, Result);
1218  return Result;
1219 }
1220 
1221 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1222  bool isTarget) {
1223  EVT EltVT = VT.getScalarType();
1224  if (EltVT == MVT::f32)
1225  return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1226  else if (EltVT == MVT::f64)
1227  return getConstantFP(APFloat(Val), DL, VT, isTarget);
1228  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1229  EltVT == MVT::f16) {
1230  bool Ignored;
1231  APFloat APF = APFloat(Val);
1233  &Ignored);
1234  return getConstantFP(APF, DL, VT, isTarget);
1235  } else
1236  llvm_unreachable("Unsupported type in getConstantFP");
1237 }
1238 
1240  EVT VT, int64_t Offset, bool isTargetGA,
1241  unsigned char TargetFlags) {
1242  assert((TargetFlags == 0 || isTargetGA) &&
1243  "Cannot set target flags on target-independent globals");
1244 
1245  // Truncate (with sign-extension) the offset value to the pointer size.
1246  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1247  if (BitWidth < 64)
1248  Offset = SignExtend64(Offset, BitWidth);
1249 
1250  unsigned Opc;
1251  if (GV->isThreadLocal())
1253  else
1254  Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1255 
1257  AddNodeIDNode(ID, Opc, getVTList(VT), None);
1258  ID.AddPointer(GV);
1259  ID.AddInteger(Offset);
1260  ID.AddInteger(TargetFlags);
1261  void *IP = nullptr;
1262  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1263  return SDValue(E, 0);
1264 
1265  auto *N = newSDNode<GlobalAddressSDNode>(
1266  Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1267  CSEMap.InsertNode(N, IP);
1268  InsertNode(N);
1269  return SDValue(N, 0);
1270 }
1271 
// Get a (possibly target-specific) FrameIndex node for slot FI, CSE-unified
// so identical requests return the same node.
1272 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1273  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
// NOTE(review): a line was lost here in extraction — presumably the
// `FoldingSetNodeID ID;` declaration used below. TODO confirm.
1275  AddNodeIDNode(ID, Opc, getVTList(VT), None);
1276  ID.AddInteger(FI);
1277  void *IP = nullptr;
// Reuse an existing node with the same profile if one is already in CSEMap.
1278  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1279  return SDValue(E, 0);
1280 
1281  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1282  CSEMap.InsertNode(N, IP);
1283  InsertNode(N);
1284  return SDValue(N, 0);
1285 }
1286 
// Get a (possibly target-specific) JumpTable node for table JTI, CSE-unified.
// TargetFlags are only meaningful on the target form.
1287 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1288  unsigned char TargetFlags) {
1289  assert((TargetFlags == 0 || isTarget) &&
1290  "Cannot set target flags on target-independent jump tables");
1291  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
// NOTE(review): a line was lost here in extraction — presumably the
// `FoldingSetNodeID ID;` declaration used below. TODO confirm.
1293  AddNodeIDNode(ID, Opc, getVTList(VT), None);
1294  ID.AddInteger(JTI);
1295  ID.AddInteger(TargetFlags);
1296  void *IP = nullptr;
// Return the existing node if an identical one was already created.
1297  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1298  return SDValue(E, 0);
1299 
1300  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1301  CSEMap.InsertNode(N, IP);
1302  InsertNode(N);
1303  return SDValue(N, 0);
1304 }
1305 
1307  unsigned Alignment, int Offset,
1308  bool isTarget,
1309  unsigned char TargetFlags) {
1310  assert((TargetFlags == 0 || isTarget) &&
1311  "Cannot set target flags on target-independent globals");
1312  if (Alignment == 0)
1313  Alignment = MF->getFunction()->optForSize()
1316  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1318  AddNodeIDNode(ID, Opc, getVTList(VT), None);
1319  ID.AddInteger(Alignment);
1320  ID.AddInteger(Offset);
1321  ID.AddPointer(C);
1322  ID.AddInteger(TargetFlags);
1323  void *IP = nullptr;
1324  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1325  return SDValue(E, 0);
1326 
1327  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1328  TargetFlags);
1329  CSEMap.InsertNode(N, IP);
1330  InsertNode(N);
1331  return SDValue(N, 0);
1332 }
1333 
1334 
1336  unsigned Alignment, int Offset,
1337  bool isTarget,
1338  unsigned char TargetFlags) {
1339  assert((TargetFlags == 0 || isTarget) &&
1340  "Cannot set target flags on target-independent globals");
1341  if (Alignment == 0)
1342  Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
1343  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1345  AddNodeIDNode(ID, Opc, getVTList(VT), None);
1346  ID.AddInteger(Alignment);
1347  ID.AddInteger(Offset);
1348  C->addSelectionDAGCSEId(ID);
1349  ID.AddInteger(TargetFlags);
1350  void *IP = nullptr;
1351  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1352  return SDValue(E, 0);
1353 
1354  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1355  TargetFlags);
1356  CSEMap.InsertNode(N, IP);
1357  InsertNode(N);
1358  return SDValue(N, 0);
1359 }
1360 
1362  unsigned char TargetFlags) {
1365  ID.AddInteger(Index);
1366  ID.AddInteger(Offset);
1367  ID.AddInteger(TargetFlags);
1368  void *IP = nullptr;
1369  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1370  return SDValue(E, 0);
1371 
1372  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1373  CSEMap.InsertNode(N, IP);
1374  InsertNode(N);
1375  return SDValue(N, 0);
1376 }
1377 
1381  ID.AddPointer(MBB);
1382  void *IP = nullptr;
1383  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1384  return SDValue(E, 0);
1385 
1386  auto *N = newSDNode<BasicBlockSDNode>(MBB);
1387  CSEMap.InsertNode(N, IP);
1388  InsertNode(N);
1389  return SDValue(N, 0);
1390 }
1391 
// NOTE(review): signature line lost in extraction; presumably
// SelectionDAG::getValueType(EVT VT) — TODO confirm.
// Return the interned VTSDNode for VT. Simple VTs live in the index-keyed
// ValueTypeNodes vector; extended VTs in the ExtendedValueTypeNodes map.
// Grow the simple-VT table on demand so SimpleTy can index it.
1393  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1394  ValueTypeNodes.size())
1395  ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1396 
// N aliases the cache slot so a miss writes straight back into the table.
1397  SDNode *&N = VT.isExtended() ?
1398  ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1399 
1400  if (N) return SDValue(N, 0);
1401  N = newSDNode<VTSDNode>(VT);
1402  InsertNode(N);
1403  return SDValue(N, 0);
1404 }
1405 
// NOTE(review): signature line lost in extraction; presumably
// SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) — TODO confirm.
// Return the interned non-target ExternalSymbol node for Sym, creating it on
// first use. N aliases the map slot, so assignment fills the cache.
1407  SDNode *&N = ExternalSymbols[Sym];
1408  if (N) return SDValue(N, 0);
1409  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1410  InsertNode(N);
1411  return SDValue(N, 0);
1412 }
1413 
// NOTE(review): signature line lost in extraction; presumably
// SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) — TODO confirm.
// Return the interned MCSymbol node for Sym, creating it on first use.
// N aliases the map slot, so assignment fills the cache.
1415  SDNode *&N = MCSymbols[Sym];
1416  if (N)
1417  return SDValue(N, 0);
1418  N = newSDNode<MCSymbolSDNode>(Sym, VT);
1419  InsertNode(N);
1420  return SDValue(N, 0);
1421 }
1422 
1424  unsigned char TargetFlags) {
1425  SDNode *&N =
1426  TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
1427  TargetFlags)];
1428  if (N) return SDValue(N, 0);
1429  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1430  InsertNode(N);
1431  return SDValue(N, 0);
1432 }
1433 
// NOTE(review): signature line lost in extraction; presumably
// SelectionDAG::getCondCode(ISD::CondCode Cond) — TODO confirm.
// Return the interned CondCodeSDNode for Cond, using Cond as an index into
// the CondCodeNodes cache (grown on demand).
1435  if ((unsigned)Cond >= CondCodeNodes.size())
1436  CondCodeNodes.resize(Cond+1);
1437 
// Lazily create the node on first request for this condition code.
1438  if (!CondCodeNodes[Cond]) {
1439  auto *N = newSDNode<CondCodeSDNode>(Cond);
1440  CondCodeNodes[Cond] = N;
1441  InsertNode(N);
1442  }
1443 
1444  return SDValue(CondCodeNodes[Cond], 0);
1445 }
1446 
1447 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1448 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
1450  std::swap(N1, N2);
1452 }
1453 
1455  SDValue N2, ArrayRef<int> Mask) {
1456  assert(VT.getVectorNumElements() == Mask.size() &&
1457  "Must have the same number of vector elements as mask elements!");
1458  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1459  "Invalid VECTOR_SHUFFLE");
1460 
1461  // Canonicalize shuffle undef, undef -> undef
1462  if (N1.isUndef() && N2.isUndef())
1463  return getUNDEF(VT);
1464 
1465  // Validate that all indices in Mask are within the range of the elements
1466  // input to the shuffle.
1467  int NElts = Mask.size();
1468  assert(all_of(Mask, [&](int M) { return M < (NElts * 2); }) &&
1469  "Index out of range");
1470 
1471  // Copy the mask so we can do any needed cleanup.
1472  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1473 
1474  // Canonicalize shuffle v, v -> v, undef
1475  if (N1 == N2) {
1476  N2 = getUNDEF(VT);
1477  for (int i = 0; i != NElts; ++i)
1478  if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1479  }
1480 
1481  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1482  if (N1.isUndef())
1483  commuteShuffle(N1, N2, MaskVec);
1484 
1485  // If shuffling a splat, try to blend the splat instead. We do this here so
1486  // that even when this arises during lowering we don't have to re-handle it.
1487  auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1488  BitVector UndefElements;
1489  SDValue Splat = BV->getSplatValue(&UndefElements);
1490  if (!Splat)
1491  return;
1492 
1493  for (int i = 0; i < NElts; ++i) {
1494  if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1495  continue;
1496 
1497  // If this input comes from undef, mark it as such.
1498  if (UndefElements[MaskVec[i] - Offset]) {
1499  MaskVec[i] = -1;
1500  continue;
1501  }
1502 
1503  // If we can blend a non-undef lane, use that instead.
1504  if (!UndefElements[i])
1505  MaskVec[i] = i + Offset;
1506  }
1507  };
1508  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1509  BlendSplat(N1BV, 0);
1510  if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1511  BlendSplat(N2BV, NElts);
1512 
1513  // Canonicalize all index into lhs, -> shuffle lhs, undef
1514  // Canonicalize all index into rhs, -> shuffle rhs, undef
1515  bool AllLHS = true, AllRHS = true;
1516  bool N2Undef = N2.isUndef();
1517  for (int i = 0; i != NElts; ++i) {
1518  if (MaskVec[i] >= NElts) {
1519  if (N2Undef)
1520  MaskVec[i] = -1;
1521  else
1522  AllLHS = false;
1523  } else if (MaskVec[i] >= 0) {
1524  AllRHS = false;
1525  }
1526  }
1527  if (AllLHS && AllRHS)
1528  return getUNDEF(VT);
1529  if (AllLHS && !N2Undef)
1530  N2 = getUNDEF(VT);
1531  if (AllRHS) {
1532  N1 = getUNDEF(VT);
1533  commuteShuffle(N1, N2, MaskVec);
1534  }
1535  // Reset our undef status after accounting for the mask.
1536  N2Undef = N2.isUndef();
1537  // Re-check whether both sides ended up undef.
1538  if (N1.isUndef() && N2Undef)
1539  return getUNDEF(VT);
1540 
1541  // If Identity shuffle return that node.
1542  bool Identity = true, AllSame = true;
1543  for (int i = 0; i != NElts; ++i) {
1544  if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1545  if (MaskVec[i] != MaskVec[0]) AllSame = false;
1546  }
1547  if (Identity && NElts)
1548  return N1;
1549 
1550  // Shuffling a constant splat doesn't change the result.
1551  if (N2Undef) {
1552  SDValue V = N1;
1553 
1554  // Look through any bitcasts. We check that these don't change the number
1555  // (and size) of elements and just changes their types.
1556  while (V.getOpcode() == ISD::BITCAST)
1557  V = V->getOperand(0);
1558 
1559  // A splat should always show up as a build vector node.
1560  if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1561  BitVector UndefElements;
1562  SDValue Splat = BV->getSplatValue(&UndefElements);
1563  // If this is a splat of an undef, shuffling it is also undef.
1564  if (Splat && Splat.isUndef())
1565  return getUNDEF(VT);
1566 
1567  bool SameNumElts =
1569 
1570  // We only have a splat which can skip shuffles if there is a splatted
1571  // value and no undef lanes rearranged by the shuffle.
1572  if (Splat && UndefElements.none()) {
1573  // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1574  // number of elements match or the value splatted is a zero constant.
1575  if (SameNumElts)
1576  return N1;
1577  if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1578  if (C->isNullValue())
1579  return N1;
1580  }
1581 
1582  // If the shuffle itself creates a splat, build the vector directly.
1583  if (AllSame && SameNumElts) {
1584  EVT BuildVT = BV->getValueType(0);
1585  const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1586  SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1587 
1588  // We may have jumped through bitcasts, so the type of the
1589  // BUILD_VECTOR may not match the type of the shuffle.
1590  if (BuildVT != VT)
1591  NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1592  return NewBV;
1593  }
1594  }
1595  }
1596 
1598  SDValue Ops[2] = { N1, N2 };
1600  for (int i = 0; i != NElts; ++i)
1601  ID.AddInteger(MaskVec[i]);
1602 
1603  void* IP = nullptr;
1604  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1605  return SDValue(E, 0);
1606 
1607  // Allocate the mask array for the node out of the BumpPtrAllocator, since
1608  // SDNode doesn't have access to it. This memory will be "leaked" when
1609  // the node is deallocated, but recovered when the NodeAllocator is released.
1610  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1611  std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc);
1612 
1613  auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1614  dl.getDebugLoc(), MaskAlloc);
1615  createOperands(N, Ops);
1616 
1617  CSEMap.InsertNode(N, IP);
1618  InsertNode(N);
1619  return SDValue(N, 0);
1620 }
1621 
1623  MVT VT = SV.getSimpleValueType(0);
1624  SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1626 
1627  SDValue Op0 = SV.getOperand(0);
1628  SDValue Op1 = SV.getOperand(1);
1629  return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1630 }
1631 
1635  ID.AddInteger(RegNo);
1636  void *IP = nullptr;
1637  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1638  return SDValue(E, 0);
1639 
1640  auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1641  CSEMap.InsertNode(N, IP);
1642  InsertNode(N);
1643  return SDValue(N, 0);
1644 }
1645 
1649  ID.AddPointer(RegMask);
1650  void *IP = nullptr;
1651  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1652  return SDValue(E, 0);
1653 
1654  auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
1655  CSEMap.InsertNode(N, IP);
1656  InsertNode(N);
1657  return SDValue(N, 0);
1658 }
1659 
1661  MCSymbol *Label) {
1663  SDValue Ops[] = { Root };
1665  ID.AddPointer(Label);
1666  void *IP = nullptr;
1667  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1668  return SDValue(E, 0);
1669 
1670  auto *N = newSDNode<EHLabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label);
1671  createOperands(N, Ops);
1672 
1673  CSEMap.InsertNode(N, IP);
1674  InsertNode(N);
1675  return SDValue(N, 0);
1676 }
1677 
1679  int64_t Offset,
1680  bool isTarget,
1681  unsigned char TargetFlags) {
1682  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1683 
1685  AddNodeIDNode(ID, Opc, getVTList(VT), None);
1686  ID.AddPointer(BA);
1687  ID.AddInteger(Offset);
1688  ID.AddInteger(TargetFlags);
1689  void *IP = nullptr;
1690  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1691  return SDValue(E, 0);
1692 
1693  auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
1694  CSEMap.InsertNode(N, IP);
1695  InsertNode(N);
1696  return SDValue(N, 0);
1697 }
1698 
1700  assert((!V || V->getType()->isPointerTy()) &&
1701  "SrcValue is not a pointer?");
1702 
1705  ID.AddPointer(V);
1706 
1707  void *IP = nullptr;
1708  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1709  return SDValue(E, 0);
1710 
1711  auto *N = newSDNode<SrcValueSDNode>(V);
1712  CSEMap.InsertNode(N, IP);
1713  InsertNode(N);
1714  return SDValue(N, 0);
1715 }
1716 
1720  ID.AddPointer(MD);
1721 
1722  void *IP = nullptr;
1723  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1724  return SDValue(E, 0);
1725 
1726  auto *N = newSDNode<MDNodeSDNode>(MD);
1727  CSEMap.InsertNode(N, IP);
1728  InsertNode(N);
1729  return SDValue(N, 0);
1730 }
1731 
// NOTE(review): signature line lost in extraction; presumably
// SelectionDAG::getBitcast(EVT VT, SDValue V) — TODO confirm.
// Bitcast V to VT, returning V unchanged when the type already matches.
1733  if (VT == V.getValueType())
1734  return V;
1735 
1736  return getNode(ISD::BITCAST, SDLoc(V), VT, V);
1737 }
1738 
1740  unsigned SrcAS, unsigned DestAS) {
1741  SDValue Ops[] = {Ptr};
1744  ID.AddInteger(SrcAS);
1745  ID.AddInteger(DestAS);
1746 
1747  void *IP = nullptr;
1748  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1749  return SDValue(E, 0);
1750 
1751  auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
1752  VT, SrcAS, DestAS);
1753  createOperands(N, Ops);
1754 
1755  CSEMap.InsertNode(N, IP);
1756  InsertNode(N);
1757  return SDValue(N, 0);
1758 }
1759 
1760 /// getShiftAmountOperand - Return the specified value casted to
1761 /// the target's desired shift amount type.
// NOTE(review): signature line lost in extraction; presumably
// SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) — TODO confirm.
// Cast Op to the target's preferred shift-amount type for LHSTy.
1763  EVT OpTy = Op.getValueType();
1764  EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
// Already the right type, or a vector shift amount — leave untouched.
1765  if (OpTy == ShTy || OpTy.isVector()) return Op;
1766 
1767  return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
1768 }
1769 
1771  SDLoc dl(Node);
1772  const TargetLowering &TLI = getTargetLoweringInfo();
1773  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
1774  EVT VT = Node->getValueType(0);
1775  SDValue Tmp1 = Node->getOperand(0);
1776  SDValue Tmp2 = Node->getOperand(1);
1777  unsigned Align = Node->getConstantOperandVal(3);
1778 
1779  SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
1780  Tmp2, MachinePointerInfo(V));
1781  SDValue VAList = VAListLoad;
1782 
1783  if (Align > TLI.getMinStackArgumentAlignment()) {
1784  assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
1785 
1786  VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1787  getConstant(Align - 1, dl, VAList.getValueType()));
1788 
1789  VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList,
1790  getConstant(-(int64_t)Align, dl, VAList.getValueType()));
1791  }
1792 
1793  // Increment the pointer, VAList, to the next vaarg
1794  Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1795  getConstant(getDataLayout().getTypeAllocSize(
1796  VT.getTypeForEVT(*getContext())),
1797  dl, VAList.getValueType()));
1798  // Store the incremented VAList to the legalized pointer
1799  Tmp1 =
1800  getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
1801  // Load the actual argument out of the pointer VAList
1802  return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
1803 }
1804 
1806  SDLoc dl(Node);
1807  const TargetLowering &TLI = getTargetLoweringInfo();
1808  // This defaults to loading a pointer from the input and storing it to the
1809  // output, returning the chain.
1810  const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
1811  const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
1812  SDValue Tmp1 =
1813  getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
1814  Node->getOperand(2), MachinePointerInfo(VS));
1815  return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
1816  MachinePointerInfo(VD));
1817 }
1818 
1821  unsigned ByteSize = VT.getStoreSize();
1822  Type *Ty = VT.getTypeForEVT(*getContext());
1823  unsigned StackAlign =
1824  std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
1825 
1826  int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
1827  return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
1828 }
1829 
1831  unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
1832  Type *Ty1 = VT1.getTypeForEVT(*getContext());
1833  Type *Ty2 = VT2.getTypeForEVT(*getContext());
1834  const DataLayout &DL = getDataLayout();
1835  unsigned Align =
1836  std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
1837 
1839  int FrameIdx = MFI.CreateStackObject(Bytes, Align, false);
1840  return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
1841 }
1842 
1844  ISD::CondCode Cond, const SDLoc &dl) {
1845  // These setcc operations always fold.
1846  switch (Cond) {
1847  default: break;
1848  case ISD::SETFALSE:
1849  case ISD::SETFALSE2: return getConstant(0, dl, VT);
1850  case ISD::SETTRUE:
1851  case ISD::SETTRUE2: {
1853  TLI->getBooleanContents(N1->getValueType(0));
1854  return getConstant(
1855  Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
1856  VT);
1857  }
1858 
1859  case ISD::SETOEQ:
1860  case ISD::SETOGT:
1861  case ISD::SETOGE:
1862  case ISD::SETOLT:
1863  case ISD::SETOLE:
1864  case ISD::SETONE:
1865  case ISD::SETO:
1866  case ISD::SETUO:
1867  case ISD::SETUEQ:
1868  case ISD::SETUNE:
1869  assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1870  break;
1871  }
1872 
1873  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
1874  const APInt &C2 = N2C->getAPIntValue();
1875  if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
1876  const APInt &C1 = N1C->getAPIntValue();
1877 
1878  switch (Cond) {
1879  default: llvm_unreachable("Unknown integer setcc!");
1880  case ISD::SETEQ: return getConstant(C1 == C2, dl, VT);
1881  case ISD::SETNE: return getConstant(C1 != C2, dl, VT);
1882  case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT);
1883  case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT);
1884  case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT);
1885  case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT);
1886  case ISD::SETLT: return getConstant(C1.slt(C2), dl, VT);
1887  case ISD::SETGT: return getConstant(C1.sgt(C2), dl, VT);
1888  case ISD::SETLE: return getConstant(C1.sle(C2), dl, VT);
1889  case ISD::SETGE: return getConstant(C1.sge(C2), dl, VT);
1890  }
1891  }
1892  }
1893  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) {
1894  if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) {
1895  APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
1896  switch (Cond) {
1897  default: break;
1898  case ISD::SETEQ: if (R==APFloat::cmpUnordered)
1899  return getUNDEF(VT);
1901  case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT);
1902  case ISD::SETNE: if (R==APFloat::cmpUnordered)
1903  return getUNDEF(VT);
1906  R==APFloat::cmpLessThan, dl, VT);
1907  case ISD::SETLT: if (R==APFloat::cmpUnordered)
1908  return getUNDEF(VT);
1910  case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT);
1911  case ISD::SETGT: if (R==APFloat::cmpUnordered)
1912  return getUNDEF(VT);
1914  case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT);
1915  case ISD::SETLE: if (R==APFloat::cmpUnordered)
1916  return getUNDEF(VT);
1918  case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
1919  R==APFloat::cmpEqual, dl, VT);
1920  case ISD::SETGE: if (R==APFloat::cmpUnordered)
1921  return getUNDEF(VT);
1924  R==APFloat::cmpEqual, dl, VT);
1925  case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, dl, VT);
1926  case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, dl, VT);
1927  case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
1928  R==APFloat::cmpEqual, dl, VT);
1929  case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT);
1930  case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
1931  R==APFloat::cmpLessThan, dl, VT);
1933  R==APFloat::cmpUnordered, dl, VT);
1934  case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT);
1935  case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT);
1936  }
1937  } else {
1938  // Ensure that the constant occurs on the RHS.
1939  ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
1940  MVT CompVT = N1.getValueType().getSimpleVT();
1941  if (!TLI->isCondCodeLegal(SwappedCond, CompVT))
1942  return SDValue();
1943 
1944  return getSetCC(dl, VT, N2, N1, SwappedCond);
1945  }
1946  }
1947 
1948  // Could not fold it.
1949  return SDValue();
1950 }
1951 
1952 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
1953 /// use this predicate to simplify operations downstream.
1954 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
1955  unsigned BitWidth = Op.getScalarValueSizeInBits();
1956  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
1957 }
1958 
1959 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
1960 /// this predicate to simplify operations downstream. Mask is known to be zero
1961 /// for bits that V cannot have.
// NOTE(review): the first signature line was lost in extraction; presumably
// SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
// unsigned Depth) const — TODO confirm.
// True iff every bit of Mask is known-zero in Op, per computeKnownBits.
1963  unsigned Depth) const {
1964  APInt KnownZero, KnownOne;
1965  computeKnownBits(Op, KnownZero, KnownOne, Depth);
// All masked bits must appear in the known-zero set.
1966  return (KnownZero & Mask) == Mask;
1967 }
1968 
1969 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that
1970 /// is less than the element bit-width of the shift node, return it.
// NOTE(review): signature line lost in extraction; presumably a static helper
// `static const APInt *getValidShiftAmountConstant(SDValue V)` — TODO confirm.
// If V's shift amount (operand 1) is a constant or splat-constant smaller
// than V's element bit-width, return a pointer to it; otherwise nullptr.
1972  if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) {
1973  // Shifting more than the bitwidth is not valid.
1974  const APInt &ShAmt = SA->getAPIntValue();
1975  if (ShAmt.ult(V.getScalarValueSizeInBits()))
// The APInt lives in the ConstantSDNode, so its address remains valid.
1976  return &ShAmt;
1977  }
1978  return nullptr;
1979 }
1980 
/// Determine which bits of Op are known to be either zero or one and return
/// them in the KnownZero/KnownOne bitsets. For vectors, the known bits are
/// those that are shared by every vector element.
// NOTE(review): the signature line is missing from this view; this is the
// convenience overload without a DemandedElts argument.
                                    APInt &KnownOne, unsigned Depth) const {
  EVT VT = Op.getValueType();
  // Vectors demand every element; scalars use a trivial one-bit mask.
  APInt DemandedElts = VT.isVector()
                           // NOTE(review): the vector arm of this ternary was
                           // lost in this view (presumably an all-ones mask of
                           // getVectorNumElements() bits) — verify upstream.
                           : APInt(1, 1);
  computeKnownBits(Op, KnownZero, KnownOne, DemandedElts, Depth);
}
1992 
/// Determine which bits of Op are known to be either zero or one and return
/// them in the KnownZero/KnownOne bitsets. The DemandedElts argument allows
/// us to only collect the known bits that are shared by the requested vector
/// elements.
/// TODO: We only support DemandedElts on a few opcodes so far, the remainder
/// should be added when they become necessary.
// NOTE(review): the first signature line is missing from this view; this is
// the main computeKnownBits overload taking a DemandedElts mask.
                                    APInt &KnownOne, const APInt &DemandedElts,
                                    unsigned Depth) const {
  // All tracking is done at scalar-element granularity.
  unsigned BitWidth = Op.getScalarValueSizeInBits();

  KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
  if (Depth == 6)
    return;  // Limit search depth.

  APInt KnownZero2, KnownOne2;
  unsigned NumElts = DemandedElts.getBitWidth();

  if (!DemandedElts)
    return;  // No demanded elts, better to assume we don't know anything.

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
    KnownZero = ~KnownOne;
    break;
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded vector element.
    assert(NumElts == Op.getValueType().getVectorNumElements() &&
           "Unexpected vector size");
    // Start from "everything known" and intersect per element.
    KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;

      SDValue SrcOp = Op.getOperand(i);
      computeKnownBits(SrcOp, KnownZero2, KnownOne2, Depth + 1);

      // BUILD_VECTOR can implicitly truncate sources, we must handle this.
      if (SrcOp.getValueSizeInBits() != BitWidth) {
        assert(SrcOp.getValueSizeInBits() > BitWidth &&
               "Expected BUILD_VECTOR implicit truncation");
        KnownOne2 = KnownOne2.trunc(BitWidth);
        KnownZero2 = KnownZero2.trunc(BitWidth);
      }

      // Known bits are the values that are shared by every demanded element.
      KnownOne &= KnownOne2;
      KnownZero &= KnownZero2;

      // If we don't know any bits, early out.
      if (!KnownOne && !KnownZero)
        break;
    }
    break;
  case ISD::VECTOR_SHUFFLE: {
    // Collect the known bits that are shared by every vector element referenced
    // by the shuffle.
    APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
    KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
    assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;

      int M = SVN->getMaskElt(i);
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        KnownOne.clearAllBits();
        KnownZero.clearAllBits();
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }

      // Route the demand to whichever input the mask element selects.
      if ((unsigned)M < NumElts)
        DemandedLHS.setBit((unsigned)M % NumElts);
      else
        DemandedRHS.setBit((unsigned)M % NumElts);
    }
    // Known bits are the values that are shared by every demanded element.
    if (!!DemandedLHS) {
      SDValue LHS = Op.getOperand(0);
      computeKnownBits(LHS, KnownZero2, KnownOne2, DemandedLHS, Depth + 1);
      KnownOne &= KnownOne2;
      KnownZero &= KnownZero2;
    }
    // If we don't know any bits, early out.
    if (!KnownOne && !KnownZero)
      break;
    if (!!DemandedRHS) {
      SDValue RHS = Op.getOperand(1);
      computeKnownBits(RHS, KnownZero2, KnownOne2, DemandedRHS, Depth + 1);
      KnownOne &= KnownOne2;
      KnownZero &= KnownZero2;
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    // Split DemandedElts and test each of the demanded subvectors.
    KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
    unsigned NumSubVectors = Op.getNumOperands();
    for (unsigned i = 0; i != NumSubVectors; ++i) {
      // Extract the slice of DemandedElts covering subvector i.
      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
      if (!!DemandedSub) {
        SDValue Sub = Op.getOperand(i);
        computeKnownBits(Sub, KnownZero2, KnownOne2, DemandedSub, Depth + 1);
        KnownOne &= KnownOne2;
        KnownZero &= KnownZero2;
      }
      // If we don't know any bits, early out.
      if (!KnownOne && !KnownZero)
        break;
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // If we know the element index, just demand that subvector elements,
    // otherwise demand them all.
    SDValue Src = Op.getOperand(0);
    // NOTE(review): a line was lost in this view here — presumably the
    // dyn_cast<ConstantSDNode> of operand 1 that defines SubIdx; verify
    // against upstream before relying on this block.
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
      // Offset the demanded elts by the subvector index.
      uint64_t Idx = SubIdx->getZExtValue();
      APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx);
      computeKnownBits(Src, KnownZero, KnownOne, DemandedSrc, Depth + 1);
    } else {
      computeKnownBits(Src, KnownZero, KnownOne, Depth + 1);
    }
    break;
  }
  case ISD::BITCAST: {
    SDValue N0 = Op.getOperand(0);
    unsigned SubBitWidth = N0.getScalarValueSizeInBits();

    // Ignore bitcasts from floating point.
    if (!N0.getValueType().isInteger())
      break;

    // Fast handling of 'identity' bitcasts.
    if (BitWidth == SubBitWidth) {
      computeKnownBits(N0, KnownZero, KnownOne, DemandedElts, Depth + 1);
      break;
    }

    // Support big-endian targets when it becomes useful.
    bool IsLE = getDataLayout().isLittleEndian();
    if (!IsLE)
      break;

    // Bitcast 'small element' vector to 'large element' scalar/vector.
    if ((BitWidth % SubBitWidth) == 0) {
      assert(N0.getValueType().isVector() && "Expected bitcast from vector");

      // Collect known bits for the (larger) output by collecting the known
      // bits from each set of sub elements and shift these into place.
      // We need to separately call computeKnownBits for each set of
      // sub elements as the knownbits for each is likely to be different.
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts(NumElts * SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);

      for (unsigned i = 0; i != SubScale; ++i) {
        computeKnownBits(N0, KnownZero2, KnownOne2, SubDemandedElts.shl(i),
                         Depth + 1);
        // Place each sub-element's bits at its offset within the wide result.
        KnownOne |= KnownOne2.zext(BitWidth).shl(SubBitWidth * i);
        KnownZero |= KnownZero2.zext(BitWidth).shl(SubBitWidth * i);
      }
    }

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    if ((SubBitWidth % BitWidth) == 0) {
      assert(Op.getValueType().isVector() && "Expected bitcast to vector");

      // Collect known bits for the (smaller) output by collecting the known
      // bits from the overlapping larger input elements and extracting the
      // sub sections we actually care about.
      unsigned SubScale = SubBitWidth / BitWidth;
      APInt SubDemandedElts(NumElts / SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i / SubScale);

      computeKnownBits(N0, KnownZero2, KnownOne2, SubDemandedElts, Depth + 1);

      KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          // Extract the BitWidth-sized window this small element occupies.
          unsigned Offset = (i % SubScale) * BitWidth;
          KnownOne &= KnownOne2.lshr(Offset).trunc(BitWidth);
          KnownZero &= KnownZero2.lshr(Offset).trunc(BitWidth);
          // If we don't know any bits, early out.
          if (!KnownOne && !KnownZero)
            break;
        }
    }
    break;
  }
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
                     Depth + 1);
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
                     Depth + 1);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
                     Depth + 1);
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
                     Depth + 1);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR: {
    computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
                     Depth + 1);
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
                     Depth + 1);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    break;
  }
  case ISD::MUL: {
    computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
                     Depth + 1);
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
                     Depth + 1);

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
    KnownOne.clearAllBits();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
                               KnownZero2.countLeadingOnes(),
                               BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
  case ISD::UDIV: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
                     Depth + 1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
                     Depth + 1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
  case ISD::SELECT:
    computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
    // If we don't know any bits, early out.
    if (!KnownOne && !KnownZero)
      break;
    computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
    // If we don't know any bits, early out.
    if (!KnownOne && !KnownZero)
      break;
    computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    // Only the overflow flag (result #1) is handled here.
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.
    // If we know the result of a setcc has the top bits zero, use this info.
    // We know that we have an integer-based boolean since these operations
    // are only available for integer.
    if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
        // NOTE(review): the RHS of this comparison was lost in this view
        // (presumably TargetLowering::ZeroOrOneBooleanContent &&) — verify.
        BitWidth > 1)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
        // NOTE(review): the RHS of this comparison was lost in this view
        // (presumably TargetLowering::ZeroOrOneBooleanContent &&) — verify.
        BitWidth > 1)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ISD::SHL:
    if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
      computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                       Depth + 1);
      KnownZero = KnownZero << *ShAmt;
      KnownOne = KnownOne << *ShAmt;
      // Low bits are known zero.
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt->getZExtValue());
    }
    break;
  case ISD::SRL:
    if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
      computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                       Depth + 1);
      KnownZero = KnownZero.lshr(*ShAmt);
      KnownOne = KnownOne.lshr(*ShAmt);
      // High bits are known zero.
      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt->getZExtValue());
      KnownZero |= HighBits;
    }
    break;
  case ISD::SRA:
    if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
      computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                       Depth + 1);
      KnownZero = KnownZero.lshr(*ShAmt);
      KnownOne = KnownOne.lshr(*ShAmt);
      // If we know the value of the sign bit, then we know it is copied across
      // the high bits by the shift amount.
      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt->getZExtValue());
      APInt SignBit = APInt::getSignBit(BitWidth);
      SignBit = SignBit.lshr(*ShAmt);  // Adjust to where it is now in the mask.
      if (KnownZero.intersects(SignBit)) {
        KnownZero |= HighBits;  // New bits are known zero.
      } else if (KnownOne.intersects(SignBit)) {
        KnownOne |= HighBits;   // New bits are known one.
      }
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    // Note: 'EVT' here deliberately shadows the type name with a local.
    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned EBits = EVT.getScalarSizeInBits();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);

    APInt InSignBit = APInt::getSignBit(EBits);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InSignBit = InSignBit.zext(BitWidth);
    if (NewBits.getBoolValue())
      InputDemandedBits |= InSignBit;

    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                     Depth + 1);
    KnownOne &= InputDemandedBits;
    KnownZero &= InputDemandedBits;

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero.intersects(InSignBit)) {         // Input sign bit known clear
      KnownZero |= NewBits;
      KnownOne  &= ~NewBits;
    } else if (KnownOne.intersects(InSignBit)) {   // Input sign bit known set
      KnownOne  |= NewBits;
      KnownZero &= ~NewBits;
    } else {                                       // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne  &= ~NewBits;
    }
    break;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTPOP: {
    // Bit-count results never exceed BitWidth, so high bits are zero.
    unsigned LowBits = Log2_32(BitWidth)+1;
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
    KnownOne.clearAllBits();
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If this is a ZEXTLoad and we are looking at the loaded value.
    if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      // !range metadata can narrow the known bits of a plain load.
      if (LD->getExtensionType() == ISD::NON_EXTLOAD)
        computeKnownBitsFromRangeMetadata(*Ranges, KnownZero, KnownOne);
    }
    break;
  }
  case ISD::ZERO_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarSizeInBits();
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                     Depth + 1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    // The extended bits are guaranteed zero.
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarSizeInBits();

    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                     Depth + 1);

    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    KnownZero = KnownZero.sext(BitWidth);
    KnownOne = KnownOne.sext(BitWidth);
    break;
  }
  case ISD::ANY_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarSizeInBits();
    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    // Extended bits are unknown: zext of the known bits leaves them 0/0.
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarSizeInBits();
    KnownZero = KnownZero.zext(InBits);
    KnownOne = KnownOne.zext(InBits);
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                     Depth + 1);
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);
    break;
  }
  case ISD::AssertZext: {
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    // All bits above the asserted width are zero.
    KnownZero |= (~InMask);
    KnownOne  &= (~KnownZero);
    break;
  }
  case ISD::FGETSIGN:
    // All bits are zero except the low bit.
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;

  case ISD::SUB: {
    if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) {
      // We know that the top bits of C-X are clear if X contains less bits
      // than C (i.e. no wrap-around can happen).  For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (CLHS->getAPIntValue().isNonNegative()) {
        unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
                         Depth + 1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
    // NOTE(review): there is intentionally no 'break' — SUB falls through to
    // the ADD/ADDE handling below (a fallthrough marker appears lost in this
    // view); the low-bit reasoning applies to SUB as well. Verify upstream.
  }
  case ISD::ADD:
  case ISD::ADDE: {
    // Output known-0 bits are known if clear or set in both the low clear bits
    // common to both LHS & RHS.  For example, 8+(X<<3) is known to have the
    // low 3 bits clear.
    // Output known-0 bits are also known if the top bits of each input are
    // known to be clear. For example, if one input has the top 10 bits clear
    // and the other has the top 8 bits clear, we know the top 7 bits of the
    // output must be clear.
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
                     Depth + 1);
    unsigned KnownZeroHigh = KnownZero2.countLeadingOnes();
    unsigned KnownZeroLow = KnownZero2.countTrailingOnes();

    computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
                     Depth + 1);
    KnownZeroHigh = std::min(KnownZeroHigh,
                             KnownZero2.countLeadingOnes());
    KnownZeroLow = std::min(KnownZeroLow,
                            KnownZero2.countTrailingOnes());

    if (Opcode == ISD::ADD) {
      KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroLow);
      // A carry out of the addition may flip the last common leading zero.
      if (KnownZeroHigh > 1)
        KnownZero |= APInt::getHighBitsSet(BitWidth, KnownZeroHigh - 1);
      break;
    }

    // With ADDE, a carry bit may be added in, so we can only use this
    // information if we know (at least) that the low two bits are clear.  We
    // then return to the caller that the low bit is unknown but that other bits
    // are known zero.
    if (KnownZeroLow >= 2) // ADDE
      KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroLow);
    break;
  }
  case ISD::SREM:
    if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
                         Depth + 1);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;
        assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
      }
    }
    break;
  case ISD::UREM: {
    if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
                         Depth + 1);

        // The upper bits are all zero, the lower ones are unchanged.
        KnownZero = KnownZero2 | ~LowBits;
        KnownOne = KnownOne2 & LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                     Depth + 1);
    computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
                     Depth + 1);

    uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    const unsigned Index = Op.getConstantOperandVal(1);
    // Note: shadows the outer BitWidth with the extracted element's width.
    const unsigned BitWidth = Op.getValueSizeInBits();

    // Remove low part of known bits mask
    KnownZero = KnownZero.getHiBits(KnownZero.getBitWidth() - Index * BitWidth);
    KnownOne = KnownOne.getHiBits(KnownOne.getBitWidth() - Index * BitWidth);

    // Remove high part of known bit mask
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue EltNo = Op.getOperand(1);
    EVT VecVT = InVec.getValueType();
    const unsigned BitWidth = Op.getValueSizeInBits();
    const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
    const unsigned NumSrcElts = VecVT.getVectorNumElements();
    // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
    // anything about the extended bits.
    if (BitWidth > EltBitWidth) {
      KnownZero = KnownZero.trunc(EltBitWidth);
      KnownOne = KnownOne.trunc(EltBitWidth);
    }
    ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
      // If we know the element index, just demand that vector element.
      unsigned Idx = ConstEltNo->getZExtValue();
      APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
      computeKnownBits(InVec, KnownZero, KnownOne, DemandedElt, Depth + 1);
    } else {
      // Unknown element index, so ignore DemandedElts and demand them all.
      computeKnownBits(InVec, KnownZero, KnownOne, Depth + 1);
    }
    if (BitWidth > EltBitWidth) {
      KnownZero = KnownZero.zext(BitWidth);
      KnownOne = KnownOne.zext(BitWidth);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue InVal = Op.getOperand(1);
    SDValue EltNo = Op.getOperand(2);

    ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      // If we know the element index, split the demand between the
      // source vector and the inserted element.
      KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
      unsigned EltIdx = CEltNo->getZExtValue();

      // If we demand the inserted element then add its common known bits.
      if (DemandedElts[EltIdx]) {
        computeKnownBits(InVal, KnownZero2, KnownOne2, Depth + 1);
        KnownOne &= KnownOne2.zextOrTrunc(KnownOne.getBitWidth());
        KnownZero &= KnownZero2.zextOrTrunc(KnownZero.getBitWidth());; // (stray ';')
      }

      // If we demand the source vector then add its common known bits, ensuring
      // that we don't demand the inserted element.
      APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
      if (!!VectorElts) {
        computeKnownBits(InVec, KnownZero2, KnownOne2, VectorElts, Depth + 1);
        KnownOne &= KnownOne2;
        KnownZero &= KnownZero2;
      }
    } else {
      // Unknown element index, so ignore DemandedElts and demand them all.
      computeKnownBits(InVec, KnownZero, KnownOne, Depth + 1);
      computeKnownBits(InVal, KnownZero2, KnownOne2, Depth + 1);
      KnownOne &= KnownOne2.zextOrTrunc(KnownOne.getBitWidth());
      KnownZero &= KnownZero2.zextOrTrunc(KnownZero.getBitWidth());; // (stray ';')
    }
    break;
  }
  case ISD::BSWAP: {
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
                     Depth + 1);
    KnownZero = KnownZero2.byteSwap();
    KnownOne = KnownOne2.byteSwap();
    break;
  }
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX: {
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                     Depth + 1);
    // If we don't know any bits, early out.
    if (!KnownOne && !KnownZero)
      break;
    computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
                     Depth + 1);
    // The result can be either input, so only common bits are known.
    KnownZero &= KnownZero2;
    KnownOne &= KnownOne2;
    break;
  }
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    if (unsigned Align = InferPtrAlignment(Op)) {
      // The low bits are known zero if the pointer is aligned.
      KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
      break;
    }
    break;

  default:
    if (Opcode < ISD::BUILTIN_OP_END)
      break;
    // NOTE(review): lines lost in this view — presumably a fallthrough marker
    // and the INTRINSIC_WO_CHAIN / INTRINSIC_W_CHAIN case labels; verify.
  case ISD::INTRINSIC_VOID:
    // Allow the target to implement this method for its nodes.
    TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
    break;
  }

  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}
2721 
// NOTE(review): the function header line is missing from this view; per the
// body this tests whether Val is known to be an integral power of two
// (exactly one bit set) — verify signature against upstream.
  EVT OpVT = Val.getValueType();
  unsigned BitWidth = OpVT.getScalarSizeInBits();

  // Is the constant a known power of 2?
  if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
    return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();

  // A left-shift of a constant one will have exactly one bit set because
  // shifting the bit off the end is undefined.
  if (Val.getOpcode() == ISD::SHL) {
    auto *C = dyn_cast<ConstantSDNode>(Val.getOperand(0));
    if (C && C->getAPIntValue() == 1)
      return true;
  }

  // Similarly, a logical right-shift of a constant sign-bit will have exactly
  // one bit set.
  if (Val.getOpcode() == ISD::SRL) {
    auto *C = dyn_cast<ConstantSDNode>(Val.getOperand(0));
    if (C && C->getAPIntValue().isSignBit())
      return true;
  }

  // Are all operands of a build vector constant powers of two?
  if (Val.getOpcode() == ISD::BUILD_VECTOR)
    if (llvm::all_of(Val->ops(), [this, BitWidth](SDValue E) {
          if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
            return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
          return false;
        }))
      return true;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  APInt KnownZero, KnownOne;
  computeKnownBits(Val, KnownZero, KnownOne);
  // Power of two iff exactly one bit is known-one and every other bit is
  // known-zero.
  return (KnownZero.countPopulation() == BitWidth - 1) &&
         (KnownOne.countPopulation() == 1);
}
2764 
// Return the minimum number of copies of the sign bit replicated into the
// top bits of Op (per scalar element), recursing through operands with a
// hard depth limit of 6 to bound compile time. A result of 1 means
// "nothing known beyond the sign bit itself".
// NOTE(review): this extraction is missing several original source lines
// (the ones that carried hyperlinks on the doc site); each gap is flagged
// inline below — confirm against the upstream LLVM 4.0 sources.
2765 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
2766  EVT VT = Op.getValueType();
2767  assert(VT.isInteger() && "Invalid VT!");
2768  unsigned VTBits = VT.getScalarSizeInBits();
2769  unsigned Tmp, Tmp2;
2770  unsigned FirstAnswer = 1;
2771 
2772  if (Depth == 6)
2773  return 1; // Limit search depth.
2774 
2775  switch (Op.getOpcode()) {
2776  default: break;
2777  case ISD::AssertSext:
2778  Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2779  return VTBits-Tmp+1;
2780  case ISD::AssertZext:
2781  Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2782  return VTBits-Tmp;
2783 
2784  case ISD::Constant: {
2785  const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2786  return Val.getNumSignBits();
2787  }
2788 
2789  case ISD::SIGN_EXTEND:
2790  Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
2791  return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2792 
// NOTE(review): original line 2793 is missing from this extraction —
// presumably "case ISD::SIGN_EXTEND_INREG:"; confirm upstream.
2794  // Max of the input and what this extends.
2795  Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
2796  Tmp = VTBits-Tmp+1;
2797 
2798  Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2799  return std::max(Tmp, Tmp2);
2800 
2801  case ISD::SRA:
2802  Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2803  // SRA X, C -> adds C sign bits.
// NOTE(review): original line 2804 is missing from this extraction —
// presumably "if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {".
2805  APInt ShiftVal = C->getAPIntValue();
2806  ShiftVal += Tmp;
// Saturate at the bit width: shifting by >= VTBits still yields all-sign-bits.
2807  Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
2808  }
2809  return Tmp;
2810  case ISD::SHL:
// NOTE(review): original line 2811 is missing from this extraction —
// presumably "if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {".
2812  // shl destroys sign bits.
2813  Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2814  if (C->getAPIntValue().uge(VTBits) || // Bad shift.
2815  C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out.
2816  return Tmp - C->getZExtValue();
2817  }
2818  break;
2819  case ISD::AND:
2820  case ISD::OR:
2821  case ISD::XOR: // NOT is handled here.
2822  // Logical binary ops preserve the number of sign bits at the worst.
2823  Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2824  if (Tmp != 1) {
2825  Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2826  FirstAnswer = std::min(Tmp, Tmp2);
2827  // We computed what we know about the sign bits as our first
2828  // answer. Now proceed to the generic code that uses
2829  // computeKnownBits, and pick whichever answer is better.
2830  }
2831  break;
2832 
2833  case ISD::SELECT:
2834  Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2835  if (Tmp == 1) return 1; // Early out.
2836  Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2837  return std::min(Tmp, Tmp2);
2838  case ISD::SELECT_CC:
2839  Tmp = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2840  if (Tmp == 1) return 1; // Early out.
2841  Tmp2 = ComputeNumSignBits(Op.getOperand(3), Depth+1);
2842  return std::min(Tmp, Tmp2);
2843  case ISD::SMIN:
2844  case ISD::SMAX:
2845  case ISD::UMIN:
2846  case ISD::UMAX:
// min/max select one of the two inputs, so the result has at least the
// smaller of the two operands' sign-bit counts.
2847  Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2848  if (Tmp == 1)
2849  return 1; // Early out.
2850  Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
2851  return std::min(Tmp, Tmp2);
2852  case ISD::SADDO:
2853  case ISD::UADDO:
2854  case ISD::SSUBO:
2855  case ISD::USUBO:
2856  case ISD::SMULO:
2857  case ISD::UMULO:
// Only result #1 (the overflow flag) is a boolean; result #0 is the
// arithmetic value and gets no special handling here.
2858  if (Op.getResNo() != 1)
2859  break;
2860  // The boolean result conforms to getBooleanContents. Fall through.
2861  // If setcc returns 0/-1, all bits are sign bits.
2862  // We know that we have an integer-based boolean since these operations
2863  // are only available for integer.
2864  if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
// NOTE(review): original line 2865 is missing from this extraction —
// presumably "TargetLowering::ZeroOrNegativeOneBooleanContent)".
2866  return VTBits;
2867  break;
2868  case ISD::SETCC:
2869  // If setcc returns 0/-1, all bits are sign bits.
2870  if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
// NOTE(review): original line 2871 is missing from this extraction —
// presumably "TargetLowering::ZeroOrNegativeOneBooleanContent)".
2872  return VTBits;
2873  break;
2874  case ISD::ROTL:
2875  case ISD::ROTR:
2876  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2877  unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2878 
2879  // Handle rotate right by N like a rotate left by 32-N.
2880  if (Op.getOpcode() == ISD::ROTR)
2881  RotAmt = (VTBits-RotAmt) & (VTBits-1);
2882 
2883  // If we aren't rotating out all of the known-in sign bits, return the
2884  // number that are left. This handles rotl(sext(x), 1) for example.
2885  Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2886  if (Tmp > RotAmt+1) return Tmp-RotAmt;
2887  }
2888  break;
2889  case ISD::ADD:
2890  // Add can have at most one carry bit. Thus we know that the output
2891  // is, at worst, one more bit than the inputs.
2892  Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2893  if (Tmp == 1) return 1; // Early out.
2894 
2895  // Special case decrementing a value (ADD X, -1):
2896  if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2897  if (CRHS->isAllOnesValue()) {
2898  APInt KnownZero, KnownOne;
2899  computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2900 
2901  // If the input is known to be 0 or 1, the output is 0/-1, which is all
2902  // sign bits set.
2903  if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2904  return VTBits;
2905 
2906  // If we are subtracting one from a positive number, there is no carry
2907  // out of the result.
2908  if (KnownZero.isNegative())
2909  return Tmp;
2910  }
2911 
2912  Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2913  if (Tmp2 == 1) return 1;
2914  return std::min(Tmp, Tmp2)-1;
2915 
2916  case ISD::SUB:
2917  Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2918  if (Tmp2 == 1) return 1;
2919 
2920  // Handle NEG.
2921  if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
2922  if (CLHS->isNullValue()) {
2923  APInt KnownZero, KnownOne;
2924  computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2925  // If the input is known to be 0 or 1, the output is 0/-1, which is all
2926  // sign bits set.
2927  if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2928  return VTBits;
2929 
2930  // If the input is known to be positive (the sign bit is known clear),
2931  // the output of the NEG has the same number of sign bits as the input.
2932  if (KnownZero.isNegative())
2933  return Tmp2;
2934 
2935  // Otherwise, we treat this like a SUB.
2936  }
2937 
2938  // Sub can have at most one carry bit. Thus we know that the output
2939  // is, at worst, one more bit than the inputs.
2940  Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2941  if (Tmp == 1) return 1; // Early out.
2942  return std::min(Tmp, Tmp2)-1;
2943  case ISD::TRUNCATE: {
2944  // Check if the sign bits of source go down as far as the truncated value.
2945  unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
2946  unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2947  if (NumSrcSignBits > (NumSrcBits - VTBits))
2948  return NumSrcSignBits - (NumSrcBits - VTBits);
2949  break;
2950  }
2951  case ISD::EXTRACT_ELEMENT: {
2952  const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2953  const int BitWidth = Op.getValueSizeInBits();
2954  const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
2955 
2956  // Get reverse index (starting from 1), Op1 value indexes elements from
2957  // little end. Sign starts at big end.
2958  const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
2959 
2960  // If the sign portion ends in our element the subtraction gives correct
2961  // result. Otherwise it gives either negative or > bitwidth result
2962  return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
2963  }
2964  case ISD::EXTRACT_VECTOR_ELT: {
2965  // At the moment we keep this simple and skip tracking the specific
2966  // element. This way we get the lowest common denominator for all elements
2967  // of the vector.
2968  // TODO: get information for given vector element
2969  const unsigned BitWidth = Op.getValueSizeInBits();
2970  const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
2971  // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know
2972  // anything about sign bits. But if the sizes match we can derive knowledge
2973  // about sign bits from the vector operand.
2974  if (BitWidth == EltBitWidth)
2975  return ComputeNumSignBits(Op.getOperand(0), Depth+1);
2976  break;
2977  }
// NOTE(review): original line 2978 is missing from this extraction —
// presumably "case ISD::EXTRACT_SUBVECTOR:".
2979  return ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2980  case ISD::CONCAT_VECTORS:
2981  // Determine the minimum number of sign bits across all input vectors.
2982  // Early out if the result is already 1.
2983  Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2984  for (unsigned i = 1, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i)
2985  Tmp = std::min(Tmp, ComputeNumSignBits(Op.getOperand(i), Depth + 1));
2986  return Tmp;
2987  }
2988 
2989  // If we are looking at the loaded value of the SDNode.
2990  if (Op.getResNo() == 0) {
2991  // Handle LOADX separately here. EXTLOAD case will fallthrough.
2992  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
2993  unsigned ExtType = LD->getExtensionType();
2994  switch (ExtType) {
2995  default: break;
2996  case ISD::SEXTLOAD: // '17' bits known
2997  Tmp = LD->getMemoryVT().getScalarSizeInBits();
2998  return VTBits-Tmp+1;
2999  case ISD::ZEXTLOAD: // '16' bits known
3000  Tmp = LD->getMemoryVT().getScalarSizeInBits();
3001  return VTBits-Tmp;
3002  }
3003  }
3004  }
3005 
3006  // Allow the target to implement this method for its nodes.
3007  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
// NOTE(review): original lines 3008-3009 are missing from this extraction —
// presumably the ISD::INTRINSIC_WO_CHAIN / ISD::INTRINSIC_W_CHAIN
// comparisons of this disjunction.
3010  Op.getOpcode() == ISD::INTRINSIC_VOID) {
3011  unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
3012  if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
3013  }
3014 
3015  // Finally, if we can prove that the top bits of the result are 0's or 1's,
3016  // use this information.
3017  APInt KnownZero, KnownOne;
3018  computeKnownBits(Op, KnownZero, KnownOne, Depth);
3019 
3020  APInt Mask;
3021  if (KnownZero.isNegative()) { // sign bit is 0
3022  Mask = KnownZero;
3023  } else if (KnownOne.isNegative()) { // sign bit is 1;
3024  Mask = KnownOne;
3025  } else {
3026  // Nothing known.
3027  return FirstAnswer;
3028  }
3029 
3030  // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3031  // the number of identical bits in the top of the input value.
3032  Mask = ~Mask;
3033  Mask <<= Mask.getBitWidth()-VTBits;
3034  // Return # leading zeros. We use 'min' here in case Val was zero before
3035  // shifting. We don't want to return '64' as for an i32 "0".
3036  return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3037 }
3038 
// NOTE(review): the function signature line (original line 3039) is missing
// from this extraction; from the ADD/OR-with-constant-operand logic this is
// presumably "bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {"
// — confirm against upstream. Returns true when Op is ADD/OR of something
// with a ConstantSDNode, with OR additionally required to act like an ADD.
3040  if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3041  !isa<ConstantSDNode>(Op.getOperand(1)))
3042  return false;
3043 
3044  if (Op.getOpcode() == ISD::OR &&
// NOTE(review): original line 3045 is missing from this extraction —
// presumably "!MaskedValueIsZero(Op.getOperand(0)," i.e. the check that the
// OR has no bits in common with the constant and thus behaves as an ADD.
3046  cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3047  return false;
3048 
3049  return true;
3050 }
3051 
// NOTE(review): the function signature line (original line 3052) is missing
// from this extraction; presumably
// "bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {" — confirm.
// A false return means "unknown", not "is a NaN": only the two cases below
// are proven.
3053  // If we're told that NaNs won't happen, assume they won't.
3054  if (getTarget().Options.NoNaNsFPMath)
3055  return true;
3056 
3057  // If the value is a constant, we can obviously see if it is a NaN or not.
3058  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3059  return !C->getValueAPF().isNaN();
3060 
3061  // TODO: Recognize more cases here.
3062 
3063  return false;
3064 }
3065 
// NOTE(review): the function signature line (original line 3066) is missing
// from this extraction; presumably
// "bool SelectionDAG::isKnownNeverZero(SDValue Op) const {" — confirm.
// A false return means "unknown"; only non-zero FP constants and OR with a
// non-zero integer constant are proven non-zero here.
3067  // If the value is a constant, we can obviously see if it is a zero or not.
3068  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3069  return !C->isZero();
3070 
3071  // TODO: Recognize more cases here.
3072  switch (Op.getOpcode()) {
3073  default: break;
3074  case ISD::OR:
// OR-ing in a constant with at least one set bit guarantees the result has
// at least one set bit.
3075  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3076  return !C->isNullValue();
3077  break;
3078  }
3079 
3080  return false;
3081 }
3082 
// NOTE(review): the function signature line (original line 3083) is missing
// from this extraction; presumably
// "bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {" — confirm.
// Conservative equality: a false return means "may not be equal".
3084  // Check the obvious case.
3085  if (A == B) return true;
3086 
3087  // Allow for negative and positive zero: -0.0 and +0.0 are treated as equal.
3088  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
3089  if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
3090  if (CA->isZero() && CB->isZero()) return true;
3091 
3092  // Otherwise they may not be equal.
3093  return false;
3094 }
3095 
// NOTE(review): the function signature line (original line 3096) is missing
// from this extraction; presumably
// "bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {".
// Returns true when, for every bit position, at least one of A or B is
// known to be zero (so A|B == A+B, etc.).
3097  assert(A.getValueType() == B.getValueType() &&
3098  "Values must have the same type");
3099  APInt AZero, AOne;
3100  APInt BZero, BOne;
3101  computeKnownBits(A, AZero, AOne);
3102  computeKnownBits(B, BZero, BOne);
// Every bit must be known-zero in at least one of the two values.
3103  return (AZero | BZero).isAllOnesValue();
3104 }
3105 
// Try to constant-fold a CONCAT_VECTORS node: a single operand folds to
// itself, all-undef folds to UNDEF, and a concat of BUILD_VECTOR/UNDEF
// operands folds to one big BUILD_VECTOR. Returns an empty SDValue when no
// fold applies.
3106 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
3107  ArrayRef<SDValue> Ops,
3108  llvm::SelectionDAG &DAG) {
3109  assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
3110  assert(llvm::all_of(Ops,
3111  [Ops](SDValue Op) {
3112  return Ops[0].getValueType() == Op.getValueType();
3113  }) &&
3114  "Concatenation of vectors with inconsistent value types!");
3115  assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
3116  VT.getVectorNumElements() &&
3117  "Incorrect element count in vector concatenation!");
3118 
3119  if (Ops.size() == 1)
3120  return Ops[0];
3121 
3122  // Concat of UNDEFs is UNDEF.
3123  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
3124  return DAG.getUNDEF(VT);
3125 
3126  // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
3127  // simplified to one big BUILD_VECTOR.
3128  // FIXME: Add support for SCALAR_TO_VECTOR as well.
3129  EVT SVT = VT.getScalarType();
// NOTE(review): original line 3130 is missing from this extraction —
// presumably the declaration of the element accumulator, e.g.
// "SmallVector<SDValue, 16> Elts;" — confirm upstream.
3131  for (SDValue Op : Ops) {
3132  EVT OpVT = Op.getValueType();
3133  if (Op.isUndef())
3134  Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
3135  else if (Op.getOpcode() == ISD::BUILD_VECTOR)
3136  Elts.append(Op->op_begin(), Op->op_end());
3137  else
3138  return SDValue();
3139  }
3140 
3141  // BUILD_VECTOR requires all inputs to be of the same type, find the
3142  // maximum type and extend them all.
3143  for (SDValue Op : Elts)
3144  SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
3145 
3146  if (SVT.bitsGT(VT.getScalarType()))
3147  for (SDValue &Op : Elts)
// Pick zero- vs sign-extension based on which the target considers free.
3148  Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
3149  ? DAG.getZExtOrTrunc(Op, DL, SVT)
3150  : DAG.getSExtOrTrunc(Op, DL, SVT);
3151 
3152  return DAG.getBuildVector(VT, DL, Elts);
3153 }
3154 
3155 /// Gets or creates the specified node.
// Nullary form: CSE-looks-up an existing node with this opcode/VT and no
// operands, creating and registering one if absent.
3156 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
// NOTE(review): original line 3157 is missing from this extraction —
// presumably "FoldingSetNodeID ID;" — confirm upstream.
3158  AddNodeIDNode(ID, Opcode, getVTList(VT), None);
3159  void *IP = nullptr;
3160  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
3161  return SDValue(E, 0);
3162 
3163  auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
3164  getVTList(VT));
3165  CSEMap.InsertNode(N, IP);
3166 
3167  InsertNode(N);
3168  return SDValue(N, 0);
3169 }
3170 
// Unary form of getNode: constant-folds where possible (integer, FP and
// build-vector operands), applies opcode-specific simplifications
// (e.g. ext-of-ext, trunc-of-ext, bitcast-of-bitcast), and otherwise
// CSE-creates the node.
// NOTE(review): several original source lines are missing from this
// extraction (flagged inline below) — confirm each against upstream.
3171 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
3172  SDValue Operand) {
3173  // Constant fold unary operations with an integer constant operand. Even
3174  // opaque constant will be folded, because the folding of unary operations
3175  // doesn't create new constants with different values. Nevertheless, the
3176  // opaque flag is preserved during folding to prevent future folding with
3177  // other constants.
3178  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
3179  const APInt &Val = C->getAPIntValue();
3180  switch (Opcode) {
3181  default: break;
3182  case ISD::SIGN_EXTEND:
3183  return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
3184  C->isTargetOpcode(), C->isOpaque());
3185  case ISD::ANY_EXTEND:
3186  case ISD::ZERO_EXTEND:
3187  case ISD::TRUNCATE:
3188  return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
3189  C->isTargetOpcode(), C->isOpaque());
3190  case ISD::UINT_TO_FP:
3191  case ISD::SINT_TO_FP: {
// NOTE(review): original lines 3192-3193 are missing from this extraction —
// presumably the declaration of 'apf' with the target semantics, e.g.
// "APFloat apf(EVTToAPFloatSemantics(VT), APInt::getNullValue(...));".
3194  (void)apf.convertFromAPInt(Val,
3195  Opcode==ISD::SINT_TO_FP,
// NOTE(review): original line 3196 is missing — presumably
// "APFloat::rmNearestTiesToEven);".
3197  return getConstantFP(apf, DL, VT);
3198  }
3199  case ISD::BITCAST:
3200  if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
3201  return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
3202  if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
3203  return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
3204  if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
3205  return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
3206  if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
3207  return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
3208  break;
3209  case ISD::BSWAP:
3210  return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
3211  C->isOpaque());
3212  case ISD::CTPOP:
3213  return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
3214  C->isOpaque());
3215  case ISD::CTLZ:
3216  case ISD::CTLZ_ZERO_UNDEF:
3217  return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
3218  C->isOpaque());
3219  case ISD::CTTZ:
3220  case ISD::CTTZ_ZERO_UNDEF:
3221  return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
3222  C->isOpaque());
3223  }
3224  }
3225 
3226  // Constant fold unary operations with a floating point constant operand.
3227  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
3228  APFloat V = C->getValueAPF(); // make copy
3229  switch (Opcode) {
3230  case ISD::FNEG:
3231  V.changeSign();
3232  return getConstantFP(V, DL, VT);
3233  case ISD::FABS:
3234  V.clearSign();
3235  return getConstantFP(V, DL, VT);
3236  case ISD::FCEIL: {
// NOTE(review): original line 3237 is missing — presumably
// "APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);".
3238  if (fs == APFloat::opOK || fs == APFloat::opInexact)
3239  return getConstantFP(V, DL, VT);
3240  break;
3241  }
3242  case ISD::FTRUNC: {
// NOTE(review): original line 3243 is missing — presumably
// "APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);".
3244  if (fs == APFloat::opOK || fs == APFloat::opInexact)
3245  return getConstantFP(V, DL, VT);
3246  break;
3247  }
3248  case ISD::FFLOOR: {
// NOTE(review): original line 3249 is missing — presumably
// "APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);".
3250  if (fs == APFloat::opOK || fs == APFloat::opInexact)
3251  return getConstantFP(V, DL, VT);
3252  break;
3253  }
3254  case ISD::FP_EXTEND: {
3255  bool ignored;
3256  // This can return overflow, underflow, or inexact; we don't care.
3257  // FIXME need to be more flexible about rounding mode.
3258  (void)V.convert(EVTToAPFloatSemantics(VT),
3259  APFloat::rmNearestTiesToEven, &ignored);
3260  return getConstantFP(V, DL, VT);
3261  }
3262  case ISD::FP_TO_SINT:
3263  case ISD::FP_TO_UINT: {
3264  integerPart x[2];
3265  bool ignored;
3266  static_assert(integerPartWidth >= 64, "APFloat parts too small!");
3267  // FIXME need to be more flexible about rounding mode.
// NOTE(review): original line 3268 is missing — presumably the start of the
// conversion call, e.g. "APFloat::opStatus s = V.convertToInteger(x, ...".
3269  Opcode==ISD::FP_TO_SINT,
3270  APFloat::rmTowardZero, &ignored);
3271  if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual
3272  break;
3273  APInt api(VT.getSizeInBits(), x);
3274  return getConstant(api, DL, VT);
3275  }
3276  case ISD::BITCAST:
3277  if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
3278  return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
3279  else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
3280  return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
3281  else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
3282  return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
3283  break;
3284  }
3285  }
3286 
3287  // Constant fold unary operations with a vector integer or float operand.
3288  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
3289  if (BV->isConstant()) {
3290  switch (Opcode) {
3291  default:
3292  // FIXME: Entirely reasonable to perform folding of other unary
3293  // operations here as the need arises.
3294  break;
3295  case ISD::FNEG:
3296  case ISD::FABS:
3297  case ISD::FCEIL:
3298  case ISD::FTRUNC:
3299  case ISD::FFLOOR:
3300  case ISD::FP_EXTEND:
3301  case ISD::FP_TO_SINT:
3302  case ISD::FP_TO_UINT:
3303  case ISD::TRUNCATE:
3304  case ISD::UINT_TO_FP:
3305  case ISD::SINT_TO_FP:
3306  case ISD::BSWAP:
3307  case ISD::CTLZ:
3308  case ISD::CTLZ_ZERO_UNDEF:
3309  case ISD::CTTZ:
3310  case ISD::CTTZ_ZERO_UNDEF:
3311  case ISD::CTPOP: {
3312  SDValue Ops = { Operand };
3313  if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
3314  return Fold;
3315  }
3316  }
3317  }
3318  }
3319 
// Opcode-specific algebraic simplifications on the (non-constant) operand.
3320  unsigned OpOpcode = Operand.getNode()->getOpcode();
3321  switch (Opcode) {
3322  case ISD::TokenFactor:
3323  case ISD::MERGE_VALUES:
3324  case ISD::CONCAT_VECTORS:
3325  return Operand; // Factor, merge or concat of one node? No need.
3326  case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
3327  case ISD::FP_EXTEND:
3328  assert(VT.isFloatingPoint() &&
3329  Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
3330  if (Operand.getValueType() == VT) return Operand; // noop conversion.
3331  assert((!VT.isVector() ||
3332  VT.getVectorNumElements() ==
3333  Operand.getValueType().getVectorNumElements()) &&
3334  "Vector element count mismatch!");
3335  assert(Operand.getValueType().bitsLT(VT) &&
3336  "Invalid fpext node, dst < src!");
3337  if (Operand.isUndef())
3338  return getUNDEF(VT);
3339  break;
3340  case ISD::SIGN_EXTEND:
3341  assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3342  "Invalid SIGN_EXTEND!");
3343  if (Operand.getValueType() == VT) return Operand; // noop extension
3344  assert((!VT.isVector() ||
3345  VT.getVectorNumElements() ==
3346  Operand.getValueType().getVectorNumElements()) &&
3347  "Vector element count mismatch!");
3348  assert(Operand.getValueType().bitsLT(VT) &&
3349  "Invalid sext node, dst < src!");
3350  if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
3351  return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
3352  else if (OpOpcode == ISD::UNDEF)
3353  // sext(undef) = 0, because the top bits will all be the same.
3354  return getConstant(0, DL, VT);
3355  break;
3356  case ISD::ZERO_EXTEND:
3357  assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3358  "Invalid ZERO_EXTEND!");
3359  if (Operand.getValueType() == VT) return Operand; // noop extension
3360  assert((!VT.isVector() ||
3361  VT.getVectorNumElements() ==
3362  Operand.getValueType().getVectorNumElements()) &&
3363  "Vector element count mismatch!");
3364  assert(Operand.getValueType().bitsLT(VT) &&
3365  "Invalid zext node, dst < src!");
3366  if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
3367  return getNode(ISD::ZERO_EXTEND, DL, VT,
3368  Operand.getNode()->getOperand(0));
3369  else if (OpOpcode == ISD::UNDEF)
3370  // zext(undef) = 0, because the top bits will be zero.
3371  return getConstant(0, DL, VT);
3372  break;
3373  case ISD::ANY_EXTEND:
3374  assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3375  "Invalid ANY_EXTEND!");
3376  if (Operand.getValueType() == VT) return Operand; // noop extension
3377  assert((!VT.isVector() ||
3378  VT.getVectorNumElements() ==
3379  Operand.getValueType().getVectorNumElements()) &&
3380  "Vector element count mismatch!");
3381  assert(Operand.getValueType().bitsLT(VT) &&
3382  "Invalid anyext node, dst < src!");
3383 
3384  if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3385  OpOpcode == ISD::ANY_EXTEND)
3386  // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
3387  return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
3388  else if (OpOpcode == ISD::UNDEF)
3389  return getUNDEF(VT);
3390 
3391  // (ext (trunx x)) -> x
3392  if (OpOpcode == ISD::TRUNCATE) {
3393  SDValue OpOp = Operand.getNode()->getOperand(0);
3394  if (OpOp.getValueType() == VT)
3395  return OpOp;
3396  }
3397  break;
3398  case ISD::TRUNCATE:
3399  assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3400  "Invalid TRUNCATE!");
3401  if (Operand.getValueType() == VT) return Operand; // noop truncate
3402  assert((!VT.isVector() ||
3403  VT.getVectorNumElements() ==
3404  Operand.getValueType().getVectorNumElements()) &&
3405  "Vector element count mismatch!");
3406  assert(Operand.getValueType().bitsGT(VT) &&
3407  "Invalid truncate node, src < dst!");
3408  if (OpOpcode == ISD::TRUNCATE)
3409  return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
3410  if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3411  OpOpcode == ISD::ANY_EXTEND) {
3412  // If the source is smaller than the dest, we still need an extend.
3413  if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
3414  .bitsLT(VT.getScalarType()))
3415  return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
3416  if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
3417  return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
3418  return Operand.getNode()->getOperand(0);
3419  }
3420  if (OpOpcode == ISD::UNDEF)
3421  return getUNDEF(VT);
3422  break;
3423  case ISD::BSWAP:
3424  assert(VT.isInteger() && VT == Operand.getValueType() &&
3425  "Invalid BSWAP!");
3426  assert((VT.getScalarSizeInBits() % 16 == 0) &&
3427  "BSWAP types must be a multiple of 16 bits!");
3428  if (OpOpcode == ISD::UNDEF)
3429  return getUNDEF(VT);
3430  break;
3431  case ISD::BITREVERSE:
3432  assert(VT.isInteger() && VT == Operand.getValueType() &&
3433  "Invalid BITREVERSE!");
3434  if (OpOpcode == ISD::UNDEF)
3435  return getUNDEF(VT);
3436  break;
3437  case ISD::BITCAST:
3438  // Basic sanity checking.
3439  assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
3440  "Cannot BITCAST between types of different sizes!");
3441  if (VT == Operand.getValueType()) return Operand; // noop conversion.
3442  if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
3443  return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
3444  if (OpOpcode == ISD::UNDEF)
3445  return getUNDEF(VT);
3446  break;
3447  case ISD::SCALAR_TO_VECTOR:
3448  assert(VT.isVector() && !Operand.getValueType().isVector() &&
3449  (VT.getVectorElementType() == Operand.getValueType() ||
3450  (VT.getVectorElementType().isInteger() &&
3451  Operand.getValueType().isInteger() &&
3452  VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
3453  "Illegal SCALAR_TO_VECTOR node!");
3454  if (OpOpcode == ISD::UNDEF)
3455  return getUNDEF(VT);
3456  // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
3457  if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
3458  isa<ConstantSDNode>(Operand.getOperand(1)) &&
3459  Operand.getConstantOperandVal(1) == 0 &&
3460  Operand.getOperand(0).getValueType() == VT)
3461  return Operand.getOperand(0);
3462  break;
3463  case ISD::FNEG:
3464  // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
3465  if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
3466  // FIXME: FNEG has no fast-math-flags to propagate; use the FSUB's flags?
3467  return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
3468  Operand.getNode()->getOperand(0),
3469  &cast<BinaryWithFlagsSDNode>(Operand.getNode())->Flags);
3470  if (OpOpcode == ISD::FNEG) // --X -> X
3471  return Operand.getNode()->getOperand(0);
3472  break;
3473  case ISD::FABS:
3474  if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
3475  return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
3476  break;
3477  }
3478 
// No fold applied: create (or CSE-find) the node itself. Glue-producing
// nodes are deliberately not CSE'd.
3479  SDNode *N;
3480  SDVTList VTs = getVTList(VT);
3481  SDValue Ops[] = {Operand};
3482  if (VT != MVT::Glue) { // Don't CSE flag producing nodes
// NOTE(review): original line 3483 is missing — presumably
// "FoldingSetNodeID ID;".
3484  AddNodeIDNode(ID, Opcode, VTs, Ops);
3485  void *IP = nullptr;
3486  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
3487  return SDValue(E, 0);
3488 
3489  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
3490  createOperands(N, Ops);
3491  CSEMap.InsertNode(N, IP);
3492  } else {
3493  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
3494  createOperands(N, Ops);
3495  }
3496 
3497  InsertNode(N);
3498  return SDValue(N, 0);
3499 }
3500 
3501 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1,
3502  const APInt &C2) {
3503  switch (Opcode) {
3504  case ISD::ADD: return std::make_pair(C1 + C2, true);
3505  case ISD::SUB: return std::make_pair(C1 - C2, true);
3506  case ISD::MUL: return std::make_pair(C1 * C2, true);
3507  case ISD::AND: return std::make_pair(C1 & C2, true);
3508  case ISD::OR: return std::make_pair(C1 | C2, true);
3509  case ISD::XOR: return std::make_pair(C1 ^ C2, true);
3510  case ISD::SHL: return std::make_pair(C1 << C2, true);
3511  case ISD::SRL: return std::make_pair(C1.lshr(C2), true);
3512  case ISD::SRA: return std::make_pair(C1.ashr(C2), true);
3513  case ISD::ROTL: return std::make_pair(C1.rotl(C2), true);
3514  case ISD::ROTR: return std::make_pair(C1.rotr(C2), true);
3515  case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true);
3516  case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true);
3517  case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true);
3518  case ISD::UMAX: return std::make_pair(C1.uge(C2) ? C1 : C2, true);
3519  case ISD::UDIV:
3520  if (!C2.getBoolValue())
3521  break;
3522  return std::make_pair(C1.udiv(C2), true);
3523  case ISD::UREM:
3524  if (!C2.getBoolValue())
3525  break;
3526  return std::make_pair(C1.urem(C2), true);
3527  case ISD::SDIV:
3528  if (!C2.getBoolValue())
3529  break;
3530  return std::make_pair(C1.sdiv(C2), true);
3531  case ISD::SREM:
3532  if (!C2.getBoolValue())
3533  break;
3534  return std::make_pair(C1.srem(C2), true);
3535  }
3536  return std::make_pair(APInt(1, 0), false);
3537 }
3538 
// NOTE(review): the first line(s) of this function's signature (original
// lines 3538-3539) are missing from this extraction; presumably
// "SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode,
//  const SDLoc &DL," — confirm upstream.
// Scalar constant-fold: delegates to FoldValue and wraps the result in a
// constant node. Opaque constants are deliberately never folded here.
3540  EVT VT, const ConstantSDNode *Cst1,
3541  const ConstantSDNode *Cst2) {
3542  if (Cst1->isOpaque() || Cst2->isOpaque())
3543  return SDValue();
3544 
3545  std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(),
3546  Cst2->getAPIntValue());
// Empty SDValue signals "could not fold" to the caller.
3547  if (!Folded.second)
3548  return SDValue();
3549  return getConstant(Folded.first, DL, VT);
3550 }
3551 
// NOTE(review): the first line of this function's signature (original line
// 3552) is missing from this extraction; presumably
// "SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT," —
// confirm upstream.
// Folds (add/sub GlobalAddress, constant) into a single GlobalAddress with
// an adjusted offset, when the target allows offset folding.
3553  const GlobalAddressSDNode *GA,
3554  const SDNode *N2) {
3555  if (GA->getOpcode() != ISD::GlobalAddress)
3556  return SDValue();
3557  if (!TLI->isOffsetFoldingLegal(GA))
3558  return SDValue();
3559  const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2);
3560  if (!Cst2)
3561  return SDValue();
3562  int64_t Offset = Cst2->getSExtValue();
3563  switch (Opcode) {
3564  case ISD::ADD: break;
// Negate via uint64_t to avoid signed-overflow UB on INT64_MIN.
3565  case ISD::SUB: Offset = -uint64_t(Offset); break;
3566  default: return SDValue();
3567  }
3568  return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT,
3569  GA->getOffset() + uint64_t(Offset));
3570 }
3571 
// NOTE(review): the first line of this function's signature (original line
// 3572) is missing from this extraction; presumably
// "SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode,
//  const SDLoc &DL," — confirm upstream.
// General node-based constant-fold: handles scalar pairs, symbol+offset,
// and element-wise folding of two constant BUILD_VECTORs.
3573  EVT VT, SDNode *Cst1,
3574  SDNode *Cst2) {
3575  // If the opcode is a target-specific ISD node, there's nothing we can
3576  // do here and the operand rules may not line up with the below, so
3577  // bail early.
3578  if (Opcode >= ISD::BUILTIN_OP_END)
3579  return SDValue();
3580 
3581  // Handle the case of two scalars.
3582  if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
3583  if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
3584  SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2);
3585  assert((!Folded || !VT.isVector()) &&
3586  "Can't fold vectors ops with scalar operands");
3587  return Folded;
3588  }
3589  }
3590 
3591  // fold (add Sym, c) -> Sym+c
3592  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1))
3593  return FoldSymbolOffset(Opcode, VT, GA, Cst2);
// Commutative ops also allow the symbol on the RHS.
3594  if (isCommutativeBinOp(Opcode))
3595  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2))
3596  return FoldSymbolOffset(Opcode, VT, GA, Cst1);
3597 
3598  // For vectors extract each constant element into Inputs so we can constant
3599  // fold them individually.
// NOTE(review): original lines 3600-3601 are missing from this extraction —
// presumably the dyn_casts of Cst1/Cst2 to BuildVectorSDNode, e.g.
// "BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);" (and BV2).
3602  if (!BV1 || !BV2)
3603  return SDValue();
3604 
3605  assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
3606 
3607  EVT SVT = VT.getScalarType();
3608  SmallVector<SDValue, 4> Outputs;
3609  for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
3610  SDValue V1 = BV1->getOperand(I);
3611  SDValue V2 = BV2->getOperand(I);
3612 
3613  // Avoid BUILD_VECTOR nodes that perform implicit truncation.
3614  // FIXME: This is valid and could be handled by truncation.
3615  if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
3616  return SDValue();
3617 
3618  // Fold one vector element.
3619  SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
3620 
3621  // Scalar folding only succeeded if the result is a constant or UNDEF.
3622  if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
3623  ScalarResult.getOpcode() != ISD::ConstantFP)
3624  return SDValue();
3625  Outputs.push_back(ScalarResult);
3626  }
3627 
3628  assert(VT.getVectorNumElements() == Outputs.size() &&
3629  "Vector size mismatch!");
3630 
3631  // We may have a vector type but a scalar result. Create a splat.
3632  Outputs.resize(VT.getVectorNumElements(), Outputs.back());
3633 
3634  // Build a big vector out of the scalar elements we generated.
3635  return getBuildVector(VT, SDLoc(), Outputs);
3636 }
3637 
/// FoldConstantVectorArithmetic - Attempt to constant-fold an operation on a
/// set of vector operands by folding each scalar lane independently and
/// rebuilding the result as a BUILD_VECTOR.  Returns an empty SDValue when
/// any lane fails to fold to a constant or UNDEF.
/// NOTE(review): the first signature line and the two lines declaring the
/// dyn_cast<BuildVectorSDNode> results ('BV' / 'InBV') appear to have been
/// dropped from this extract; verify against the original SelectionDAG.cpp.
3639  const SDLoc &DL, EVT VT,
3640  ArrayRef<SDValue> Ops,
3641  const SDNodeFlags *Flags) {
3642  // If the opcode is a target-specific ISD node, there's nothing we can
3643  // do here and the operand rules may not line up with the below, so
3644  // bail early.
3645  if (Opcode >= ISD::BUILTIN_OP_END)
3646  return SDValue();
3647 
3648  // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
3649  if (!VT.isVector())
3650  return SDValue();
3651 
3652  unsigned NumElts = VT.getVectorNumElements();
3653 
 // Predicate: operand is either a scalar or a vector with the same element
 // count as the result type.
3654  auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
3655  return !Op.getValueType().isVector() ||
3656  Op.getValueType().getVectorNumElements() == NumElts;
3657  };
3658 
3659  auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
3661  return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
3662  (BV && BV->isConstant());
3663  };
3664 
3665  // All operands must be vector types with the same number of elements as
3666  // the result type and must be either UNDEF or a build vector of constant
3667  // or UNDEF scalars.
3668  if (!all_of(Ops, IsConstantBuildVectorOrUndef) ||
3669  !all_of(Ops, IsScalarOrSameVectorSize))
3670  return SDValue();
3671 
3672  // If we are comparing vectors, then the result needs to be a i1 boolean
3673  // that is then sign-extended back to the legal result type.
3674  EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
3675 
3676  // Find legal integer scalar type for constant promotion and
3677  // ensure that its scalar size is at least as large as source.
3678  EVT LegalSVT = VT.getScalarType();
3679  if (LegalSVT.isInteger()) {
3680  LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
3681  if (LegalSVT.bitsLT(VT.getScalarType()))
3682  return SDValue();
3683  }
3684 
3685  // Constant fold each scalar lane separately.
3686  SmallVector<SDValue, 4> ScalarResults;
3687  for (unsigned i = 0; i != NumElts; i++) {
3688  SmallVector<SDValue, 4> ScalarOps;
3689  for (SDValue Op : Ops) {
3690  EVT InSVT = Op.getValueType().getScalarType();
3692  if (!InBV) {
3693  // We've checked that this is UNDEF or a constant of some kind.
3694  if (Op.isUndef())
3695  ScalarOps.push_back(getUNDEF(InSVT));
3696  else
3697  ScalarOps.push_back(Op);
3698  continue;
3699  }
3700 
3701  SDValue ScalarOp = InBV->getOperand(i);
3702  EVT ScalarVT = ScalarOp.getValueType();
3703 
3704  // Build vector (integer) scalar operands may need implicit
3705  // truncation - do this before constant folding.
3706  if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
3707  ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
3708 
3709  ScalarOps.push_back(ScalarOp);
3710  }
3711 
3712  // Constant fold the scalar operands.
3713  SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
3714 
3715  // Legalize the (integer) scalar constant if necessary.
3716  if (LegalSVT != SVT)
3717  ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
3718 
3719  // Scalar folding only succeeded if the result is a constant or UNDEF.
3720  if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
3721  ScalarResult.getOpcode() != ISD::ConstantFP)
3722  return SDValue();
3723  ScalarResults.push_back(ScalarResult);
3724  }
3725 
3726  return getBuildVector(VT, DL, ScalarResults);
3727 }
3728 
/// getNode - Get or create a two-operand node with the specified opcode,
/// result type and operands.  Performs per-opcode verification (asserts),
/// algebraic simplifications (identities, UNDEF folding) and constant
/// folding before falling back to the CSE map / node allocation.
/// NOTE(review): several lines that consisted only of hyperlinked
/// identifiers (e.g. the N1C/N2C/N1CFP/N2CFP constant-node dyn_casts, the
/// APFloat arithmetic calls and two case labels) appear to have been
/// dropped when this listing was extracted; verify against the original
/// SelectionDAG.cpp.
3729 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
3730  SDValue N1, SDValue N2,
3731  const SDNodeFlags *Flags) {
3736 
3737  // Canonicalize constant to RHS if commutative.
3738  if (isCommutativeBinOp(Opcode)) {
3739  if (N1C && !N2C) {
3740  std::swap(N1C, N2C);
3741  std::swap(N1, N2);
3742  } else if (N1CFP && !N2CFP) {
3743  std::swap(N1CFP, N2CFP);
3744  std::swap(N1, N2);
3745  }
3746  }
3747 
3748  switch (Opcode) {
3749  default: break;
3750  case ISD::TokenFactor:
3751  assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
3752  N2.getValueType() == MVT::Other && "Invalid token factor!");
3753  // Fold trivial token factors.
3754  if (N1.getOpcode() == ISD::EntryToken) return N2;
3755  if (N2.getOpcode() == ISD::EntryToken) return N1;
3756  if (N1 == N2) return N1;
3757  break;
3758  case ISD::CONCAT_VECTORS: {
3759  // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
3760  SDValue Ops[] = {N1, N2};
3761  if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
3762  return V;
3763  break;
3764  }
3765  case ISD::AND:
3766  assert(VT.isInteger() && "This operator does not apply to FP types!");
3767  assert(N1.getValueType() == N2.getValueType() &&
3768  N1.getValueType() == VT && "Binary operator types must match!");
3769  // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
3770  // worth handling here.
3771  if (N2C && N2C->isNullValue())
3772  return N2;
3773  if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
3774  return N1;
3775  break;
3776  case ISD::OR:
3777  case ISD::XOR:
3778  case ISD::ADD:
3779  case ISD::SUB:
3780  assert(VT.isInteger() && "This operator does not apply to FP types!");
3781  assert(N1.getValueType() == N2.getValueType() &&
3782  N1.getValueType() == VT && "Binary operator types must match!");
3783  // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
3784  // it's worth handling here.
3785  if (N2C && N2C->isNullValue())
3786  return N1;
3787  break;
3788  case ISD::UDIV:
3789  case ISD::UREM:
3790  case ISD::MULHU:
3791  case ISD::MULHS:
3792  case ISD::MUL:
3793  case ISD::SDIV:
3794  case ISD::SREM:
3795  case ISD::SMIN:
3796  case ISD::SMAX:
3797  case ISD::UMIN:
3798  case ISD::UMAX:
3799  assert(VT.isInteger() && "This operator does not apply to FP types!");
3800  assert(N1.getValueType() == N2.getValueType() &&
3801  N1.getValueType() == VT && "Binary operator types must match!");
3802  break;
3803  case ISD::FADD:
3804  case ISD::FSUB:
3805  case ISD::FMUL:
3806  case ISD::FDIV:
3807  case ISD::FREM:
 // These FP identities ignore signed zeros / NaNs, so they are only
 // applied when UnsafeFPMath is enabled.
3808  if (getTarget().Options.UnsafeFPMath) {
3809  if (Opcode == ISD::FADD) {
3810  // x+0 --> x
3811  if (N2CFP && N2CFP->getValueAPF().isZero())
3812  return N1;
3813  } else if (Opcode == ISD::FSUB) {
3814  // x-0 --> x
3815  if (N2CFP && N2CFP->getValueAPF().isZero())
3816  return N1;
3817  } else if (Opcode == ISD::FMUL) {
3818  // x*0 --> 0
3819  if (N2CFP && N2CFP->isZero())
3820  return N2;
3821  // x*1 --> x
3822  if (N2CFP && N2CFP->isExactlyValue(1.0))
3823  return N1;
3824  }
3825  }
3826  assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
3827  assert(N1.getValueType() == N2.getValueType() &&
3828  N1.getValueType() == VT && "Binary operator types must match!");
3829  break;
3830  case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
3831  assert(N1.getValueType() == VT &&
3832  N1.getValueType().isFloatingPoint() &&
3833  N2.getValueType().isFloatingPoint() &&
3834  "Invalid FCOPYSIGN!");
3835  break;
3836  case ISD::SHL:
3837  case ISD::SRA:
3838  case ISD::SRL:
3839  case ISD::ROTL:
3840  case ISD::ROTR:
3841  assert(VT == N1.getValueType() &&
3842  "Shift operators return type must be the same as their first arg");
3843  assert(VT.isInteger() && N2.getValueType().isInteger() &&
3844  "Shifts only work on integers");
3845  assert((!VT.isVector() || VT == N2.getValueType()) &&
3846  "Vector shift amounts must be in the same as their first arg");
3847  // Verify that the shift amount VT is big enough to hold valid shift
3848  // amounts. This catches things like trying to shift an i1024 value by an
3849  // i8, which is easy to fall into in generic code that uses
3850  // TLI.getShiftAmount().
3852  "Invalid use of small shift amount with oversized value!");
3853 
3854  // Always fold shifts of i1 values so the code generator doesn't need to
3855  // handle them. Since we know the size of the shift has to be less than the
3856  // size of the value, the shift/rotate count is guaranteed to be zero.
3857  if (VT == MVT::i1)
3858  return N1;
3859  if (N2C && N2C->isNullValue())
3860  return N1;
3861  break;
3862  case ISD::FP_ROUND_INREG: {
3863  EVT EVT = cast<VTSDNode>(N2)->getVT();
3864  assert(VT == N1.getValueType() && "Not an inreg round!");
3865  assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
3866  "Cannot FP_ROUND_INREG integer types");
3867  assert(EVT.isVector() == VT.isVector() &&
3868  "FP_ROUND_INREG type should be vector iff the operand "
3869  "type is vector!");
3870  assert((!EVT.isVector() ||
3871  EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3872  "Vector element counts must match in FP_ROUND_INREG");
3873  assert(EVT.bitsLE(VT) && "Not rounding down!");
 // EVT is referenced only inside asserts; silence the unused-variable
 // warning in NDEBUG builds.
3874  (void)EVT;
3875  if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
3876  break;
3877  }
3878  case ISD::FP_ROUND:
3879  assert(VT.isFloatingPoint() &&
3880  N1.getValueType().isFloatingPoint() &&
3881  VT.bitsLE(N1.getValueType()) &&
3882  N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
3883  "Invalid FP_ROUND!");
3884  if (N1.getValueType() == VT) return N1; // noop conversion.
3885  break;
3886  case ISD::AssertSext:
3887  case ISD::AssertZext: {
3888  EVT EVT = cast<VTSDNode>(N2)->getVT();
3889  assert(VT == N1.getValueType() && "Not an inreg extend!");
3890  assert(VT.isInteger() && EVT.isInteger() &&
3891  "Cannot *_EXTEND_INREG FP types");
3892  assert(!EVT.isVector() &&
3893  "AssertSExt/AssertZExt type should be the vector element type "
3894  "rather than the vector type!");
3895  assert(EVT.bitsLE(VT) && "Not extending!");
3896  if (VT == EVT) return N1; // noop assertion.
3897  break;
3898  }
3899  case ISD::SIGN_EXTEND_INREG: {
3900  EVT EVT = cast<VTSDNode>(N2)->getVT();
3901  assert(VT == N1.getValueType() && "Not an inreg extend!");
3902  assert(VT.isInteger() && EVT.isInteger() &&
3903  "Cannot *_EXTEND_INREG FP types");
3904  assert(EVT.isVector() == VT.isVector() &&
3905  "SIGN_EXTEND_INREG type should be vector iff the operand "
3906  "type is vector!");
3907  assert((!EVT.isVector() ||
3908  EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3909  "Vector element counts must match in SIGN_EXTEND_INREG");
3910  assert(EVT.bitsLE(VT) && "Not extending!");
3911  if (EVT == VT) return N1; // Not actually extending
3912 
 // Shift left then arithmetic-shift right so the sign bit of the low
 // FromBits bits is replicated through the upper bits.
3913  auto SignExtendInReg = [&](APInt Val) {
3914  unsigned FromBits = EVT.getScalarSizeInBits();
3915  Val <<= Val.getBitWidth() - FromBits;
3916  Val = Val.ashr(Val.getBitWidth() - FromBits);
3917  return getConstant(Val, DL, VT.getScalarType());
3918  };
3919 
3920  if (N1C) {
3921  const APInt &Val = N1C->getAPIntValue();
3922  return SignExtendInReg(Val);
3923  }
3926  for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
3927  SDValue Op = N1.getOperand(i);
3928  if (Op.isUndef()) {
3929  Ops.push_back(getUNDEF(VT.getScalarType()));
3930  continue;
3931  }
3932  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3933  APInt Val = C->getAPIntValue();
3934  Val = Val.zextOrTrunc(VT.getScalarSizeInBits());
3935  Ops.push_back(SignExtendInReg(Val));
3936  continue;
3937  }
3938  break;
3939  }
 // Only rebuild when every element folded; otherwise fall through and
 // create the node normally.
3940  if (Ops.size() == VT.getVectorNumElements())
3941  return getBuildVector(VT, DL, Ops);
3942  }
3943  break;
3944  }
3946  // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
3947  if (N1.isUndef())
3948  return getUNDEF(VT);
3949 
3950  // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF
3951  if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements())
3952  return getUNDEF(VT);
3953 
3954  // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
3955  // expanding copies of large vectors from registers.
3956  if (N2C &&
3957  N1.getOpcode() == ISD::CONCAT_VECTORS &&
3958  N1.getNumOperands() > 0) {
3959  unsigned Factor =
3961  return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
3962  N1.getOperand(N2C->getZExtValue() / Factor),
3963  getConstant(N2C->getZExtValue() % Factor, DL,
3964  N2.getValueType()));
3965  }
3966 
3967  // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
3968  // expanding large vector constants.
3969  if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3970  SDValue Elt = N1.getOperand(N2C->getZExtValue());
3971 
3972  if (VT != Elt.getValueType())
3973  // If the vector element type is not legal, the BUILD_VECTOR operands
3974  // are promoted and implicitly truncated, and the result implicitly
3975  // extended. Make that explicit here.
3976  Elt = getAnyExtOrTrunc(Elt, DL, VT);
3977 
3978  return Elt;
3979  }
3980 
3981  // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
3982  // operations are lowered to scalars.
3983  if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
3984  // If the indices are the same, return the inserted element else
3985  // if the indices are known different, extract the element from
3986  // the original vector.
3987  SDValue N1Op2 = N1.getOperand(2);
3988  ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
3989 
3990  if (N1Op2C && N2C) {
3991  if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
3992  if (VT == N1.getOperand(1).getValueType())
3993  return N1.getOperand(1);
3994  else
3995  return getSExtOrTrunc(N1.getOperand(1), DL, VT);
3996  }
3997 
3998  return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
3999  }
4000  }
4001  break;
4002  case ISD::EXTRACT_ELEMENT:
4003  assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
4004  assert(!N1.getValueType().isVector() && !VT.isVector() &&
4005  (N1.getValueType().isInteger() == VT.isInteger()) &&
4006  N1.getValueType() != VT &&
4007  "Wrong types for EXTRACT_ELEMENT!");
4008 
4009  // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
4010  // 64-bit integers into 32-bit parts. Instead of building the extract of
4011  // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
4012  if (N1.getOpcode() == ISD::BUILD_PAIR)
4013  return N1.getOperand(N2C->getZExtValue());
4014 
4015  // EXTRACT_ELEMENT of a constant int is also very common.
4016  if (N1C) {
4017  unsigned ElementSize = VT.getSizeInBits();
4018  unsigned Shift = ElementSize * N2C->getZExtValue();
4019  APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
4020  return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
4021  }
4022  break;
4024  if (VT.isSimple() && N1.getValueType().isSimple()) {
4025  assert(VT.isVector() && N1.getValueType().isVector() &&
4026  "Extract subvector VTs must be a vectors!");
4029  "Extract subvector VTs must have the same element type!");
4030  assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
4031  "Extract subvector must be from larger vector to smaller vector!");
4032 
4033  if (N2C) {
4034  assert((VT.getVectorNumElements() + N2C->getZExtValue()
4036  && "Extract subvector overflow!");
4037  }
4038 
4039  // Trivial extraction.
4040  if (VT.getSimpleVT() == N1.getSimpleValueType())
4041  return N1;
4042 
4043  // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
4044  // during shuffle legalization.
4045  if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
4046  VT == N1.getOperand(1).getValueType())
4047  return N1.getOperand(1);
4048  }
4049  break;
4050  }
4051 
4052  // Perform trivial constant folding.
4053  if (SDValue SV =
4054  FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
4055  return SV;
4056 
4057  // Constant fold FP operations.
4058  bool HasFPExceptions = TLI->hasFloatingPointExceptions();
4059  if (N1CFP) {
4060  if (N2CFP) {
4061  APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
 // NOTE(review): the APFloat arithmetic lines (s = V1.add(V2, ...) etc.)
 // appear to have been dropped from this extract; the result is only
 // used when FP exceptions are disabled or no invalid-op was raised.
4063  switch (Opcode) {
4064  case ISD::FADD:
4066  if (!HasFPExceptions || s != APFloat::opInvalidOp)
4067  return getConstantFP(V1, DL, VT);
4068  break;
4069  case ISD::FSUB:
4071  if (!HasFPExceptions || s!=APFloat::opInvalidOp)
4072  return getConstantFP(V1, DL, VT);
4073  break;
4074  case ISD::FMUL:
4076  if (!HasFPExceptions || s!=APFloat::opInvalidOp)
4077  return getConstantFP(V1, DL, VT);
4078  break;
4079  case ISD::FDIV:
4081  if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
4082  s!=APFloat::opDivByZero)) {
4083  return getConstantFP(V1, DL, VT);
4084  }
4085  break;
4086  case ISD::FREM :
4087  s = V1.mod(V2);
4088  if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
4089  s!=APFloat::opDivByZero)) {
4090  return getConstantFP(V1, DL, VT);
4091  }
4092  break;
4093  case ISD::FCOPYSIGN:
4094  V1.copySign(V2);
4095  return getConstantFP(V1, DL, VT);
4096  default: break;
4097  }
4098  }
4099 
4100  if (Opcode == ISD::FP_ROUND) {
4101  APFloat V = N1CFP->getValueAPF(); // make copy
4102  bool ignored;
4103  // This can return overflow, underflow, or inexact; we don't care.
4104  // FIXME need to be more flexible about rounding mode.
4105  (void)V.convert(EVTToAPFloatSemantics(VT),
4106  APFloat::rmNearestTiesToEven, &ignored);
4107  return getConstantFP(V, DL, VT);
4108  }
4109  }
4110 
4111  // Canonicalize an UNDEF to the RHS, even over a constant.
4112  if (N1.isUndef()) {
4113  if (isCommutativeBinOp(Opcode)) {
4114  std::swap(N1, N2);
4115  } else {
4116  switch (Opcode) {
4117  case ISD::FP_ROUND_INREG:
4119  case ISD::SUB:
4120  case ISD::FSUB:
4121  case ISD::FDIV:
4122  case ISD::FREM:
4123  case ISD::SRA:
4124  return N1; // fold op(undef, arg2) -> undef
4125  case ISD::UDIV:
4126  case ISD::SDIV:
4127  case ISD::UREM:
4128  case ISD::SREM:
4129  case ISD::SRL:
4130  case ISD::SHL:
4131  if (!VT.isVector())
4132  return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
4133  // For vectors, we can't easily build an all zero vector, just return
4134  // the LHS.
4135  return N2;
4136  }
4137  }
4138  }
4139 
4140  // Fold a bunch of operators when the RHS is undef.
4141  if (N2.isUndef()) {
4142  switch (Opcode) {
4143  case ISD::XOR:
4144  if (N1.isUndef())
4145  // Handle undef ^ undef -> 0 special case. This is a common
4146  // idiom (misuse).
4147  return getConstant(0, DL, VT);
4149  case ISD::ADD:
4150  case ISD::ADDC:
4151  case ISD::ADDE:
4152  case ISD::SUB:
4153  case ISD::UDIV:
4154  case ISD::SDIV:
4155  case ISD::UREM:
4156  case ISD::SREM:
4157  return N2; // fold op(arg1, undef) -> undef
4158  case ISD::FADD:
4159  case ISD::FSUB:
4160  case ISD::FMUL:
4161  case ISD::FDIV:
4162  case ISD::FREM:
4164  return N2;
4165  break;
4166  case ISD::MUL:
4167  case ISD::AND:
4168  case ISD::SRL:
4169  case ISD::SHL:
4170  if (!VT.isVector())
4171  return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
4172  // For vectors, we can't easily build an all zero vector, just return
4173  // the LHS.
4174  return N1;
4175  case ISD::OR:
4176  if (!VT.isVector())
4177  return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
4178  // For vectors, we can't easily build an all one vector, just return
4179  // the LHS.
4180  return N1;
4181  case ISD::SRA:
4182  return N1;
4183  }
4184  }
4185 
4186  // Memoize this node if possible.
4187  SDNode *N;
4188  SDVTList VTs = getVTList(VT);
 // Glue-producing nodes are never CSE'd; everything else goes through the
 // FoldingSet so identical nodes are shared.
4189  if (VT != MVT::Glue) {
4190  SDValue Ops[] = {N1, N2};
4192  AddNodeIDNode(ID, Opcode, VTs, Ops);
4193  void *IP = nullptr;
4194  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4195  if (Flags)
4196  E->intersectFlagsWith(Flags);
4197  return SDValue(E, 0);
4198  }
4199 
4200  N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
4201  CSEMap.InsertNode(N, IP);
4202  } else {
4203  N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
4204  }
4205 
4206  InsertNode(N);
4207  return SDValue(N, 0);
4208 }
4209 
/// getNode - Get or create a three-operand node with the specified opcode,
/// result type and operands, applying opcode-specific simplifications
/// (FMA constant folding, SETCC folding, SELECT with a constant condition,
/// trivial subvector insertion, no-op BITCAST) before the CSE lookup.
/// NOTE(review): lines that consisted only of hyperlinked identifiers (the
/// N1CFP/N2CFP/N3CFP dyn_casts, the fusedMultiplyAdd call and fragments of
/// two asserts) appear to have been dropped when this listing was
/// extracted; verify against the original SelectionDAG.cpp.
4210 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4211  SDValue N1, SDValue N2, SDValue N3) {
4212  // Perform various simplifications.
4213  switch (Opcode) {
4214  case ISD::FMA: {
4218  if (N1CFP && N2CFP && N3CFP) {
4219  APFloat V1 = N1CFP->getValueAPF();
4220  const APFloat &V2 = N2CFP->getValueAPF();
4221  const APFloat &V3 = N3CFP->getValueAPF();
4222  APFloat::opStatus s =
4225  return getConstantFP(V1, DL, VT);
4226  }
4227  break;
4228  }
4229  case ISD::CONCAT_VECTORS: {
4230  // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
4231  SDValue Ops[] = {N1, N2, N3};
4232  if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
4233  return V;
4234  break;
4235  }
4236  case ISD::SETCC: {
4237  // Use FoldSetCC to simplify SETCC's.
4238  if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
4239  return V;
4240  // Vector constant folding.
4241  SDValue Ops[] = {N1, N2, N3};
4242  if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4243  return V;
4244  break;
4245  }
4246  case ISD::SELECT:
4247  if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
4248  if (N1C->getZExtValue())
4249  return N2; // select true, X, Y -> X
4250  return N3; // select false, X, Y -> Y
4251  }
4252 
4253  if (N2 == N3) return N2; // select C, X, X -> X
4254  break;
4255  case ISD::VECTOR_SHUFFLE:
4256  llvm_unreachable("should use getVectorShuffle constructor!");
4257  case ISD::INSERT_VECTOR_ELT: {
4259  // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF
4260  if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
4261  return getUNDEF(VT);
4262  break;
4263  }
4264  case ISD::INSERT_SUBVECTOR: {
4265  SDValue Index = N3;
4266  if (VT.isSimple() && N1.getValueType().isSimple()
4267  && N2.getValueType().isSimple()) {
4268  assert(VT.isVector() && N1.getValueType().isVector() &&
4269  N2.getValueType().isVector() &&
4270  "Insert subvector VTs must be a vectors");
4271  assert(VT == N1.getValueType() &&
4272  "Dest and insert subvector source types must match!");
4274  "Insert subvector must be from smaller vector to larger vector!");
4275  if (isa<ConstantSDNode>(Index)) {
4277  cast<ConstantSDNode>(Index)->getZExtValue()
4278  <= VT.getVectorNumElements())
4279  && "Insert subvector overflow!");
4280  }
4281 
4282  // Trivial insertion.
4283  if (VT.getSimpleVT() == N2.getSimpleValueType())
4284  return N2;
4285  }
4286  break;
4287  }
4288  case ISD::BITCAST:
4289  // Fold bit_convert nodes from a type to themselves.
4290  if (N1.getValueType() == VT)
4291  return N1;
4292  break;
4293  }
4294 
4295  // Memoize node if it doesn't produce a flag.
4296  SDNode *N;
4297  SDVTList VTs = getVTList(VT);
4298  SDValue Ops[] = {N1, N2, N3};
4299  if (VT != MVT::Glue) {
4301  AddNodeIDNode(ID, Opcode, VTs, Ops);
4302  void *IP = nullptr;
4303  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4304  return SDValue(E, 0);
4305 
4306  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4307  createOperands(N, Ops);
4308  CSEMap.InsertNode(N, IP);
4309  } else {
4310  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4311  createOperands(N, Ops);
4312  }
4313 
4314  InsertNode(N);
4315  return SDValue(N, 0);
4316 }
4317 
4318 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4319  SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
4320  SDValue Ops[] = { N1, N2, N3, N4 };
4321  return getNode(Opcode, DL, VT, Ops);
4322 }
4323 
4324 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4325  SDValue N1, SDValue N2, SDValue N3, SDValue N4,
4326  SDValue N5) {
4327  SDValue Ops[] = { N1, N2, N3, N4, N5 };
4328  return getNode(Opcode, DL, VT, Ops);
4329 }
4330 
4331 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
4332 /// the incoming stack arguments to be loaded from the stack.
 /// NOTE(review): the signature line (taking the incoming Chain) appears to
 /// have been dropped from this extract; verify against the original
 /// SelectionDAG.cpp.
4334  SmallVector<SDValue, 8> ArgChains;
4335 
4336  // Include the original chain at the beginning of the list. When this is
4337  // used by target LowerCall hooks, this helps legalize find the
4338  // CALLSEQ_BEGIN node.
4339  ArgChains.push_back(Chain);
4340 
4341  // Add a chain value for each stack argument.
 // Walk users of the entry token and collect the chain result (value #1)
 // of every load whose base is a frame index with a negative index.
4342  for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
4343  UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
4344  if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
4345  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
4346  if (FI->getIndex() < 0)
4347  ArgChains.push_back(SDValue(L, 1));
4348 
4349  // Build a tokenfactor for all the chains.
4350  return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
4351 }
4352 
4353 /// getMemsetValue - Vectorized representation of the memset value
4354 /// operand.
 /// NOTE(review): the first signature line (taking the fill Value, the
 /// result VT and the DAG) appears to have been dropped from this extract;
 /// verify against the original SelectionDAG.cpp.
4356  const SDLoc &dl) {
4357  assert(!Value.isUndef());
4358 
4359  unsigned NumBits = VT.getScalarSizeInBits();
 // Constant fill byte: splat it directly to the full bit width.
4360  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
4361  assert(C->getAPIntValue().getBitWidth() == 8);
4362  APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
4363  if (VT.isInteger())
4364  return DAG.getConstant(Val, dl, VT);
4365  return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
4366  VT);
4367  }
4368 
4369  assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
4370  EVT IntVT = VT.getScalarType();
4371  if (!IntVT.isInteger())
4372  IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
4373 
4374  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
4375  if (NumBits > 8) {
4376  // Use a multiplication with 0x010101... to extend the input to the
4377  // required length.
4378  APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
4379  Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
4380  DAG.getConstant(Magic, dl, IntVT));
4381  }
4382 
 // Bitcast to an FP scalar type if needed, then splat into a vector when
 // the requested type is a vector.
4383  if (VT != Value.getValueType() && !VT.isInteger())
4384  Value = DAG.getBitcast(VT.getScalarType(), Value);
4385  if (VT != Value.getValueType())
4386  Value = DAG.getSplatBuildVector(VT, dl, Value);
4387 
4388  return Value;
4389 }
4390 
4391 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
4392 /// used when a memcpy is turned into a memset when the source is a constant
4393 /// string ptr.
 /// NOTE(review): one line of the zero-vector construction (apparently an
 /// EVT::getVectorVT call) appears to have been dropped from this extract;
 /// verify against the original SelectionDAG.cpp.
4394 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
4395  const TargetLowering &TLI, StringRef Str) {
4396  // Handle vector with all elements zero.
4397  if (Str.empty()) {
4398  if (VT.isInteger())
4399  return DAG.getConstant(0, dl, VT);
4400  else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
4401  return DAG.getConstantFP(0.0, dl, VT);
4402  else if (VT.isVector()) {
4403  unsigned NumElts = VT.getVectorNumElements();
4404  MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
4405  return DAG.getNode(ISD::BITCAST, dl, VT,
4406  DAG.getConstant(0, dl,
4408  EltVT, NumElts)));
4409  } else
4410  llvm_unreachable("Expected type!");
4411  }
4412 
4413  assert(!VT.isVector() && "Can't handle vector type here!");
4414  unsigned NumVTBits = VT.getSizeInBits();
4415  unsigned NumVTBytes = NumVTBits / 8;
4416  unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
4417 
 // Pack the string bytes into an integer, honoring the target's
 // endianness.
4418  APInt Val(NumVTBits, 0);
4419  if (DAG.getDataLayout().isLittleEndian()) {
4420  for (unsigned i = 0; i != NumBytes; ++i)
4421  Val |= (uint64_t)(unsigned char)Str[i] << i*8;
4422  } else {
4423  for (unsigned i = 0; i != NumBytes; ++i)
4424  Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
4425  }
4426 
4427  // If the "cost" of materializing the integer immediate is less than the cost
4428  // of a load, then it is cost effective to turn the load into the immediate.
4429  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
4430  if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
4431  return DAG.getConstant(Val, dl, VT);
 // Empty SDValue tells the caller to fall back to a real load.
4432  return SDValue(nullptr, 0);
4433 }
4434 
 /// getMemBasePlusOffset - Return Base plus a constant Offset, performing
 /// the addition in Base's own value type.
 /// NOTE(review): the first signature line appears to have been dropped
 /// from this extract; verify against the original SelectionDAG.cpp.
4436  const SDLoc &DL) {
4437  EVT VT = Base.getValueType();
4438  return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT));
4439 }
4440 
4441 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
4442 ///
4443 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
4444  uint64_t SrcDelta = 0;
4445  GlobalAddressSDNode *G = nullptr;
4446  if (Src.getOpcode() == ISD::GlobalAddress)
4447  G = cast<GlobalAddressSDNode>(Src);
4448  else if (Src.getOpcode() == ISD::ADD &&
4449  Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
4450  Src.getOperand(1).getOpcode() == ISD::Constant) {
4451  G = cast<GlobalAddressSDNode>(Src.getOperand(0));
4452  SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
4453  }
4454  if (!G)
4455  return false;
4456 
4457  return getConstantStringInfo(G->getGlobal(), Str,
4458  SrcDelta + G->getOffset(), false);
4459 }
4460 
4461 /// Determines the optimal series of memory ops to replace the memset / memcpy.
4462 /// Return true if the number of memory ops is below the threshold (Limit).
4463 /// It returns the types of the sequence of memory ops to perform
4464 /// memset / memcpy by reference.
 /// NOTE(review): one line of the f64 fallback check (apparently a STORE
 /// legality query) appears to have been dropped from this extract; verify
 /// against the original SelectionDAG.cpp.
4465 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
4466  unsigned Limit, uint64_t Size,
4467  unsigned DstAlign, unsigned SrcAlign,
4468  bool IsMemset,
4469  bool ZeroMemset,
4470  bool MemcpyStrSrc,
4471  bool AllowOverlap,
4472  unsigned DstAS, unsigned SrcAS,
4473  SelectionDAG &DAG,
4474  const TargetLowering &TLI) {
4475  assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
4476  "Expecting memcpy / memset source to meet alignment requirement!");
4477  // If 'SrcAlign' is zero, that means the memory operation does not need to
4478  // load the value, i.e. memset or memcpy from constant string. Otherwise,
4479  // it's the inferred alignment of the source. 'DstAlign', on the other hand,
4480  // is the specified alignment of the memory operation. If it is zero, that
4481  // means it's possible to change the alignment of the destination.
4482  // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
4483  // not need to be loaded.
4484  EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
4485  IsMemset, ZeroMemset, MemcpyStrSrc,
4486  DAG.getMachineFunction());
4487 
4488  if (VT == MVT::Other) {
 // Target expressed no preference: pick a width from the destination
 // alignment (low three bits select i64/i32/i16/i8), then clamp to the
 // widest legal integer type.
4489  if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(DstAS) ||
4490  TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign)) {
4491  VT = TLI.getPointerTy(DAG.getDataLayout(), DstAS);
4492  } else {
4493  switch (DstAlign & 7) {
4494  case 0: VT = MVT::i64; break;
4495  case 4: VT = MVT::i32; break;
4496  case 2: VT = MVT::i16; break;
4497  default: VT = MVT::i8; break;
4498  }
4499  }
4500 
4501  MVT LVT = MVT::i64;
4502  while (!TLI.isTypeLegal(LVT))
4503  LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
4504  assert(LVT.isInteger());
4505 
4506  if (VT.bitsGT(LVT))
4507  VT = LVT;
4508  }
4509 
4510  unsigned NumMemOps = 0;
4511  while (Size != 0) {
4512  unsigned VTSize = VT.getSizeInBits() / 8;
4513  while (VTSize > Size) {
4514  // For now, only use non-vector load / store's for the left-over pieces.
4515  EVT NewVT = VT;
4516  unsigned NewVTSize;
4517 
4518  bool Found = false;
4519  if (VT.isVector() || VT.isFloatingPoint()) {
4520  NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
4521  if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
4522  TLI.isSafeMemOpType(NewVT.getSimpleVT()))
4523  Found = true;
4524  else if (NewVT == MVT::i64 &&
4526  TLI.isSafeMemOpType(MVT::f64)) {
4527  // i64 is usually not legal on 32-bit targets, but f64 may be.
4528  NewVT = MVT::f64;
4529  Found = true;
4530  }
4531  }
4532 
4533  if (!Found) {
 // Step down through progressively narrower integer types until one
 // is safe, bottoming out at i8.
4534  do {
4535  NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
4536  if (NewVT == MVT::i8)
4537  break;
4538  } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
4539  }
4540  NewVTSize = NewVT.getSizeInBits() / 8;
4541 
4542  // If the new VT cannot cover all of the remaining bits, then consider
4543  // issuing a (or a pair of) unaligned and overlapping load / store.
4544  // FIXME: Only does this for 64-bit or more since we don't have proper
4545  // cost model for unaligned load / store.
4546  bool Fast;
4547  if (NumMemOps && AllowOverlap &&
4548  VTSize >= 8 && NewVTSize < Size &&
4549  TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast)
4550  VTSize = Size;
4551  else {
4552  VT = NewVT;
4553  VTSize = NewVTSize;
4554  }
4555  }
4556 
4557  if (++NumMemOps > Limit)
4558  return false;
4559 
4560  MemOps.push_back(VT);
4561  Size -= VTSize;
4562  }
4563 
4564  return true;
4565 }
4566 
 /// shouldLowerMemFuncForSize - Whether memcpy/memset-style lowering for
 /// this function should favor code size over performance.
 /// NOTE(review): the signature line (taking the MachineFunction) appears
 /// to have been dropped from this extract; verify against the original
 /// SelectionDAG.cpp.
4568  // On Darwin, -Os means optimize for size without hurting performance, so
4569  // only really optimize for size when -Oz (MinSize) is used.
4570  if (MF.getTarget().getTargetTriple().isOSDarwin())
4571  return MF.getFunction()->optForMinSize();
4572  return MF.getFunction()->optForSize();
4573 }
4574 
4576  SDValue Chain, SDValue Dst, SDValue Src,
4577  uint64_t Size, unsigned Align,
4578  bool isVol, bool AlwaysInline,
4579  MachinePointerInfo DstPtrInfo,
4580  MachinePointerInfo SrcPtrInfo) {
4581  // Turn a memcpy of undef to nop.
4582  if (Src.isUndef())
4583  return Chain;
4584 
4585  // Expand memcpy to a series of load and store ops if the size operand falls
4586  // below a certain threshold.
4587  // TODO: In the AlwaysInline case, if the size is big then generate a loop
4588  // rather than maybe a humongous number of loads and stores.
4589  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4590  std::vector<EVT> MemOps;
4591  bool DstAlignCanChange = false;
4592  MachineFunction &MF = DAG.getMachineFunction();
4593  MachineFrameInfo &MFI = MF.getFrameInfo();
4594  bool OptSize = shouldLowerMemFuncForSize(MF);
  // Only a destination that is a non-fixed stack object may have its
  // alignment raised below.
4596  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
4597  DstAlignCanChange = true;
4598  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
4599  if (Align > SrcAlign)
4600  SrcAlign = Align;
  // A copy whose source is a constant string may be lowered to stores of
  // immediates instead of load/store pairs.
4601  StringRef Str;
4602  bool CopyFromStr = isMemSrcFromString(Src, Str);
4603  bool isZeroStr = CopyFromStr && Str.empty();
4604  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
4605 
4606  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
4607  (DstAlignCanChange ? 0 : Align),
4608  (isZeroStr ? 0 : SrcAlign),
4609  false, false, CopyFromStr, true,
4610  DstPtrInfo.getAddrSpace(),
4611  SrcPtrInfo.getAddrSpace(),
4612  DAG, TLI))
4613  return SDValue();
4614 
4615  if (DstAlignCanChange) {
4616  Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4617  unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4618 
4619  // Don't promote to an alignment that would require dynamic stack
4620  // realignment.
4621  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
4622  if (!TRI->needsStackRealignment(MF))
4623  while (NewAlign > Align &&
4625  NewAlign /= 2;
4626 
4627  if (NewAlign > Align) {
4628  // Give the stack frame object a larger alignment if needed.
4629  if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
4630  MFI.setObjectAlignment(FI->getIndex(), NewAlign);
4631  Align = NewAlign;
4632  }
4633  }
4634 
4635  MachineMemOperand::Flags MMOFlags =
4637  SmallVector<SDValue, 8> OutChains;
4638  unsigned NumMemOps = MemOps.size();
  // Emit one store (from a string immediate) or one load/store pair per
  // type chosen by FindOptimalMemOpLowering.
4639  uint64_t SrcOff = 0, DstOff = 0;
4640  for (unsigned i = 0; i != NumMemOps; ++i) {
4641  EVT VT = MemOps[i];
4642  unsigned VTSize = VT.getSizeInBits() / 8;
4643  SDValue Value, Store;
4644 
4645  if (VTSize > Size) {
4646  // Issuing an unaligned load / store pair that overlaps with the previous
4647  // pair. Adjust the offset accordingly.
4648  assert(i == NumMemOps-1 && i != 0);
4649  SrcOff -= VTSize - Size;
4650  DstOff -= VTSize - Size;
4651  }
4652 
4653  if (CopyFromStr &&
4654  (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
4655  // It's unlikely a store of a vector immediate can be done in a single
4656  // instruction. It would require a load from a constantpool first.
4657  // We only handle zero vectors here.
4658  // FIXME: Handle other cases where store of vector immediate is done in
4659  // a single instruction.
4660  Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
4661  if (Value.getNode())
4662  Store = DAG.getStore(Chain, dl, Value,
4663  DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4664  DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
4665  }
4666 
4667  if (!Store.getNode()) {
4668  // The type might not be legal for the target. This should only happen
4669  // if the type is smaller than a legal type, as on PPC, so the right
4670  // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
4671  // to Load/Store if NVT==VT.
4672  // FIXME does the case above also need this?
4673  EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
4674  assert(NVT.bitsGE(VT));
4675  Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
4676  DAG.getMemBasePlusOffset(Src, SrcOff, dl),
4677  SrcPtrInfo.getWithOffset(SrcOff), VT,
4678  MinAlign(SrcAlign, SrcOff), MMOFlags);
4679  OutChains.push_back(Value.getValue(1));
4680  Store = DAG.getTruncStore(
4681  Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4682  DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
4683  }
4684  OutChains.push_back(Store);
4685  SrcOff += VTSize;
4686  DstOff += VTSize;
4687  Size -= VTSize;
4688  }
4689 
  // Join every emitted access into a single token so later users depend on
  // all of them.
4690  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4691 }
4692 
4694  SDValue Chain, SDValue Dst, SDValue Src,
4695  uint64_t Size, unsigned Align,
4696  bool isVol, bool AlwaysInline,
4697  MachinePointerInfo DstPtrInfo,
4698  MachinePointerInfo SrcPtrInfo) {
4699  // Turn a memmove of undef to nop.
4700  if (Src.isUndef())
4701  return Chain;
4702 
4703  // Expand memmove to a series of load and store ops if the size operand falls
4704  // below a certain threshold.
4705  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4706  std::vector<EVT> MemOps;
4707  bool DstAlignCanChange = false;
4708  MachineFunction &MF = DAG.getMachineFunction();
4709  MachineFrameInfo &MFI = MF.getFrameInfo();
4710  bool OptSize = shouldLowerMemFuncForSize(MF);
4712  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
4713  DstAlignCanChange = true;
4714  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
4715  if (Align > SrcAlign)
4716  SrcAlign = Align;
4717  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
4718 
4719  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
4720  (DstAlignCanChange ? 0 : Align), SrcAlign,
4721  false, false, false, false,
4722  DstPtrInfo.getAddrSpace(),
4723  SrcPtrInfo.getAddrSpace(),
4724  DAG, TLI))
4725  return SDValue();
4726 
4727  if (DstAlignCanChange) {
4728  Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4729  unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4730  if (NewAlign > Align) {
4731  // Give the stack frame object a larger alignment if needed.
4732  if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
4733  MFI.setObjectAlignment(FI->getIndex(), NewAlign);
4734  Align = NewAlign;
4735  }
4736  }
4737 
4738  MachineMemOperand::Flags MMOFlags =
4740  uint64_t SrcOff = 0, DstOff = 0;
4741  SmallVector<SDValue, 8> LoadValues;
4742  SmallVector<SDValue, 8> LoadChains;
4743  SmallVector<SDValue, 8> OutChains;
4744  unsigned NumMemOps = MemOps.size();
  // Unlike memcpy, source and destination may overlap, so all loads are
  // emitted before any store — the source bytes cannot be clobbered.
4745  for (unsigned i = 0; i < NumMemOps; i++) {
4746  EVT VT = MemOps[i];
4747  unsigned VTSize = VT.getSizeInBits() / 8;
4748  SDValue Value;
4749 
4750  Value =
4751  DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
4752  SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, MMOFlags);
4753  LoadValues.push_back(Value);
4754  LoadChains.push_back(Value.getValue(1));
4755  SrcOff += VTSize;
4756  }
  // Tie all the loads together before issuing any of the stores.
4757  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
4758  OutChains.clear();
4759  for (unsigned i = 0; i < NumMemOps; i++) {
4760  EVT VT = MemOps[i];
4761  unsigned VTSize = VT.getSizeInBits() / 8;
4762  SDValue Store;
4763 
4764  Store = DAG.getStore(Chain, dl, LoadValues[i],
4765  DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4766  DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
4767  OutChains.push_back(Store);
4768  DstOff += VTSize;
4769  }
4770 
4771  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4772 }
4773 
4774 /// \brief Lower the call to 'memset' intrinsic function into a series of store
4775 /// operations.
4776 ///
4777 /// \param DAG Selection DAG where lowered code is placed.
4778 /// \param dl Link to corresponding IR location.
4779 /// \param Chain Control flow dependency.
4780 /// \param Dst Pointer to destination memory location.
4781 /// \param Src Value of byte to write into the memory.
4782 /// \param Size Number of bytes to write.
4783 /// \param Align Alignment of the destination in bytes.
4784 /// \param isVol True if destination is volatile.
4785 /// \param DstPtrInfo IR information on the memory pointer.
4786 /// \returns New head in the control flow if lowering was successful; an
4787 /// empty SDValue otherwise.
4788 ///
4789 /// The function tries to replace 'llvm.memset' intrinsic with several store
4790 /// operations and value calculation code. This is usually profitable for small
4791 /// memory size.
4793  SDValue Chain, SDValue Dst, SDValue Src,
4794  uint64_t Size, unsigned Align, bool isVol,
4795  MachinePointerInfo DstPtrInfo) {
4796  // Turn a memset of undef to nop.
4797  if (Src.isUndef())
4798  return Chain;
4799 
4800  // Expand memset to a series of load/store ops if the size operand
4801  // falls below a certain threshold.
4802  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4803  std::vector<EVT> MemOps;
4804  bool DstAlignCanChange = false;
4805  MachineFunction &MF = DAG.getMachineFunction();
4806  MachineFrameInfo &MFI = MF.getFrameInfo();
4807  bool OptSize = shouldLowerMemFuncForSize(MF);
4809  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
4810  DstAlignCanChange = true;
  // An all-zero pattern is noted so the type-selection logic can take
  // advantage of it.
4811  bool IsZeroVal =
4812  isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
4813  if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
4814  Size, (DstAlignCanChange ? 0 : Align), 0,
4815  true, IsZeroVal, false, true,
4816  DstPtrInfo.getAddrSpace(), ~0u,
4817  DAG, TLI))
4818  return SDValue();
4819 
4820  if (DstAlignCanChange) {
4821  Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4822  unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4823  if (NewAlign > Align) {
4824  // Give the stack frame object a larger alignment if needed.
4825  if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
4826  MFI.setObjectAlignment(FI->getIndex(), NewAlign);
4827  Align = NewAlign;
4828  }
4829  }
4830 
4831  SmallVector<SDValue, 8> OutChains;
4832  uint64_t DstOff = 0;
4833  unsigned NumMemOps = MemOps.size();
4834 
4835  // Find the largest store and generate the bit pattern for it.
4836  EVT LargestVT = MemOps[0];
4837  for (unsigned i = 1; i < NumMemOps; i++)
4838  if (MemOps[i].bitsGT(LargestVT))
4839  LargestVT = MemOps[i];
4840  SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
4841 
  // Emit the stores, reusing (or truncating) the widest pattern where
  // possible.
4842  for (unsigned i = 0; i < NumMemOps; i++) {
4843  EVT VT = MemOps[i];
4844  unsigned VTSize = VT.getSizeInBits() / 8;
4845  if (VTSize > Size) {
4846  // Issuing an unaligned load / store pair that overlaps with the previous
4847  // pair. Adjust the offset accordingly.
4848  assert(i == NumMemOps-1 && i != 0);
4849  DstOff -= VTSize - Size;
4850  }
4851 
4852  // If this store is smaller than the largest store see whether we can get
4853  // the smaller value for free with a truncate.
4854  SDValue Value = MemSetValue;
4855  if (VT.bitsLT(LargestVT)) {
4856  if (!LargestVT.isVector() && !VT.isVector() &&
4857  TLI.isTruncateFree(LargestVT, VT))
4858  Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
4859  else
4860  Value = getMemsetValue(Src, VT, DAG, dl);
4861  }
4862  assert(Value.getValueType() == VT && "Value with wrong type.");
4863  SDValue Store = DAG.getStore(
4864  Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4865  DstPtrInfo.getWithOffset(DstOff), Align,
4867  OutChains.push_back(Store);
4868  DstOff += VT.getSizeInBits() / 8;
4869  Size -= VTSize;
4870  }
4871 
4872  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4873 }
4874 
4876  unsigned AS) {
4877  // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
4878  // pointer operands can be losslessly bitcasted to pointers of address space 0
4879  if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
  // There is no libcall fallback for such pointers; report_fatal_error
  // aborts compilation and does not return.
4880  report_fatal_error("cannot lower memory intrinsic in address space " +
4881  Twine(AS));
4882  }
4883 }
4884 
4886  SDValue Src, SDValue Size, unsigned Align,
4887  bool isVol, bool AlwaysInline, bool isTailCall,
4888  MachinePointerInfo DstPtrInfo,
4889  MachinePointerInfo SrcPtrInfo) {
4890  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4891 
4892  // Check to see if we should lower the memcpy to loads and stores first.
4893  // For cases within the target-specified limits, this is the best choice.
4894  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4895  if (ConstantSize) {
4896  // Memcpy with size zero? Just return the original chain.
4897  if (ConstantSize->isNullValue())
4898  return Chain;
4899 
4900  SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4901  ConstantSize->getZExtValue(),Align,
4902  isVol, false, DstPtrInfo, SrcPtrInfo);
4903  if (Result.getNode())
4904  return Result;
4905  }
4906 
4907  // Then check to see if we should lower the memcpy with target-specific
4908  // code. If the target chooses to do this, this is the next best.
4909  if (TSI) {
4910  SDValue Result = TSI->EmitTargetCodeForMemcpy(
4911  *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
4912  DstPtrInfo, SrcPtrInfo);
4913  if (Result.getNode())
4914  return Result;
4915  }
4916 
4917  // If we really need inline code and the target declined to provide it,
4918  // use a (potentially long) sequence of loads and stores.
4919  if (AlwaysInline) {
4920  assert(ConstantSize && "AlwaysInline requires a constant size!");
4921  return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4922  ConstantSize->getZExtValue(), Align, isVol,
4923  true, DstPtrInfo, SrcPtrInfo);
4924  }
4925 
4926  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
4927  checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
4928 
4929  // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
4930  // memcpy is not guaranteed to be safe. libc memcpys aren't required to
4931  // respect volatile, so they may do things like read or write memory
4932  // beyond the given memory regions. But fixing this isn't easy, and most
4933  // people don't care.
4934 
4935  // Emit a library call.
  // Dst, Src and Size are all passed with the target's intptr type.
4938  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
4939  Entry.Node = Dst; Args.push_back(Entry);
4940  Entry.Node = Src; Args.push_back(Entry);
4941  Entry.Node = Size; Args.push_back(Entry);
4942  // FIXME: pass in SDLoc
4944  CLI.setDebugLoc(dl)
4945  .setChain(Chain)
4949  TLI->getPointerTy(getDataLayout())),
4950  std::move(Args))
4951  .setDiscardResult()
4952  .setTailCall(isTailCall);
4953 
4954  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  // The second element of the pair is the call's output chain.
4955  return CallResult.second;
4956 }
4957 
4959  SDValue Src, SDValue Size, unsigned Align,
4960  bool isVol, bool isTailCall,
4961  MachinePointerInfo DstPtrInfo,
4962  MachinePointerInfo SrcPtrInfo) {
4963  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4964 
4965  // Check to see if we should lower the memmove to loads and stores first.
4966  // For cases within the target-specified limits, this is the best choice.
4967  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4968  if (ConstantSize) {
4969  // Memmove with size zero? Just return the original chain.
4970  if (ConstantSize->isNullValue())
4971  return Chain;
4972 
4973  SDValue Result =
4974  getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
4975  ConstantSize->getZExtValue(), Align, isVol,
4976  false, DstPtrInfo, SrcPtrInfo);
4977  if (Result.getNode())
4978  return Result;
4979  }
4980 
4981  // Then check to see if we should lower the memmove with target-specific
4982  // code. If the target chooses to do this, this is the next best.
4983  if (TSI) {
4984  SDValue Result = TSI->EmitTargetCodeForMemmove(
4985  *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
4986  if (Result.getNode())
4987  return Result;
4988  }
4989 
4990  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
4991  checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
4992 
4993  // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
4994  // not be safe. See memcpy above for more details.
4995 
4996  // Emit a library call.
  // Dst, Src and Size are all passed with the target's intptr type.
4999  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
5000  Entry.Node = Dst; Args.push_back(Entry);
5001  Entry.Node = Src; Args.push_back(Entry);
5002  Entry.Node = Size; Args.push_back(Entry);
5003  // FIXME: pass in SDLoc
5005  CLI.setDebugLoc(dl)
5006  .setChain(Chain)
5010  TLI->getPointerTy(getDataLayout())),
5011  std::move(Args))
5012  .setDiscardResult()
5013  .setTailCall(isTailCall);
5014 
5015  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  // The second element of the pair is the call's output chain.
5016  return CallResult.second;
5017 }
5018 
5020  SDValue Src, SDValue Size, unsigned Align,
5021  bool isVol, bool isTailCall,
5022  MachinePointerInfo DstPtrInfo) {
5023  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
5024 
5025  // Check to see if we should lower the memset to stores first.
5026  // For cases within the target-specified limits, this is the best choice.
5027  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
5028  if (ConstantSize) {
5029  // Memset with size zero? Just return the original chain.
5030  if (ConstantSize->isNullValue())
5031  return Chain;
5032 
5033  SDValue Result =
5034  getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
5035  Align, isVol, DstPtrInfo);
5036 
5037  if (Result.getNode())
5038  return Result;
5039  }
5040 
5041  // Then check to see if we should lower the memset with target-specific
5042  // code. If the target chooses to do this, this is the next best.
5043  if (TSI) {
5044  SDValue Result = TSI->EmitTargetCodeForMemset(
5045  *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
5046  if (Result.getNode())
5047  return Result;
5048  }
5049 
5050  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
5051 
5052  // Emit a library call.
5053  Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
5056  Entry.Node = Dst; Entry.Ty = IntPtrTy;
5057  Args.push_back(Entry);
  // Unlike dst and size, the value operand keeps its own IR type.
5058  Entry.Node = Src;
5059  Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
5060  Args.push_back(Entry);
5061  Entry.Node = Size;
5062  Entry.Ty = IntPtrTy;
5063  Args.push_back(Entry);
5064 
5065  // FIXME: pass in SDLoc
5067  CLI.setDebugLoc(dl)
5068  .setChain(Chain)
5072  TLI->getPointerTy(getDataLayout())),
5073  std::move(Args))
5074  .setDiscardResult()
5075  .setTailCall(isTailCall);
5076 
5077  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  // The second element of the pair is the call's output chain.
5078  return CallResult.second;
5079 }
5080 
5081 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5082  SDVTList VTList, ArrayRef<SDValue> Ops,
5083  MachineMemOperand *MMO) {
5085  ID.AddInteger(MemVT.getRawBits());
5086  AddNodeIDNode(ID, Opcode, VTList, Ops);
5087  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5088  void* IP = nullptr;
  // CSE: if an identical atomic node already exists, tighten its alignment
  // info and reuse it instead of creating a duplicate.
5089  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5090  cast<AtomicSDNode>(E)->refineAlignment(MMO);
5091  return SDValue(E, 0);
5092  }
5093 
5094  auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
5095  VTList, MemVT, MMO);
5096  createOperands(N, Ops);
5097 
5098  CSEMap.InsertNode(N, IP);
5099  InsertNode(N);
5100  return SDValue(N, 0);
5101 }
5102 
5104  unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain,
5105  SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
5106  unsigned Alignment, AtomicOrdering SuccessOrdering,
5107  AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
5108  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
5110  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
5111 
  // Alignment 0 is a request for the natural alignment of the memory type.
5112  if (Alignment == 0) // Ensure that codegen never sees alignment 0
5113  Alignment = getEVTAlignment(MemVT);
5114 
5116 
5117  // FIXME: Volatile isn't really correct; we should keep track of atomic
5118  // orderings in the memoperand.
  // Build a memory operand carrying both the success and failure orderings.
5121  MachineMemOperand *MMO =
5122  MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
5123  AAMDNodes(), nullptr, SynchScope, SuccessOrdering,
5124  FailureOrdering);
5125 
5126  return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO);
5127 }
5128 
5129 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
5130  EVT MemVT, SDVTList VTs, SDValue Chain,
5131  SDValue Ptr, SDValue Cmp, SDValue Swp,
5132  MachineMemOperand *MMO) {
5133  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
5135  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
5136 
  // Operand order is fixed: chain, address, expected value, new value.
5137  SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
5138  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
5139 }
5140 
5141 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5142  SDValue Chain, SDValue Ptr, SDValue Val,
5143  const Value *PtrVal, unsigned Alignment,
5144  AtomicOrdering Ordering,
5145  SynchronizationScope SynchScope) {
5146  if (Alignment == 0) // Ensure that codegen never sees alignment 0
5147  Alignment = getEVTAlignment(MemVT);
5148 
5150  // An atomic store does not load. An atomic load does not store.
5151  // (An atomicrmw obviously both loads and stores.)
5152  // For now, atomics are considered to be volatile always, and they are
5153  // chained as such.
5154  // FIXME: Volatile isn't really correct; we should keep track of atomic
5155  // orderings in the memoperand.
5156  auto Flags = MachineMemOperand::MOVolatile;
5157  if (Opcode != ISD::ATOMIC_STORE)
5158  Flags |= MachineMemOperand::MOLoad;
5159  if (Opcode != ISD::ATOMIC_LOAD)
5160  Flags |= MachineMemOperand::MOStore;
5161 
  // Wrap the IR-level pointer in a memory operand that carries the ordering
  // and synchronization scope.
5162  MachineMemOperand *MMO =
5163  MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
5164  MemVT.getStoreSize(), Alignment, AAMDNodes(),
5165  nullptr, SynchScope, Ordering);
5166 
5167  return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
5168 }
5169 
5170 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5171  SDValue Chain, SDValue Ptr, SDValue Val,
5172  MachineMemOperand *MMO) {
5173  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
5174  Opcode == ISD::ATOMIC_LOAD_SUB ||
5175  Opcode == ISD::ATOMIC_LOAD_AND ||
5176  Opcode == ISD::ATOMIC_LOAD_OR ||
5177  Opcode == ISD::ATOMIC_LOAD_XOR ||
5178  Opcode == ISD::ATOMIC_LOAD_NAND ||
5179  Opcode == ISD::ATOMIC_LOAD_MIN ||
5180  Opcode == ISD::ATOMIC_LOAD_MAX ||
5181  Opcode == ISD::ATOMIC_LOAD_UMIN ||
5182  Opcode == ISD::ATOMIC_LOAD_UMAX ||
5183  Opcode == ISD::ATOMIC_SWAP ||
5184  Opcode == ISD::ATOMIC_STORE) &&
5185  "Invalid Atomic Op");
5186 
5187  EVT VT = Val.getValueType();
5188 
5189  SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
5190  getVTList(VT, MVT::Other);
5191  SDValue Ops[] = {Chain, Ptr, Val};
5192  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
5193 }
5194 
5195 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5196  EVT VT, SDValue Chain, SDValue Ptr,
5197  MachineMemOperand *MMO) {
5198  assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
5199 
5200  SDVTList VTs = getVTList(VT, MVT::Other);
5201  SDValue Ops[] = {Chain, Ptr};
5202  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
5203 }
5204 
5205 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
  // A single operand needs no MERGE_VALUES wrapper; return it directly.
5207  if (Ops.size() == 1)
5208  return Ops[0];
5209 
  // Collect each operand's value type to form the node's result list.
5210  SmallVector<EVT, 4> VTs;
5211  VTs.reserve(Ops.size());
5212  for (unsigned i = 0; i < Ops.size(); ++i)
5213  VTs.push_back(Ops[i].getValueType());
5214  return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
5215 }
5216 
5218  unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
5219  EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, bool Vol,
5220  bool ReadMem, bool WriteMem, unsigned Size) {
5221  if (Align == 0) // Ensure that codegen never sees alignment 0
5222  Align = getEVTAlignment(MemVT);
5223 
  // Translate the boolean read/write/volatile arguments into MMO flags.
5225  auto Flags = MachineMemOperand::MONone;
5226  if (WriteMem)
5227  Flags |= MachineMemOperand::MOStore;
5228  if (ReadMem)
5229  Flags |= MachineMemOperand::MOLoad;
5230  if (Vol)
5232  if (!Size)
5233  Size = MemVT.getStoreSize();
5234  MachineMemOperand *MMO =
5235  MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);
5236 
5237  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
5238 }
5239 
5241  SDVTList VTList,
5242  ArrayRef<SDValue> Ops, EVT MemVT,
5243  MachineMemOperand *MMO) {
5244  assert((Opcode == ISD::INTRINSIC_VOID ||
5245  Opcode == ISD::INTRINSIC_W_CHAIN ||
5246  Opcode == ISD::PREFETCH ||
5247  Opcode == ISD::LIFETIME_START ||
5248  Opcode == ISD::LIFETIME_END ||
5249  (Opcode <= INT_MAX &&
5250  (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
5251  "Opcode is not a memory-accessing opcode!");
5252 
5253  // Memoize the node unless it returns a flag.
5255  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5257  AddNodeIDNode(ID, Opcode, VTList, Ops);
5258  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5259  void *IP = nullptr;
5260  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5261  cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
5262  return SDValue(E, 0);
5263  }
5264 
5265  N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
5266  VTList, MemVT, MMO);
5267  createOperands(N, Ops);
5268 
5269  CSEMap.InsertNode(N, IP);
5270  } else {
  // Glue-producing nodes must stay unique, so skip the CSE map entirely.
5271  N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
5272  VTList, MemVT, MMO);
5273  createOperands(N, Ops);
5274  }
5275  InsertNode(N);
5276  return SDValue(N, 0);
5277 }
5278 
5279 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
5280 /// MachinePointerInfo record from it. This is particularly useful because the
5281 /// code generator has many cases where it doesn't bother passing in a
5282 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
5284  int64_t Offset = 0) {
5285  // If this is FI+Offset, we can model it.
5286  if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
5288  FI->getIndex(), Offset);
5289 
5290  // If this is (FI+Offset1)+Offset2, we can model it.
5291  if (Ptr.getOpcode() != ISD::ADD ||
5292  !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
5293  !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
  // Any other address shape: fall back to an unknown pointer record.
5294  return MachinePointerInfo();
5295 
  // Fold the two constant offsets together into one record.
5296  int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5298  DAG.getMachineFunction(), FI,
5299  Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
5300 }
5301 
5302 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
5303 /// MachinePointerInfo record from it. This is particularly useful because the
5304 /// code generator has many cases where it doesn't bother passing in a
5305 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
5307  SDValue OffsetOp) {
5308  // If the 'Offset' value isn't a constant, we can't handle this.
5309  if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
5310  return InferPointerInfo(DAG, Ptr, OffsetNode->getSExtValue());
  // An undef offset operand means there is no offset at all.
5311  if (OffsetOp.isUndef())
5312  return InferPointerInfo(DAG, Ptr);
5313  return MachinePointerInfo();
5314 }
5315 
5317  EVT VT, const SDLoc &dl, SDValue Chain,
5319  MachinePointerInfo PtrInfo, EVT MemVT,
5320  unsigned Alignment,
5321  MachineMemOperand::Flags MMOFlags,
5322  const AAMDNodes &AAInfo, const MDNode *Ranges) {
5323  assert(Chain.getValueType() == MVT::Other &&
5324  "Invalid chain type");
5325  if (Alignment == 0) // Ensure that codegen never sees alignment 0
5326  Alignment = getEVTAlignment(MemVT);
5327 
  // A load must carry MOLoad and must not be marked as a store.
5328  MMOFlags |= MachineMemOperand::MOLoad;
5329  assert((MMOFlags & MachineMemOperand::MOStore) == 0);
5330  // If we don't have a PtrInfo, infer the trivial frame index case to simplify
5331  // clients.
5332  if (PtrInfo.V.isNull())
5333  PtrInfo = InferPointerInfo(*this, Ptr, Offset);
5334 
5337  PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
5338  return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
5339 }
5340 
5342  EVT VT, const SDLoc &dl, SDValue Chain,
5343  SDValue Ptr, SDValue Offset, EVT MemVT,
5344  MachineMemOperand *MMO) {
  // Same-type "extending" loads are canonicalized to plain loads.
5345  if (VT == MemVT) {
5346  ExtType = ISD::NON_EXTLOAD;
5347  } else if (ExtType == ISD::NON_EXTLOAD) {
5348  assert(VT == MemVT && "Non-extending load from different memory type!");
5349  } else {
5350  // Extending load.
5351  assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
5352  "Should only be an extending load, not truncating!");
5353  assert(VT.isInteger() == MemVT.isInteger() &&
5354  "Cannot convert from FP to Int or Int -> FP!");
5355  assert(VT.isVector() == MemVT.isVector() &&
5356  "Cannot use an ext load to convert to or from a vector!");
5357  assert((!VT.isVector() ||
5358  VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
5359  "Cannot use an ext load to change the number of vector elements!");
5360  }
5361 
5362  bool Indexed = AM != ISD::UNINDEXED;
5363  assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
5364 
5365  SDVTList VTs = Indexed ?
5367  SDValue Ops[] = { Chain, Ptr, Offset };
5369  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
5370  ID.AddInteger(MemVT.getRawBits());
5371  ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
5372  dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
5373  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5374  void *IP = nullptr;
  // Reuse an equivalent existing load from the CSE map when possible.
5375  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5376  cast<LoadSDNode>(E)->refineAlignment(MMO);
5377  return SDValue(E, 0);
5378  }
5379  auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
5380  ExtType, MemVT, MMO);
5381  createOperands(N, Ops);
5382 
5383  CSEMap.InsertNode(N, IP);
5384  InsertNode(N);
5385  return SDValue(N, 0);
5386 }
5387 
5389  SDValue Ptr, MachinePointerInfo PtrInfo,
5390  unsigned Alignment,
5391  MachineMemOperand::Flags MMOFlags,
5392  const AAMDNodes &AAInfo, const MDNode *Ranges) {
  // Convenience overload: unindexed, non-extending load (undef offset).
5394  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5395  PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
5396 }
5397 
5399  SDValue Ptr, MachineMemOperand *MMO) {
  // Convenience overload taking a prebuilt memory operand.
5401  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5402  VT, MMO);
5403 }
5404 
5406  EVT VT, SDValue Chain, SDValue Ptr,
5407  MachinePointerInfo PtrInfo, EVT MemVT,
5408  unsigned Alignment,
5409  MachineMemOperand::Flags MMOFlags,
5410  const AAMDNodes &AAInfo) {
  // Convenience overload: unindexed extending load (undef offset).
5412  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
5413  MemVT, Alignment, MMOFlags, AAInfo);
5414 }
5415 
5417  EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
5418  MachineMemOperand *MMO) {
  // Convenience overload taking a prebuilt memory operand.
5420  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
5421  MemVT, MMO);
5422 }
5423 
5425  SDValue Base, SDValue Offset,
5426  ISD::MemIndexedMode AM) {
5427  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
5428  assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
5429  // Don't propagate the invariant or dereferenceable flags.
5430  auto MMOFlags =
5431  LD->getMemOperand()->getFlags() &
  // Rebuild the load with the new base/offset and indexed addressing mode,
  // keeping the original extension type, memory VT and alias info.
5433  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
5434  LD->getChain(), Base, Offset, LD->getPointerInfo(),
5435  LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
5436  LD->getAAInfo());
5437 }
5438 
5440  SDValue Ptr, MachinePointerInfo PtrInfo,
5441  unsigned Alignment,
5442  MachineMemOperand::Flags MMOFlags,
5443  const AAMDNodes &AAInfo) {
5444  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
5445  if (Alignment == 0) // Ensure that codegen never sees alignment 0
5446  Alignment = getEVTAlignment(Val.getValueType());
5447 
  // A store must carry MOStore and must not be marked as a load.
5448  MMOFlags |= MachineMemOperand::MOStore;
5449  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
5450 
5451  if (PtrInfo.V.isNull())
5452  PtrInfo = InferPointerInfo(*this, Ptr);
5453 
5456  PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
5457  return getStore(Chain, dl, Val, Ptr, MMO);
5458 }
5459 
5461  SDValue Ptr, MachineMemOperand *MMO) {
5462  assert(Chain.getValueType() == MVT::Other &&
5463  "Invalid chain type");
5464  EVT VT = Val.getValueType();
5465  SDVTList VTs = getVTList(MVT::Other);
5467  SDValue Ops[] = { Chain, Val, Ptr, Undef };
5469  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5470  ID.AddInteger(VT.getRawBits());
5471  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
5472  dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
5473  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5474  void *IP = nullptr;
  // Reuse an equivalent existing store from the CSE map when possible.
5475  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5476  cast<StoreSDNode>(E)->refineAlignment(MMO);
5477  return SDValue(E, 0);
5478  }
5479  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
5480  ISD::UNINDEXED, false, VT, MMO);
5481  createOperands(N, Ops);
5482 
5483  CSEMap.InsertNode(N, IP);
5484  InsertNode(N);
5485  return SDValue(N, 0);
5486 }
5487 
5489  SDValue Ptr, MachinePointerInfo PtrInfo,
5490  EVT SVT, unsigned Alignment,
5491  MachineMemOperand::Flags MMOFlags,
5492  const AAMDNodes &AAInfo) {
5493  assert(Chain.getValueType() == MVT::Other &&
5494  "Invalid chain type");
5495  if (Alignment == 0) // Ensure that codegen never sees alignment 0
5496  Alignment = getEVTAlignment(SVT);
5497 
5498  MMOFlags |= MachineMemOperand::MOStore;
5499  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
5500 
5501  if (PtrInfo.V.isNull())
5502  PtrInfo = InferPointerInfo(*this, Ptr);
5503 
5506  PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
5507  return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
5508 }
5509 
5511  SDValue Ptr, EVT SVT,
5512  MachineMemOperand *MMO) {
5513  EVT VT = Val.getValueType();
5514 
5515  assert(Chain.getValueType() == MVT::Other &&
5516  "Invalid chain type");
5517  if (VT == SVT)
5518  return getStore(Chain, dl, Val, Ptr, MMO);
5519 
5520  assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
5521  "Should only be a truncating store, not extending!");
5522  assert(VT.isInteger() == SVT.isInteger() &&
5523  "Can't do FP-INT conversion!");
5524  assert(VT.isVector() == SVT.isVector() &&
5525  "Cannot use trunc store to convert to or from a vector!");
5526  assert((!VT.isVector() ||
5527  VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
5528  "Cannot use trunc store to change the number of vector elements!");
5529 
5530  SDVTList VTs = getVTList(MVT::Other);
5532  SDValue Ops[] = { Chain, Val, Ptr, Undef };
5534  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5535  ID.AddInteger(SVT.getRawBits());
5536  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
5537  dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
5538  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5539  void *IP = nullptr;
5540  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5541  cast<StoreSDNode>(E)->refineAlignment(MMO);
5542  return SDValue(E, 0);
5543  }
5544  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
5545  ISD::UNINDEXED, true, SVT, MMO);
5546  createOperands(N, Ops);
5547 
5548  CSEMap.InsertNode(N, IP);
5549  InsertNode(N);
5550  return SDValue(N, 0);
5551 }
5552 
5554  SDValue Base, SDValue Offset,
5555  ISD::MemIndexedMode AM) {
5556  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
5557  assert(ST->getOffset().isUndef() && "Store is already a indexed store!");
5558  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
5559  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
5561  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5562  ID.AddInteger(ST->getMemoryVT().getRawBits());
5563  ID.AddInteger(ST->getRawSubclassData());
5565  void *IP = nullptr;
5566  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
5567  return SDValue(E, 0);
5568 
5569  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
5570  ST->isTruncatingStore(), ST->getMemoryVT(),
5571  ST->getMemOperand());
5572  createOperands(N, Ops);
5573 
5574  CSEMap.InsertNode(N, IP);
5575  InsertNode(N);
5576  return SDValue(N, 0);
5577 }
5578 
5580  SDValue Ptr, SDValue Mask, SDValue Src0,
5581  EVT MemVT, MachineMemOperand *MMO,
5582  ISD::LoadExtType ExtTy, bool isExpanding) {
5583 
5584  SDVTList VTs = getVTList(VT, MVT::Other);
5585  SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
5587  AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
5588  ID.AddInteger(VT.getRawBits());
5589  ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
5590  dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
5591  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5592  void *IP = nullptr;
5593  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5594  cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
5595  return SDValue(E, 0);
5596  }
5597  auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
5598  ExtTy, isExpanding, MemVT, MMO);
5599  createOperands(N, Ops);
5600 
5601  CSEMap.InsertNode(N, IP);
5602  InsertNode(N);
5603  return SDValue(N, 0);
5604 }
5605 
5607  SDValue Val, SDValue Ptr, SDValue Mask,
5608  EVT MemVT, MachineMemOperand *MMO,
5609  bool IsTruncating, bool IsCompressing) {
5610  assert(Chain.getValueType() == MVT::Other &&
5611  "Invalid chain type");
5612  EVT VT = Val.getValueType();
5613  SDVTList VTs = getVTList(MVT::Other);
5614  SDValue Ops[] = { Chain, Ptr, Mask, Val };
5616  AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
5617  ID.AddInteger(VT.getRawBits());
5618  ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
5619  dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO));
5620  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5621  void *IP = nullptr;
5622  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5623  cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
5624  return SDValue(E, 0);
5625  }
5626  auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
5627  IsTruncating, IsCompressing, MemVT, MMO);
5628  createOperands(N, Ops);
5629 
5630  CSEMap.InsertNode(N, IP);
5631  InsertNode(N);
5632  return SDValue(N, 0);
5633 }
5634 
5636  ArrayRef<SDValue> Ops,
5637  MachineMemOperand *MMO) {
5638  assert(Ops.size() == 5 && "Incompatible number of operands");
5639 
5641  AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
5642  ID.AddInteger(VT.getRawBits());
5643  ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
5644  dl.getIROrder(), VTs, VT, MMO));
5645  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5646  void *IP = nullptr;
5647  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5648  cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
5649  return SDValue(E, 0);
5650  }
5651 
5652  auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
5653  VTs, VT, MMO);
5654  createOperands(N, Ops);
5655 
5656  assert(N->getValue().getValueType() == N->getValueType(0) &&
5657  "Incompatible type of the PassThru value in MaskedGatherSDNode");
5658  assert(N->getMask().getValueType().getVectorNumElements() ==
5660  "Vector width mismatch between mask and data");
5661  assert(N->getIndex().getValueType().getVectorNumElements() ==
5663  "Vector width mismatch between index and data");
5664 
5665  CSEMap.InsertNode(N, IP);
5666  InsertNode(N);
5667  return SDValue(N, 0);
5668 }
5669 
5671  ArrayRef<SDValue> Ops,
5672  MachineMemOperand *MMO) {
5673  assert(Ops.size() == 5 && "Incompatible number of operands");
5674 
5676  AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
5677  ID.AddInteger(VT.getRawBits());
5678  ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
5679  dl.getIROrder(), VTs, VT, MMO));
5680  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5681  void *IP = nullptr;
5682  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5683  cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
5684  return SDValue(E, 0);
5685  }
5686  auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
5687  VTs, VT, MMO);
5688  createOperands(N, Ops);
5689 
5690  assert(N->getMask().getValueType().getVectorNumElements() ==
5691  N->getValue().getValueType().getVectorNumElements() &&
5692  "Vector width mismatch between mask and data");
5693  assert(N->getIndex().getValueType().getVectorNumElements() ==
5694  N->getValue().getValueType().getVectorNumElements() &&
5695  "Vector width mismatch between index and data");
5696 
5697  CSEMap.InsertNode(N, IP);
5698  InsertNode(N);
5699  return SDValue(N, 0);
5700 }
5701 
5703  SDValue Ptr, SDValue SV, unsigned Align) {
5704  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
5705  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
5706 }
5707 
// getNode on an SDUse array: dispatch small operand counts to the dedicated
// overloads, otherwise convert to SDValues and use the general path.
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDUse> Ops) {
  switch (Ops.size()) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
  SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
  return getNode(Opcode, DL, VT, NewOps);
}
5723 
5724 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5725  ArrayRef<SDValue> Ops, const SDNodeFlags *Flags) {
5726  unsigned NumOps = Ops.size();
5727  switch (NumOps) {
5728  case 0: return getNode(Opcode, DL, VT);
5729  case 1: return getNode(Opcode, DL, VT, Ops[0]);
5730  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
5731  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
5732  default: break;
5733  }
5734 
5735  switch (Opcode) {
5736  default: break;
5737  case ISD::CONCAT_VECTORS: {
5738  // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
5739  if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
5740  return V;
5741  break;
5742  }
5743  case ISD::SELECT_CC: {
5744  assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
5745  assert(Ops[0].getValueType() == Ops[1].getValueType() &&
5746  "LHS and RHS of condition must have same type!");
5747  assert(Ops[2].getValueType() == Ops[3].getValueType() &&
5748  "True and False arms of SelectCC must have same type!");
5749  assert(Ops[2].getValueType() == VT &&
5750  "select_cc node must be of same type as true and false value!");
5751  break;
5752  }
5753  case ISD::BR_CC: {
5754  assert(NumOps == 5 && "BR_CC takes 5 operands!");
5755  assert(Ops[2].getValueType() == Ops[3].getValueType() &&
5756  "LHS/RHS of comparison should match types!");
5757  break;
5758  }
5759  }
5760 
5761  // Memoize nodes.
5762  SDNode *N;
5763  SDVTList VTs = getVTList(VT);
5764 
5765  if (VT != MVT::Glue) {
5767  AddNodeIDNode(ID, Opcode, VTs, Ops);
5768  void *IP = nullptr;
5769 
5770  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5771  return SDValue(E, 0);
5772 
5773  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5774  createOperands(N, Ops);
5775 
5776  CSEMap.InsertNode(N, IP);
5777  } else {
5778  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5779  createOperands(N, Ops);
5780  }
5781 
5782  InsertNode(N);
5783  return SDValue(N, 0);
5784 }
5785 
// Convenience wrapper: build an SDVTList from the result-type array and
// delegate to the SDVTList-based getNode.
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
  return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}
5790 
5791 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5792  ArrayRef<SDValue> Ops) {
5793  if (VTList.NumVTs == 1)
5794  return getNode(Opcode, DL, VTList.VTs[0], Ops);
5795 
5796 #if 0
5797  switch (Opcode) {
5798  // FIXME: figure out how to safely handle things like
5799  // int foo(int x) { return 1 << (x & 255); }
5800  // int bar() { return foo(256); }
5801  case ISD::SRA_PARTS:
5802  case ISD::SRL_PARTS:
5803  case ISD::SHL_PARTS:
5804  if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
5805  cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
5806  return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5807  else if (N3.getOpcode() == ISD::AND)
5808  if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
5809  // If the and is only masking out bits that cannot effect the shift,
5810  // eliminate the and.
5811  unsigned NumBits = VT.getScalarSizeInBits()*2;
5812  if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
5813  return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5814  }
5815  break;
5816  }
5817 #endif
5818 
5819  // Memoize the node unless it returns a flag.
5820  SDNode *N;
5821  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5823  AddNodeIDNode(ID, Opcode, VTList, Ops);
5824  void *IP = nullptr;
5825  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5826  return SDValue(E, 0);
5827 
5828  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
5829  createOperands(N, Ops);
5830  CSEMap.InsertNode(N, IP);
5831  } else {
5832  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
5833  createOperands(N, Ops);
5834  }
5835  InsertNode(N);
5836  return SDValue(N, 0);
5837 }
5838 
// Fixed-arity convenience overloads of getNode on an SDVTList: pack the
// operands into a local array and forward to the ArrayRef-based overload.
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList) {
  return getNode(Opcode, DL, VTList, None);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1) {
  SDValue Ops[] = { N1 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2) {
  SDValue Ops[] = { N1, N2 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3) {
  SDValue Ops[] = { N1, N2, N3 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VTList, Ops);
}
5874 
5876  return makeVTList(SDNode::getValueTypeList(VT), 1);
5877 }
5878 
5881  ID.AddInteger(2U);
5882  ID.AddInteger(VT1.getRawBits());
5883  ID.AddInteger(VT2.getRawBits());
5884 
5885  void *IP = nullptr;
5886  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5887  if (!Result) {
5888  EVT *Array = Allocator.Allocate<EVT>(2);
5889  Array[0] = VT1;
5890  Array[1] = VT2;
5891  Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
5892  VTListMap.InsertNode(Result, IP);
5893  }
5894  return Result->getSDVTList();
5895 }
5896 
5899  ID.AddInteger(3U);
5900  ID.AddInteger(VT1.getRawBits());
5901  ID.AddInteger(VT2.getRawBits());
5902  ID.AddInteger(VT3.getRawBits());
5903 
5904  void *IP = nullptr;
5905  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5906  if (!Result) {
5907  EVT *Array = Allocator.Allocate<EVT>(3);
5908  Array[0] = VT1;
5909  Array[1] = VT2;
5910  Array[2] = VT3;
5911  Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
5912  VTListMap.InsertNode(Result, IP);
5913  }
5914  return Result->getSDVTList();
5915 }
5916 
5919  ID.AddInteger(4U);
5920  ID.AddInteger(VT1.getRawBits());
5921  ID.AddInteger(VT2.getRawBits());
5922  ID.AddInteger(VT3.getRawBits());
5923  ID.AddInteger(VT4.getRawBits());
5924 
5925  void *IP = nullptr;
5926  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5927  if (!Result) {
5928  EVT *Array = Allocator.Allocate<EVT>(4);
5929  Array[0] = VT1;
5930  Array[1] = VT2;
5931  Array[2] = VT3;
5932  Array[3] = VT4;
5933  Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
5934  VTListMap.InsertNode(Result, IP);
5935  }
5936  return Result->getSDVTList();
5937 }
5938 
5940  unsigned NumVTs = VTs.size();
5942  ID.AddInteger(NumVTs);
5943  for (unsigned index = 0; index < NumVTs; index++) {
5944  ID.AddInteger(VTs[index].getRawBits());
5945  }
5946 
5947  void *IP = nullptr;
5948  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5949  if (!Result) {
5950  EVT *Array = Allocator.Allocate<EVT>(NumVTs);
5951  std::copy(VTs.begin(), VTs.end(), Array);
5952  Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
5953  VTListMap.InsertNode(Result, IP);
5954  }
5955  return Result->getSDVTList();
5956 }
5957 
5958 
5959 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
5960 /// specified operands. If the resultant node already exists in the DAG,
5961 /// this does not modify the specified node, instead it returns the node that
5962 /// already exists. If the resultant node does not exist in the DAG, the
5963 /// input node is returned. As a degenerate case, if you specify the same
5964 /// input operands as the node already has, the input node is returned.
5966  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
5967 
5968  // Check to see if there is no change.
5969  if (Op == N->getOperand(0)) return N;
5970 
5971  // See if the modified node already exists.
5972  void *InsertPos = nullptr;
5973  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
5974  return Existing;
5975 
5976  // Nope it doesn't. Remove the node from its current place in the maps.
5977  if (InsertPos)
5978  if (!RemoveNodeFromCSEMaps(N))
5979  InsertPos = nullptr;
5980 
5981  // Now we update the operands.
5982  N->OperandList[0].set(Op);
5983 
5984  // If this gets put into a CSE map, add it.
5985  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5986  return N;
5987 }
5988 
5990  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
5991 
5992  // Check to see if there is no change.
5993  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
5994  return N; // No operands changed, just return the input node.
5995 
5996  // See if the modified node already exists.
5997  void *InsertPos = nullptr;
5998  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
5999  return Existing;
6000 
6001  // Nope it doesn't. Remove the node from its current place in the maps.
6002  if (InsertPos)
6003  if (!RemoveNodeFromCSEMaps(N))
6004  InsertPos = nullptr;
6005 
6006  // Now we update the operands.
6007  if (N->OperandList[0] != Op1)
6008  N->OperandList[0].set(Op1);
6009  if (N->OperandList[1] != Op2)
6010  N->OperandList[1].set(Op2);
6011 
6012  // If this gets put into a CSE map, add it.
6013  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
6014  return N;
6015 }
6016 
6019  SDValue Ops[] = { Op1, Op2, Op3 };
6020  return UpdateNodeOperands(N, Ops);
6021 }
6022 
6025  SDValue Op3, SDValue Op4) {
6026  SDValue Ops[] = { Op1, Op2, Op3, Op4 };
6027  return UpdateNodeOperands(N, Ops);
6028 }
6029 
6032  SDValue Op3, SDValue Op4, SDValue Op5) {
6033  SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
6034  return UpdateNodeOperands(N, Ops);
6035 }
6036 
6039  unsigned NumOps = Ops.size();
6040  assert(N->getNumOperands() == NumOps &&
6041  "Update with wrong number of operands");
6042 
6043  // If no operands changed just return the input node.
6044  if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
6045  return N;
6046 
6047  // See if the modified node already exists.
6048  void *InsertPos = nullptr;
6049  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
6050  return Existing;
6051 
6052  // Nope it doesn't. Remove the node from its current place in the maps.
6053  if (InsertPos)
6054  if (!RemoveNodeFromCSEMaps(N))
6055  InsertPos = nullptr;
6056 
6057  // Now we update the operands.
6058  for (unsigned i = 0; i != NumOps; ++i)
6059  if (N->OperandList[i] != Ops[i])
6060  N->OperandList[i].set(Ops[i]);
6061 
6062  // If this gets put into a CSE map, add it.
6063  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
6064  return N;
6065 }
6066 
6067 /// DropOperands - Release the operands and set this node to have
6068 /// zero operands.
6070  // Unlike the code in MorphNodeTo that does this, we don't need to
6071  // watch for dead nodes here.
6072  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
6073  SDUse &Use = *I++;
6074  Use.set(SDValue());
6075  }
6076 }
6077 
/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
/// machine opcode.
///
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

// Core implementation: morph N into the machine node (note the opcode is
// complemented — MorphNodeTo expects the ~MachineOpc encoding). If morphing
// CSE'd into a different pre-existing node, redirect all uses to it and
// delete the original.
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   SDVTList VTs,ArrayRef<SDValue> Ops) {
  SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
  // Reset the NodeID to -1.
  New->setNodeId(-1);
  if (New != N) {
    ReplaceAllUsesWith(N, New);
    RemoveDeadNode(N);
  }
  return New;
}
6154 
6155 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
6156 /// the line number information on the merged node since it is not possible to
6157 /// preserve the information that operation is associated with multiple lines.
6158 /// This will make the debugger working better at -O0, were there is a higher
6159 /// probability having other instructions associated with that line.
6160 ///
6161 /// For IROrder, we keep the smaller of the two
6162 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
6163  DebugLoc NLoc = N->getDebugLoc();
6164  if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
6165  N->setDebugLoc(DebugLoc());
6166  }
6167  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
6168  N->setIROrder(Order);
6169  return N;
6170 }
6171 
6172 /// MorphNodeTo - This *mutates* the specified node to have the specified
6173 /// return type, opcode, and operands.
6174 ///
6175 /// Note that MorphNodeTo returns the resultant node. If there is already a
6176 /// node of the specified opcode and operands, it returns that node instead of
6177 /// the current one. Note that the SDLoc need not be the same.
6178 ///
6179 /// Using MorphNodeTo is faster than creating a new node and swapping it in
6180 /// with ReplaceAllUsesWith both because it often avoids allocating a new
6181 /// node, and because it doesn't require CSE recalculation for any of
6182 /// the node's users.
6183 ///
6184 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
6185 /// As a consequence it isn't appropriate to use from within the DAG combiner or
6186 /// the legalizer which maintain worklists that would need to be updated when
6187 /// deleting things.
6189  SDVTList VTs, ArrayRef<SDValue> Ops) {
6190  // If an identical node already exists, use it.
6191  void *IP = nullptr;
6192  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
6194  AddNodeIDNode(ID, Opc, VTs, Ops);
6195  if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
6196  return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
6197  }
6198 
6199  if (!RemoveNodeFromCSEMaps(N))
6200  IP = nullptr;
6201 
6202  // Start the morphing.
6203  N->NodeType = Opc;
6204  N->ValueList = VTs.VTs;
6205  N->NumValues = VTs.NumVTs;
6206 
6207  // Clear the operands list, updating used nodes to remove this from their
6208  // use list. Keep track of any operands that become dead as a result.
6209  SmallPtrSet<SDNode*, 16> DeadNodeSet;
6210  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
6211  SDUse &Use = *I++;
6212  SDNode *Used = Use.getNode();
6213  Use.set(SDValue());
6214  if (Used->use_empty())
6215  DeadNodeSet.insert(Used);
6216  }
6217 
6218  // For MachineNode, initialize the memory references information.
6219  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
6220  MN->setMemRefs(nullptr, nullptr);
6221 
6222  // Swap for an appropriately sized array from the recycler.
6223  removeOperands(N);
6224  createOperands(N, Ops);
6225 
6226  // Delete any nodes that are still dead after adding the uses for the
6227  // new operands.
6228  if (!DeadNodeSet.empty()) {
6229  SmallVector<SDNode *, 16> DeadNodes;
6230  for (SDNode *N : DeadNodeSet)
6231  if (N->use_empty())
6232  DeadNodes.push_back(N);
6233  RemoveDeadNodes(DeadNodes);
6234  }
6235 
6236  if (IP)
6237  CSEMap.InsertNode(N, IP); // Memoize the new node.
6238  return N;
6239 }
6240 
6241 
6242 /// getMachineNode - These are used for target selectors to create a new node
6243 /// with specified return type(s), MachineInstr opcode, and operands.
6244 ///
6245 /// Note that getMachineNode returns the resultant node. If there is already a
6246 /// node of the specified opcode and operands, it returns that node instead of
6247 /// the current one.
6249  EVT VT) {
6250  SDVTList VTs = getVTList(VT);
6251  return getMachineNode(Opcode, dl, VTs, None);
6252 }
6253 
6255  EVT VT, SDValue Op1) {
6256  SDVTList VTs = getVTList(VT);
6257  SDValue Ops[] = { Op1 };
6258  return getMachineNode(Opcode, dl, VTs, Ops);
6259 }
6260 
6262  EVT VT, SDValue Op1, SDValue Op2) {
6263  SDVTList VTs = getVTList(VT);
6264  SDValue Ops[] = { Op1, Op2 };
6265  return getMachineNode(Opcode, dl, VTs, Ops);
6266 }
6267 
6269  EVT VT, SDValue Op1, SDValue Op2,
6270  SDValue Op3) {
6271  SDVTList VTs = getVTList(VT);
6272  SDValue Ops[] = { Op1, Op2, Op3 };
6273  return getMachineNode(Opcode, dl, VTs, Ops);
6274 }
6275 
6277  EVT VT, ArrayRef<SDValue> Ops) {
6278  SDVTList VTs = getVTList(VT);
6279  return getMachineNode(Opcode, dl, VTs, Ops);
6280 }
6281 
6283  EVT VT1, EVT VT2, SDValue Op1,
6284  SDValue Op2) {
6285  SDVTList VTs = getVTList(VT1, VT2);
6286  SDValue Ops[] = { Op1, Op2 };
6287  return getMachineNode(Opcode, dl, VTs, Ops);
6288 }
6289 
6291  EVT VT1, EVT VT2, SDValue Op1,
6292  SDValue Op2, SDValue Op3) {
6293  SDVTList VTs = getVTList(VT1, VT2);
6294  SDValue Ops[] = { Op1, Op2, Op3 };
6295  return getMachineNode(Opcode, dl, VTs, Ops);
6296 }
6297 
6299  EVT VT1, EVT VT2,
6300  ArrayRef<SDValue> Ops) {
6301  SDVTList VTs = getVTList(VT1, VT2);
6302  return getMachineNode(Opcode, dl, VTs, Ops);
6303 }
6304 
6306  EVT VT1, EVT VT2, EVT VT3,
6307  SDValue Op1, SDValue Op2) {
6308  SDVTList VTs = getVTList(VT1, VT2, VT3);
6309  SDValue Ops[] = { Op1, Op2 };
6310  return getMachineNode(Opcode, dl, VTs, Ops);
6311 }
6312 
6314  EVT VT1, EVT VT2, EVT VT3,
6315  SDValue Op1, SDValue Op2,
6316  SDValue Op3) {
6317  SDVTList VTs = getVTList(VT1, VT2, VT3);
6318  SDValue Ops[] = { Op1, Op2, Op3 };
6319  return getMachineNode(Opcode, dl, VTs, Ops);
6320 }
6321 
6323  EVT VT1, EVT VT2, EVT VT3,
6324  ArrayRef<SDValue> Ops) {
6325  SDVTList VTs = getVTList(VT1, VT2, VT3);
6326  return getMachineNode(Opcode, dl, VTs, Ops);
6327 }
6328 
6330  ArrayRef<EVT> ResultTys,
6331  ArrayRef<SDValue> Ops) {
6332  SDVTList VTs = getVTList(ResultTys);
6333  return getMachineNode(Opcode, dl, VTs, Ops);
6334 }
6335 
6337  SDVTList VTs,
6338  ArrayRef<SDValue> Ops) {
6339  bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
6340  MachineSDNode *N;
6341  void *IP = nullptr;
6342 
6343  if (DoCSE) {
6345  AddNodeIDNode(ID, ~Opcode, VTs, Ops);
6346  IP = nullptr;
6347  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
6348  return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
6349  }
6350  }
6351 
6352  // Allocate a new MachineSDNode.
6353  N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6354  createOperands(N, Ops);
6355 
6356  if (DoCSE)
6357  CSEMap.InsertNode(N, IP);
6358 
6359  InsertNode(N);
6360  return N;
6361 }
6362 
6363 /// getTargetExtractSubreg - A convenience function for creating
6364 /// TargetOpcode::EXTRACT_SUBREG nodes.
6366  SDValue Operand) {
6367  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
6368  SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
6369  VT, Operand, SRIdxVal);
6370  return SDValue(Subreg, 0);
6371 }
6372 
6373 /// getTargetInsertSubreg - A convenience function for creating
6374 /// TargetOpcode::INSERT_SUBREG nodes.
6376  SDValue Operand, SDValue Subreg) {
6377  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
6378  SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
6379  VT, Operand, Subreg, SRIdxVal);
6380  return SDValue(Result, 0);
6381 }
6382 
6383 /// getNodeIfExists - Get the specified node if it's already available, or
6384 /// else return NULL.
6386  ArrayRef<SDValue> Ops,
6387  const SDNodeFlags *Flags) {
6388  if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
6390  AddNodeIDNode(ID, Opcode, VTList, Ops);
6391  void *IP = nullptr;
6392  if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
6393  if (Flags)
6394  E->intersectFlagsWith(Flags);
6395  return E;
6396  }
6397  }
6398  return nullptr;
6399 }
6400 
6401 /// getDbgValue - Creates a SDDbgValue node.
6402 ///
6403 /// SDNode
6405  unsigned R, bool IsIndirect, uint64_t Off,
6406  const DebugLoc &DL, unsigned O) {
6407  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
6408  "Expected inlined-at fields to agree");
6409  return new (DbgInfo->getAlloc())
6410  SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O);
6411 }
6412 
6413 /// Constant
6415  const Value *C, uint64_t Off,
6416  const DebugLoc &DL, unsigned O) {
6417  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
6418  "Expected inlined-at fields to agree");
6419  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, Off, DL, O);
6420 }
6421 
6422 /// FrameIndex
6424  unsigned FI, uint64_t Off,
6425  const DebugLoc &DL,
6426  unsigned O) {
6427  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
6428  "Expected inlined-at fields to agree");
6429  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, Off, DL, O);
6430 }
6431 
6432 namespace {
6433 
6434 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
6435 /// pointed to by a use iterator is deleted, increment the use iterator
6436 /// so that it doesn't dangle.
6437 ///
6438 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
6441 
6442  void NodeDeleted(SDNode *N, SDNode *E) override {
6443  // Increment the iterator as needed.
6444  while (UI != UE && N == *UI)
6445  ++UI;
6446  }
6447 
6448 public:
6449  RAUWUpdateListener(SelectionDAG &d,
6452  : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
6453 };
6454 
6455 }
6456 
6457 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6458 /// This can cause recursive merging of nodes in the DAG.
6459 ///
6460 /// This version assumes From has a single result value.
6461 ///
6463  SDNode *From = FromN.getNode();
6464  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
6465  "Cannot replace with this method!");
6466  assert(From != To.getNode() && "Cannot replace uses of with self");
6467 
6468  // Preserve Debug Values
6469  TransferDbgValues(FromN, To);
6470 
6471  // Iterate over all the existing uses of From. New uses will be added
6472  // to the beginning of the use list, which we avoid visiting.
6473  // This specifically avoids visiting uses of From that arise while the
6474  // replacement is happening, because any such uses would be the result
6475  // of CSE: If an existing node looks like From after one of its operands
6476  // is replaced by To, we don't want to replace of all its users with To
6477  // too. See PR3018 for more info.
6478  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6479  RAUWUpdateListener Listener(*this, UI, UE);
6480  while (UI != UE) {
6481  SDNode *User = *UI;
6482 
6483  // This node is about to morph, remove its old self from the CSE maps.
6484  RemoveNodeFromCSEMaps(User);
6485 
6486  // A user can appear in a use list multiple times, and when this
6487  // happens the uses are usually next to each other in the list.
6488  // To help reduce the number of CSE recomputations, process all
6489  // the uses of this user that we can find this way.
6490  do {
6491  SDUse &Use = UI.getUse();
6492  ++UI;
6493  Use.set(To);
6494  } while (UI != UE && *UI == User);
6495 
6496  // Now that we have modified User, add it back to the CSE maps. If it
6497  // already exists there, recursively merge the results together.
6498  AddModifiedNodeToCSEMaps(User);
6499  }
6500 
6501 
6502  // If we just RAUW'd the root, take note.
6503  if (FromN == getRoot())
6504  setRoot(To);
6505 }
6506 
6507 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6508 /// This can cause recursive merging of nodes in the DAG.
6509 ///
6510 /// This version assumes that for each value of From, there is a
6511 /// corresponding value in To in the same position with the same type.
6512 ///
6514 #ifndef NDEBUG
6515  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6516  assert((!From->hasAnyUseOfValue(i) ||
6517  From->getValueType(i) == To->getValueType(i)) &&
6518  "Cannot use this version of ReplaceAllUsesWith!");
6519 #endif
6520 
6521  // Handle the trivial case.
6522  if (From == To)
6523  return;
6524 
6525  // Preserve Debug Info. Only do this if there's a use.
6526  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6527  if (From->hasAnyUseOfValue(i)) {
6528  assert((i < To->getNumValues()) && "Invalid To location");
6529  TransferDbgValues(SDValue(From, i), SDValue(To, i));
6530  }
6531 
6532  // Iterate over just the existing users of From. See the comments in
6533  // the ReplaceAllUsesWith above.
6534  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6535  RAUWUpdateListener Listener(*this, UI, UE);
6536  while (UI != UE) {
6537  SDNode *User = *UI;
6538 
6539  // This node is about to morph, remove its old self from the CSE maps.
6540  RemoveNodeFromCSEMaps(User);
6541 
6542  // A user can appear in a use list multiple times, and when this
6543  // happens the uses are usually next to each other in the list.
6544  // To help reduce the number of CSE recomputations, process all
6545  // the uses of this user that we can find this way.
6546  do {
6547  SDUse &Use = UI.getUse();
6548  ++UI;
6549  Use.setNode(To);
6550  } while (UI != UE && *UI == User);
6551 
6552  // Now that we have modified User, add it back to the CSE maps. If it
6553  // already exists there, recursively merge the results together.
6554  AddModifiedNodeToCSEMaps(User);
6555  }
6556 
6557  // If we just RAUW'd the root, take note.
6558  if (From == getRoot().getNode())
6559  setRoot(SDValue(To, getRoot().getResNo()));
6560 }
6561 
6562 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6563 /// This can cause recursive merging of nodes in the DAG.
6564 ///
6565 /// This version can replace From with any result values. To must match the
6566 /// number and types of values returned by From.
6568  if (From->getNumValues() == 1) // Handle the simple case efficiently.
6569  return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
6570 
6571  // Preserve Debug Info.
6572  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6573  TransferDbgValues(SDValue(From, i), *To);
6574 
6575  // Iterate over just the existing users of From. See the comments in
6576  // the ReplaceAllUsesWith above.
6577  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6578  RAUWUpdateListener Listener(*this, UI, UE);
6579  while (UI != UE) {
6580  SDNode *User = *UI;
6581 
6582  // This node is about to morph, remove its old self from the CSE maps.
6583  RemoveNodeFromCSEMaps(User);
6584 
6585  // A user can appear in a use list multiple times, and when this
6586  // happens the uses are usually next to each other in the list.
6587  // To help reduce the number of CSE recomputations, process all
6588  // the uses of this user that we can find this way.
6589  do {
6590  SDUse &Use = UI.getUse();
6591  const SDValue &ToOp = To[Use.getResNo()];
6592  ++UI;
6593  Use.set(ToOp);
6594  } while (UI != UE && *UI == User);
6595 
6596  // Now that we have modified User, add it back to the CSE maps. If it
6597  // already exists there, recursively merge the results together.
6598  AddModifiedNodeToCSEMaps(User);
6599  }
6600 
6601  // If we just RAUW'd the root, take note.
6602  if (From == getRoot().getNode())
6603  setRoot(SDValue(To[getRoot().getResNo()]));
6604 }
6605 
6606 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
6607 /// uses of other values produced by From.getNode() alone. The Deleted
6608 /// vector is handled the same way as for ReplaceAllUsesWith.
6610  // Handle the really simple, really trivial case efficiently.
6611  if (From == To) return;
6612 
6613  // Handle the simple, trivial, case efficiently.
6614  if (From.getNode()->getNumValues() == 1) {
6615  ReplaceAllUsesWith(From, To);
6616  return;
6617  }
6618 
6619  // Preserve Debug Info.
6620  TransferDbgValues(From, To);
6621 
6622  // Iterate over just the existing users of From. See the comments in
6623  // the ReplaceAllUsesWith above.
6624  SDNode::use_iterator UI = From.getNode()->use_begin(),
6625  UE = From.getNode()->use_end();
6626  RAUWUpdateListener Listener(*this, UI, UE);
6627  while (UI != UE) {
6628  SDNode *User = *UI;
6629  bool UserRemovedFromCSEMaps = false;
6630 
6631  // A user can appear in a use list multiple times, and when this
6632  // happens the uses are usually next to each other in the list.
6633  // To help reduce the number of CSE recomputations, process all
6634  // the uses of this user that we can find this way.
6635  do {
6636  SDUse &Use = UI.getUse();
6637 
6638  // Skip uses of different values from the same node.
6639  if (Use.getResNo() != From.getResNo()) {
6640  ++UI;
6641  continue;
6642  }
6643 
6644  // If this node hasn't been modified yet, it's still in the CSE maps,
6645  // so remove its old self from the CSE maps.
6646  if (!UserRemovedFromCSEMaps) {
6647  RemoveNodeFromCSEMaps(User);
6648  UserRemovedFromCSEMaps = true;
6649  }
6650 
6651  ++UI;
6652  Use.set(To);
6653  } while (UI != UE && *UI == User);
6654 
6655  // We are iterating over all uses of the From node, so if a use
6656  // doesn't use the specific value, no changes are made.
6657  if (!UserRemovedFromCSEMaps)
6658  continue;
6659 
6660  // Now that we have modified User, add it back to the CSE maps. If it
6661  // already exists there, recursively merge the results together.
6662  AddModifiedNodeToCSEMaps(User);
6663  }
6664 
6665  // If we just RAUW'd the root, take note.
6666  if (From == getRoot())
6667  setRoot(To);
6668 }
6669 
6670 namespace {
6671  /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
6672  /// to record information about a use.
6673  struct UseMemo {
6674  SDNode *User;
6675  unsigned Index;
6676  SDUse *Use;
6677  };
6678 
6679  /// operator< - Sort Memos by User.
6680  bool operator<(const UseMemo &L, const UseMemo &R) {
6681  return (intptr_t)L.User < (intptr_t)R.User;
6682  }
6683 }
6684 
6685 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
6686 /// uses of other values produced by From.getNode() alone. The same value
6687 /// may appear in both the From and To list. The Deleted vector is
6688 /// handled the same way as for ReplaceAllUsesWith.
6690  const SDValue *To,
6691  unsigned Num){
6692  // Handle the simple, trivial case efficiently.
6693  if (Num == 1)
6694  return ReplaceAllUsesOfValueWith(*From, *To);
6695 
6696  TransferDbgValues(*From, *To);
6697 
6698  // Read up all the uses and make records of them. This helps
6699  // processing new uses that are introduced during the
6700  // replacement process.
6702  for (unsigned i = 0; i != Num; ++i) {
6703  unsigned FromResNo = From[i].getResNo();
6704  SDNode *FromNode = From[i].getNode();
6705  for (SDNode::use_iterator UI = FromNode->use_begin(),
6706  E = FromNode->use_end(); UI != E; ++UI) {
6707  SDUse &Use = UI.getUse();
6708  if (Use.getResNo() == FromResNo) {
6709  UseMemo Memo = { *UI, i, &Use };
6710  Uses.push_back(Memo);
6711  }
6712  }
6713  }
6714 
6715  // Sort the uses, so that all the uses from a given User are together.
6716  std::sort(Uses.begin(), Uses.end());
6717 
6718  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
6719  UseIndex != UseIndexEnd; ) {
6720  // We know that this user uses some value of From. If it is the right
6721  // value, update it.
6722  SDNode *User = Uses[UseIndex].User;
6723 
6724  // This node is about to morph, remove its old self from the CSE maps.
6725  RemoveNodeFromCSEMaps(User);
6726 
6727  // The Uses array is sorted, so all the uses for a given User
6728  // are next to each other in the list.
6729  // To help reduce the number of CSE recomputations, process all
6730  // the uses of this user that we can find this way.
6731  do {
6732  unsigned i = Uses[UseIndex].Index;
6733  SDUse &Use = *Uses[UseIndex].Use;
6734  ++UseIndex;
6735 
6736  Use.set(To[i]);
6737  } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
6738 
6739  // Now that we have modified User, add it back to the CSE maps. If it
6740  // already exists there, recursively merge the results together.
6741  AddModifiedNodeToCSEMaps(User);
6742  }
6743 }
6744 
6745 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
6746 /// based on their topological order. It returns the maximum id and a vector
6747 /// of the SDNodes* in assigned order by reference.
6749 
6750  unsigned DAGSize = 0;
6751 
6752  // SortedPos tracks the progress of the algorithm. Nodes before it are
6753  // sorted, nodes after it are unsorted. When the algorithm completes
6754  // it is at the end of the list.
6755  allnodes_iterator SortedPos = allnodes_begin();
6756 
6757  // Visit all the nodes. Move nodes with no operands to the front of
6758  // the list immediately. Annotate nodes that do have operands with their
6759  // operand count. Before we do this, the Node Id fields of the nodes
6760  // may contain arbitrary values. After, the Node Id fields for nodes
6761  // before SortedPos will contain the topological sort index, and the
6762  // Node Id fields for nodes At SortedPos and after will contain the
6763  // count of outstanding operands.
6764  for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
6765  SDNode *N = &*I++;
6766  checkForCycles(N, this);
6767  unsigned Degree = N->getNumOperands();
6768  if (Degree == 0) {
6769  // A node with no uses, add it to the result array immediately.
6770  N->setNodeId(DAGSize++);
6771  allnodes_iterator Q(N);
6772  if (Q != SortedPos)
6773  SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
6774  assert(SortedPos != AllNodes.end() && "Overran node list");
6775  ++SortedPos;
6776  } else {
6777  // Temporarily use the Node Id as scratch space for the degree count.
6778  N->setNodeId(Degree);
6779  }
6780  }
6781 
6782  // Visit all the nodes. As we iterate, move nodes into sorted order,
6783  // such that by the time the end is reached all nodes will be sorted.
6784  for (SDNode &Node : allnodes()) {
6785  SDNode *N = &Node;
6786  checkForCycles(N, this);
6787  // N is in sorted position, so all its uses have one less operand
6788  // that needs to be sorted.
6789  for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
6790  UI != UE; ++UI) {
6791  SDNode *P = *UI;
6792  unsigned Degree = P->getNodeId();
6793  assert(Degree != 0 && "Invalid node degree");
6794  --Degree;
6795  if (Degree == 0) {
6796  // All of P's operands are sorted, so P may sorted now.
6797  P->setNodeId(DAGSize++);
6798  if (P->getIterator() != SortedPos)
6799  SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
6800  assert(SortedPos != AllNodes.end() && "Overran node list");
6801  ++SortedPos;
6802  } else {
6803  // Update P's outstanding operand count.
6804  P->setNodeId(Degree);
6805  }
6806  }
6807  if (Node.getIterator() == SortedPos) {
6808 #ifndef NDEBUG
6809  allnodes_iterator I(N);
6810  SDNode *S = &*++I;
6811  dbgs() << "Overran sorted position:\n";
6812  S->dumprFull(this); dbgs() << "\n";
6813  dbgs() << "Checking if this is due to cycles\n";
6814  checkForCycles(this, true);
6815 #endif
6816  llvm_unreachable(nullptr);
6817  }
6818  }
6819 
6820  assert(SortedPos == AllNodes.end() &&
6821  "Topological sort incomplete!");
6822  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
6823  "First node in topological sort is not the entry token!");
6824  assert(AllNodes.front().getNodeId() == 0 &&
6825  "First node in topological sort has non-zero id!");
6826  assert(AllNodes.front().getNumOperands() == 0 &&
6827  "First node in topological sort has operands!");
6828  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
6829  "Last node in topologic sort has unexpected id!");
6830  assert(AllNodes.back().use_empty() &&
6831  "Last node in topologic sort has users!");
6832  assert(DAGSize == allnodes_size() && "Node count mismatch!");
6833  return DAGSize;
6834 }
6835 
6836 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
6837 /// value is produced by SD.
6838 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
6839  if (SD) {
6840  assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
6841  SD->setHasDebugValue(true);
6842  }
6843  DbgInfo->add(DB, SD, isParameter);
6844 }
6845 
6846 /// TransferDbgValues - Transfer SDDbgValues. Called in replace nodes.
6847 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
6848  if (From == To || !From.getNode()->getHasDebugValue())
6849  return;
6850  SDNode *FromNode = From.getNode();
6851  SDNode *ToNode = To.getNode();
6852  ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
6853  SmallVector<SDDbgValue *, 2> ClonedDVs;
6854  for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
6855  I != E; ++I) {
6856  SDDbgValue *Dbg = *I;
6857  // Only add Dbgvalues attached to same ResNo.
6858  if (Dbg->getKind() == SDDbgValue::SDNODE &&
6859  Dbg->getSDNode() == From.getNode() &&
6860  Dbg->getResNo() == From.getResNo() && !Dbg->isInvalidated()) {
6861  assert(FromNode != ToNode &&
6862  "Should not transfer Debug Values intranode");
6863  SDDbgValue *Clone =
6864  getDbgValue(Dbg->getVariable(), Dbg->getExpression(), ToNode,
6865  To.getResNo(), Dbg->isIndirect(), Dbg->getOffset(),
6866  Dbg->getDebugLoc(), Dbg->getOrder());
6867  ClonedDVs.push_back(Clone);
6868  Dbg->setIsInvalidated();
6869  }
6870  }
6871  for (SDDbgValue *I : ClonedDVs)
6872  AddDbgValue(I, ToNode, false);
6873 }
6874 
6875 //===----------------------------------------------------------------------===//
6876 // SDNode Class
6877 //===----------------------------------------------------------------------===//
6878 
6880  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
6881  return Const != nullptr && Const->isNullValue();
6882 }
6883 
6886  return Const != nullptr && Const->isZero() && !Const->isNegative();
6887 }
6888 
6890  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
6891  return Const != nullptr && Const->isAllOnesValue();
6892 }
6893 
6895  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
6896  return Const != nullptr && Const->isOne();
6897 }
6898 
6900  return V.getOpcode() == ISD::XOR && isAllOnesConstant(V.getOperand(1));
6901 }
6902 
6904  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
6905  return CN;
6906 
6907  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
6908  BitVector UndefElements;
6909  ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
6910 
6911  // BuildVectors can truncate their operands. Ignore that case here.
6912  // FIXME: We blindly ignore splats which include undef which is overly
6913  // pessimistic.
6914  if (CN && UndefElements.none() &&
6915  CN->getValueType(0) == N.getValueType().getScalarType())
6916  return CN;
6917  }
6918 
6919  return nullptr;
6920 }
6921 
6923  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
6924  return CN;
6925 
6926  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
6927  BitVector UndefElements;
6928  ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
6929 
6930  if (CN && UndefElements.none())
6931  return CN;
6932  }
6933 
6934  return nullptr;
6935 }
6936 
6938  DropOperands();
6939 }
6940 
/// GlobalAddressSDNode - Construct a node referencing global value GA with
/// byte offset o and target-specific flags TF. Opc is supplied by the caller
/// (presumably one of the GlobalAddress/TargetGlobalAddress opcodes --
/// not determinable from this constructor alone).
GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         const DebugLoc &DL,
                                         const GlobalValue *GA, EVT VT,
                                         int64_t o, unsigned char TF)
    : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  // TheGlobal is assigned in the body rather than the member init list.
  TheGlobal = GA;
}
6948 
6950  EVT VT, unsigned SrcAS,
6951  unsigned DestAS)
6952  : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
6953  SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
6954 
/// MemSDNode - Construct a memory-referencing node for memory type memvt,
/// caching the commonly queried flags of the MachineMemOperand mmo.
MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  // Mirror the MMO's flags into the node's bitfields so queries don't need
  // to dereference the MachineMemOperand.
  MemSDNodeBits.IsVolatile = MMO->isVolatile();
  MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
  MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
  MemSDNodeBits.IsInvariant = MMO->isInvariant();

  // We check here that the size of the memory operand fits within the size of
  // the MMO. This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}
6968 
6969 /// Profile - Gather unique data for the node.
6970 ///
6972  AddNodeIDNode(ID, this);
6973 }
6974 
6975 namespace {
6976  struct EVTArray {
6977  std::vector<EVT> VTs;
6978 
6979  EVTArray() {
6980  VTs.reserve(MVT::LAST_VALUETYPE);
6981  for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
6982  VTs.push_back(MVT((MVT::SimpleValueType)i));
6983  }
6984  };
6985 }
6986 
6990 
6991 /// getValueTypeList - Return a pointer to the specified value type.
6992 ///
6993 const EVT *SDNode::getValueTypeList(EVT VT) {
6994  if (VT.isExtended()) {
6996  return &(*EVTs->insert(VT).first);
6997  } else {
6999  "Value type out of range!");
7000  return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
7001  }
7002 }
7003 
7004 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
7005 /// indicated value. This method ignores uses of other values defined by this
7006 /// operation.
7007 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
7008  assert(Value < getNumValues() && "Bad value!");
7009 
7010  // TODO: Only iterate over uses of a given value of the node
7011  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
7012  if (UI.getUse().getResNo() == Value) {
7013  if (NUses == 0)
7014  return false;
7015  --NUses;
7016  }
7017  }
7018 
7019  // Found exactly the right number of uses?
7020  return NUses == 0;
7021 }
7022 
7023 
7024 /// hasAnyUseOfValue - Return true if there are any use of the indicated
7025 /// value. This method ignores uses of other values defined by this operation.
7026 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
7027  assert(Value < getNumValues() && "Bad value!");
7028 
7029  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
7030  if (UI.getUse().getResNo() == Value)
7031  return true;
7032 
7033  return false;
7034 }
7035 
7036 
7037 /// isOnlyUserOf - Return true if this node is the only use of N.
7038 ///
7039 bool SDNode::isOnlyUserOf(const SDNode *N) const {
7040  bool Seen = false;
7041  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
7042  SDNode *User = *I;
7043  if (User == this)
7044  Seen = true;
7045  else
7046  return false;
7047  }
7048 
7049  return Seen;
7050 }
7051 
7052 /// isOperand - Return true if this node is an operand of N.
7053 ///
7054 bool SDValue::isOperandOf(const SDNode *N) const {
7055  for (const SDValue &Op : N->op_values())
7056  if (*this == Op)
7057  return true;
7058  return false;
7059 }
7060 
7061 bool SDNode::isOperandOf(const SDNode *N) const {
7062  for (const SDValue &Op : N->op_values())
7063  if (this == Op.getNode())
7064  return true;
7065  return false;
7066 }
7067 
7068 /// reachesChainWithoutSideEffects - Return true if this operand (which must
7069 /// be a chain) reaches the specified operand without crossing any
7070 /// side-effecting instructions on any chain path. In practice, this looks
7071 /// through token factors and non-volatile loads. In order to remain efficient,
7072 /// this only looks a couple of nodes in, it does not do an exhaustive search.
7074  unsigned Depth) const {
7075  if (*this == Dest) return true;
7076 
7077  // Don't search too deeply, we just want to be able to see through
7078  // TokenFactor's etc.
7079  if (Depth == 0) return false;
7080 
7081  // If this is a token factor, all inputs to the TF happen in parallel. If any
7082  // of the operands of the TF does not reach dest, then we cannot do the xform.
7083  if (getOpcode() == ISD::TokenFactor) {
7084  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
7085  if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
7086  return false;
7087  return true;
7088  }
7089 
7090  // Loads don't have side effects, look through them.
7091  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
7092  if (!Ld->isVolatile())
7093  return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
7094  }
7095  return false;
7096 }
7097 
7098 bool SDNode::hasPredecessor(const SDNode *N) const {
7101  Worklist.push_back(this);
7102  return hasPredecessorHelper(N, Visited, Worklist);
7103 }
7104 
/// getConstantOperandVal - Return operand Num, which must be a
/// ConstantSDNode, as its zero-extended 64-bit value.
uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
  assert(Num < NumOperands && "Invalid child # of SDNode!");
  // cast<> (rather than dyn_cast<>) asserts that the operand really is a
  // ConstantSDNode.
  return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
}
7109 
7111  if (auto *FlagsNode = dyn_cast<BinaryWithFlagsSDNode>(this))
7112  return &FlagsNode->Flags;
7113  return nullptr;
7114 }
7115 
7117  if (auto *FlagsNode = dyn_cast<BinaryWithFlagsSDNode>(this))
7118  FlagsNode->Flags.intersectWith(Flags);
7119 }
7120 
7122  assert(N->getNumValues() == 1 &&
7123  "Can't unroll a vector with multiple results!");
7124 
7125  EVT VT = N->getValueType(0);
7126  unsigned NE = VT.getVectorNumElements();
7127  EVT EltVT = VT.getVectorElementType();
7128  SDLoc dl(N);
7129 
7130  SmallVector<SDValue, 8> Scalars;
7131  SmallVector<SDValue, 4> Operands(N->getNumOperands());
7132 
7133  // If ResNE is 0, fully unroll the vector op.
7134  if (ResNE == 0)
7135  ResNE = NE;
7136  else if (NE > ResNE)
7137  NE = ResNE;
7138 
7139  unsigned i;
7140  for (i= 0; i != NE; ++i) {
7141  for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
7142  SDValue Operand = N->getOperand(j);
7143  EVT OperandVT = Operand.getValueType();
7144  if (OperandVT.isVector()) {
7145  // A vector operand; extract a single element.
7146  EVT OperandEltVT = OperandVT.getVectorElementType();
7147  Operands[j] =
7148  getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
7149  getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
7150  } else {
7151  // A scalar operand; just use it as is.
7152  Operands[j] = Operand;
7153  }
7154  }
7155 
7156  switch (N->getOpcode()) {
7157  default: {
7158  Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
7159  N->getFlags()));
7160  break;
7161  }
7162  case ISD::VSELECT:
7163  Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
7164  break;
7165  case ISD::SHL:
7166  case ISD::SRA:
7167  case ISD::SRL:
7168  case ISD::ROTL:
7169  case ISD::ROTR:
7170  Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
7171  getShiftAmountOperand(Operands[0].getValueType(),
7172  Operands[1])));
7173  break;
7175  case ISD::FP_ROUND_INREG: {
7176  EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
7177  Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
7178  Operands[0],
7179  getValueType(ExtVT)));
7180  }
7181  }
7182  }
7183 
7184  for (; i < ResNE; ++i)
7185  Scalars.push_back(getUNDEF(EltVT));
7186 
7187  EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
7188  return getBuildVector(VecVT, dl, Scalars);
7189 }
7190 
7192  LoadSDNode *Base,
7193  unsigned Bytes,
7194  int Dist) const {
7195  if (LD->isVolatile() || Base->isVolatile())
7196  return false;
7197  if (LD->isIndexed() || Base->isIndexed())
7198  return false;
7199  if (LD->getChain() != Base->getChain())
7200  return false;
7201  EVT VT = LD->getValueType(0);
7202  if (VT.getSizeInBits() / 8 != Bytes)
7203  return false;
7204 
7205  SDValue Loc = LD->getOperand(1);
7206  SDValue BaseLoc = Base->getOperand(1);
7207  if (Loc.getOpcode() == ISD::FrameIndex) {
7208  if (BaseLoc.getOpcode() != ISD::FrameIndex)
7209  return false;
7211  int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
7212  int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
7213  int FS = MFI.getObjectSize(FI);
7214  int BFS = MFI.getObjectSize(BFI);
7215  if (FS != BFS || FS != (int)Bytes) return false;
7216  return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
7217  }
7218 
7219  // Handle X + C.
7220  if (isBaseWithConstantOffset(Loc)) {
7221  int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
7222  if (Loc.getOperand(0) == BaseLoc) {
7223  // If the base location is a simple address with no offset itself, then
7224  // the second load's first add operand should be the base address.
7225  if (LocOffset == Dist * (int)Bytes)
7226  return true;
7227  } else if (isBaseWithConstantOffset(BaseLoc)) {
7228  // The base location itself has an offset, so subtract that value from the
7229  // second load's offset before comparing to distance * size.
7230  int64_t BOffset =
7231  cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue();
7232  if (Loc.getOperand(0) == BaseLoc.getOperand(0)) {
7233  if ((LocOffset - BOffset) == Dist * (int)Bytes)
7234  return true;
7235  }
7236  }
7237  }
7238  const GlobalValue *GV1 = nullptr;
7239  const GlobalValue *GV2 = nullptr;
7240  int64_t Offset1 = 0;
7241  int64_t Offset2 = 0;
7242  bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
7243  bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
7244  if (isGA1 && isGA2 && GV1 == GV2)
7245  return Offset1 == (Offset2 + Dist*Bytes);
7246  return false;
7247 }
7248 
7249 
7250 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
7251 /// it cannot be inferred.
7253  // If this is a GlobalAddress + cst, return the alignment.
7254  const GlobalValue *GV;
7255  int64_t GVOffset = 0;
7256  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
7257  unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
7258  APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
7259  llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
7260  getDataLayout());
7261  unsigned AlignBits = KnownZero.countTrailingOnes();
7262  unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
7263  if (Align)
7264  return MinAlign(Align, GVOffset);
7265  }
7266 
7267  // If this is a direct reference to a stack slot, use information about the
7268  // stack slot's alignment.
7269  int FrameIdx = 1 << 31;
7270  int64_t FrameOffset = 0;
7271  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
7272  FrameIdx = FI->getIndex();
7273  } else if (isBaseWithConstantOffset(Ptr) &&
7274  isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
7275  // Handle FI+Cst
7276  FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7277  FrameOffset = Ptr.getConstantOperandVal(1);
7278  }
7279 
7280  if (FrameIdx != (1 << 31)) {
7282  unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
7283  FrameOffset);
7284  return FIInfoAlign;
7285  }
7286 
7287  return 0;
7288 }
7289 
7290 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
7291 /// which is split (or expanded) into two not necessarily identical pieces.
7292 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
7293  // Currently all types are split in half.
7294  EVT LoVT, HiVT;
7295  if (!VT.isVector()) {
7296  LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
7297  } else {
7298  unsigned NumElements = VT.getVectorNumElements();
7299  assert(!(NumElements & 1) && "Splitting vector, but not in half!");
7300  LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
7301  NumElements/2);
7302  }
7303  return std::make_pair(LoVT, HiVT);
7304 }
7305 
7306 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
7307 /// low/high part.
7308 std::pair<SDValue, SDValue>
7309 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
7310  const EVT &HiVT) {
7313  "More vector elements requested than available!");
7314  SDValue Lo, Hi;
7315  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
7316  getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
7317  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
7318  getConstant(LoVT.getVectorNumElements(), DL,
7319  TLI->getVectorIdxTy(getDataLayout())));
7320  return std::make_pair(Lo, Hi);
7321 }
7322 
7325  unsigned Start, unsigned Count) {
7326  EVT VT = Op.getValueType();
7327  if (Count == 0)
7328  Count = VT.getVectorNumElements();
7329 
7330  EVT EltVT = VT.getVectorElementType();
7331  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
7332  SDLoc SL(Op);
7333  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
7334  Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
7335  Op, getConstant(i, SL, IdxTy)));
7336  }
7337 }
7338 
7339 // getAddressSpace - Return the address space this GlobalAddress belongs to.
7341  return getGlobal()->getType()->getAddressSpace();
7342 }
7343 
7344 
7347  return Val.MachineCPVal->getType();
7348  return Val.ConstVal->getType();
7349 }
7350 
7352  APInt &SplatUndef,
7353  unsigned &SplatBitSize,
7354  bool &HasAnyUndefs,
7355  unsigned MinSplatBits,
7356  bool isBigEndian) const {
7357  EVT VT = getValueType(0);
7358  assert(VT.isVector() && "Expected a vector type");
7359  unsigned sz = VT.getSizeInBits();
7360  if (MinSplatBits > sz)
7361  return false;
7362 
7363  SplatValue = APInt(sz, 0);
7364  SplatUndef = APInt(sz, 0);
7365 
7366  // Get the bits. Bits with undefined values (when the corresponding element
7367  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
7368  // in SplatValue. If any of the values are not constant, give up and return
7369  // false.
7370  unsigned int nOps = getNumOperands();
7371  assert(nOps > 0 && "isConstantSplat has 0-size build vector");
7372  unsigned EltBitSize = VT.getScalarSizeInBits();
7373 
7374  for (unsigned j = 0; j < nOps; ++j) {
7375  unsigned i = isBigEndian ? nOps-1-j : j;
7376  SDValue OpVal = getOperand(i);
7377  unsigned BitPos = j * EltBitSize;
7378 
7379  if (OpVal.isUndef())
7380  SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
7381  else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
7382  SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
7383  zextOrTrunc(sz) << BitPos;
7384  else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
7385  SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
7386  else
7387  return false;
7388  }
7389 
7390  // The build_vector is all constants or undefs. Find the smallest element
7391  // size that splats the vector.
7392 
7393  HasAnyUndefs = (SplatUndef != 0);
7394  while (sz > 8) {
7395 
7396  unsigned HalfSize = sz / 2;
7397  APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
7398  APInt LowValue = SplatValue.trunc(HalfSize);
7399  APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
7400  APInt LowUndef = SplatUndef.trunc(HalfSize);
7401 
7402  // If the two halves do not match (ignoring undef bits), stop here.
7403  if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
7404  MinSplatBits > HalfSize)
7405  break;
7406 
7407  SplatValue = HighValue | LowValue;
7408  SplatUndef = HighUndef & LowUndef;
7409 
7410  sz = HalfSize;
7411  }
7412 
7413  SplatBitSize = sz;
7414  return true;
7415 }
7416 
7418  if (UndefElements) {
7419  UndefElements->clear();
7420  UndefElements->resize(getNumOperands());
7421  }
7422  SDValue Splatted;
7423  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
7424  SDValue Op = getOperand(i);
7425  if (Op.isUndef()) {
7426  if (UndefElements)
7427  (*UndefElements)[i] = true;
7428  } else if (!Splatted) {
7429  Splatted = Op;
7430  } else if (Splatted != Op) {
7431  return SDValue();
7432  }
7433  }
7434 
7435  if (!Splatted) {
7436  assert(getOperand(0).isUndef() &&
7437  "Can only have a splat without a constant for all undefs.");
7438  return getOperand(0);
7439  }
7440 
7441  return Splatted;
7442 }
7443 
7446  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
7447 }
7448 
7451  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
7452 }
7453 
7454 int32_t
7456  uint32_t BitWidth) const {
7457  if (ConstantFPSDNode *CN =
7458  dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
7459  bool IsExact;
7460  APSInt IntVal(BitWidth);
7461  const APFloat &APF = CN->getValueAPF();
7462  if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
7463  APFloat::opOK ||
7464  !IsExact)
7465  return -1;
7466 
7467  return IntVal.exactLogBase2();
7468  }
7469  return -1;
7470 }
7471 
7473  for (const SDValue &Op : op_values()) {
7474  unsigned Opc = Op.getOpcode();
7475  if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
7476  return false;
7477  }
7478  return true;
7479 }
7480 
7482  // Find the first non-undef value in the shuffle mask.
7483  unsigned i, e;
7484  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
7485  /* search */;
7486 
7487  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
7488 
7489  // Make sure all remaining elements are either undef or the same as the first
7490  // non-undef value.
7491  for (int Idx = Mask[i]; i != e; ++i)
7492  if (Mask[i] >= 0 && Mask[i] != Idx)
7493  return false;
7494  return true;
7495 }
7496 
7497 // \brief Returns the SDNode if it is a constant integer BuildVector
7498 // or constant integer.
7500  if (isa<ConstantSDNode>(N))
7501  return N.getNode();
7503  return N.getNode();
7504  // Treat a GlobalAddress supporting constant offset folding as a
7505  // constant integer.
7506  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
7507  if (GA->getOpcode() == ISD::GlobalAddress &&
7508  TLI->isOffsetFoldingLegal(GA))
7509  return GA;
7510  return nullptr;
7511 }
7512 
7514  if (isa<ConstantFPSDNode>(N))
7515  return N.getNode();
7516 
7518  return N.getNode();
7519 
7520  return nullptr;
7521 }
7522 
7523 #ifndef NDEBUG
7524 static void checkForCyclesHelper(const SDNode *N,
7527  const llvm::SelectionDAG *DAG) {
7528  // If this node has already been checked, don't check it again.
7529  if (Checked.count(N))
7530  return;
7531 
7532  // If a node has already been visited on this depth-first walk, reject it as
7533  // a cycle.
7534  if (!Visited.insert(N).second) {
7535  errs() << "Detected cycle in SelectionDAG\n";
7536  dbgs() << "Offending node:\n";
7537  N->dumprFull(DAG); dbgs() << "\n";
7538  abort();
7539  }
7540 
7541  for (const SDValue &Op : N->op_values())
7542  checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);
7543 
7544  Checked.insert(N);
7545  Visited.erase(N);
7546 }
7547 #endif
7548 
7550  const llvm::SelectionDAG *DAG,
7551  bool force) {
7552 #ifndef NDEBUG
7553  bool check = force;
7554 #ifdef EXPENSIVE_CHECKS
7555  check = true;
7556 #endif // EXPENSIVE_CHECKS
7557  if (check) {
7558  assert(N && "Checking nonexistent SDNode");
7561  checkForCyclesHelper(N, visited, checked, DAG);
7562  }
7563 #endif // !NDEBUG
7564 }
7565 
7566 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
7567  checkForCycles(DAG->getRoot().getNode(), DAG, force);
7568 }
MachineLoop * L
void clearAllBits()
Set every bit to 0.
Definition: APInt.h:1221
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition: BitVector.h:193
opStatus roundToIntegral(roundingMode RM)
Definition: APFloat.h:954
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:500
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:467
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned char TargetFlags=0)
APInt ashr(unsigned shiftAmt) const
Arithmetic right-shift function.
Definition: APInt.cpp:1035
void AddPointer(const void *Ptr)
Add* - Add various data types to Bit data.
Definition: FoldingSet.cpp:52
bool use_empty() const
Return true if there are no uses of this node.
void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
unsigned Log2_32_Ceil(uint32_t Value)
Log2_32_Ceil - This function returns the ceil log base 2 of the specified value, 32 if the value is z...
Definition: MathExtras.h:526
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:762
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:102
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
SDValue getValue(unsigned R) const
static APInt getSignBit(unsigned BitWidth)
Get the SignBit for a specific bit width.
Definition: APInt.h:451
APInt byteSwap() const
Definition: APInt.cpp:744
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position...
ConstantFPSDNode * getConstantFPSplatNode(BitVector *UndefElements=nullptr) const
Returns the splatted constant FP or null if this is not a constant FP splat.
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
const char * getSymbol() const
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition: APFloat.h:995
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand...
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
Definition: ISDOpcodes.h:184
SDValue getSplatValue(BitVector *UndefElements=nullptr) const
Returns the splatted value or a null value if this is not a splat.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:226
Flags getFlags() const
Return the raw flags of the source value,.
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
Definition: APInt.h:458
LLVMContext * getContext() const
Definition: SelectionDAG.h:333
SDValue getTargetIndex(int Index, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
Keeps track of dbg_value information through SDISel.
Definition: SelectionDAG.h:101
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef...
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1309
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
Definition: SelectionDAG.h:804
virtual SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, unsigned Align, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memcpy.
void dumprFull(const SelectionDAG *G=nullptr) const
printrFull to dbgs().
SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
DELETED_NODE - This is an illegal value that is used to catch errors.
Definition: ISDOpcodes.h:42
MDNODE_SDNODE - This is a node that holdes an MDNode*, which is used to reference metadata in the IR...
Definition: ISDOpcodes.h:645
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
Definition: ISDOpcodes.h:304
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
bool isKnownNeverNaN(SDValue Op) const
Test whether the given SDValue is known to never be NaN.
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:572
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
size_t i
bool none() const
none - Returns true if none of the bits are set.
Definition: BitVector.h:151
Various leaf nodes.
Definition: ISDOpcodes.h:60
SDDbgValue * getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N, unsigned R, bool IsIndirect, uint64_t Off, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:313
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:39
SDVTList getVTList() const
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
Definition: ISDOpcodes.h:449
void setBit(unsigned bitPosition)
Set a given bit to 1.
Definition: APInt.cpp:553
SDValue getSignExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return an operation which will sign extend the low lanes of the operand into the specified vector typ...
unsigned getBitWidth() const
getBitWidth - Return the bitwidth of this constant.
Definition: Constants.h:148
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:219
virtual SDValue EmitTargetCodeForMemmove(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, unsigned Align, bool isVolatile, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memmove.
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:329
unsigned getPointerTypeSizeInBits(Type *) const
Layout pointer size, in bits, based on the type.
Definition: DataLayout.cpp:617
bool isExtended() const
isExtended - Test if the given EVT is extended (as opposed to being simple).
Definition: ValueTypes.h:113
unsigned getPrefTypeAlignment(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Definition: DataLayout.cpp:699
Clients of various APIs that cause global effects on the DAG can optionally implement this interface...
Definition: SelectionDAG.h:215
iterator end() const
Definition: ArrayRef.h:130
static bool isBinOpWithFlags(unsigned Opcode)
Returns true if the opcode is a binary operation with flags.
friend struct DAGUpdateListener
DAGUpdateListener is a friend so it can manipulate the listener stack.
Definition: SelectionDAG.h:255
const TargetSubtargetInfo & getSubtarget() const
Definition: SelectionDAG.h:330
bool isKnownToBeAPowerOfTwo(SDValue Val) const
Test if the given value is known to have exactly one bit set.
unsigned InferPtrAlignment(SDValue Ptr) const
Infer alignment of a load / store address.
void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in the KnownZero/KnownO...
unsigned getPointerPrefAlignment(unsigned AS=0) const
Return target's alignment for stack-based pointers FIXME: The defaults need to be removed once all of...
Definition: DataLayout.cpp:599
size_type count(PtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:380
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, bool isInteger)
Return the result of a logical AND between different comparisons of identical values: ((X op1 Y) & (X...
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
Definition: APInt.h:536
const GlobalValue * getGlobal() const
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef< int > M)
Swaps the values of N1 and N2.
virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID)=0
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space...
Definition: Type.cpp:655
bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
SDValue getBasicBlock(MachineBasicBlock *MBB)
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition: ISDOpcodes.h:237
Completely target-dependent object reference.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
static ISD::NodeType getExtendForContent(BooleanContent Content)
static sys::Mutex Lock
bool getHasDebugValue() const
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
Definition: SelectionDAG.h:251
Type * getTypeForEVT(LLVMContext &Context) const
getTypeForEVT - This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:204
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
Definition: ISDOpcodes.h:131
unsigned getRawSubclassData() const
Return the SubclassData value, without HasDebugValue.
static const fltSemantics & EVTToAPFloatSemantics(EVT VT)
Returns an APFloat semantics tag appropriate for the given type.
bool isBitwiseNot(SDValue V)
Returns true if V is a bitwise not operation.
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
Definition: ISDOpcodes.h:711
unsigned getNumOperands() const
Return the number of values used by this operation.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:736
bool isInvalidated() const
unsigned getNumOperands() const
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
void Deallocate(SubClass *E)
Deallocate - Release storage for the pointed-to object.
A debug info location.
Definition: DebugLoc.h:34
Metadata node.
Definition: Metadata.h:830
const SDValue & getOperand(unsigned Num) const
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, unsigned Align=1, bool *=nullptr) const
Determine if the target supports unaligned memory accesses.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:471
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
Definition: ISDOpcodes.h:440
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT TVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
[US]{MIN/MAX} - Binary minimum or maximum or signed or unsigned integers.
Definition: ISDOpcodes.h:330
SDNode * isConstantFPBuildVectorOrConstantFP(SDValue N)
Test whether the given value is a constant FP or similar node.
void setNodeId(int Id)
Set unique node id.
static bool isCommutativeBinOp(unsigned Opcode)
Returns true if the opcode is a commutative binary operation.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
ConstantSDNode * getConstantSplatNode(BitVector *UndefElements=nullptr) const
Returns the splatted constant or null if this is not a constant splat.
SDDbgValue * getFrameIndexDbgValue(MDNode *Var, MDNode *Expr, unsigned FI, uint64_t Off, const DebugLoc &DL, unsigned O)
FrameIndex.
const MachinePointerInfo & getPointerInfo() const
static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr, int64_t Offset=0)
InferPointerInfo - If the specified ptr/offset is a frame index, infer a MachinePointerInfo record fr...
void changeSign()
Definition: APFloat.h:975
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
Definition: SelectionDAG.h:387
Same for subtraction.
Definition: ISDOpcodes.h:240
void reserve(size_type N)
Definition: SmallVector.h:377
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit. ...
Definition: APInt.h:1363
const SDValue & getValue() const
void DeleteNode(SDNode *N)
Remove the specified node from the system.
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1 at the ...
Definition: ISDOpcodes.h:299
SDValue getConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offs=0, bool isT=false, unsigned char TargetFlags=0)
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:999
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1...
Definition: MathExtras.h:180
uint16_t PersistentId
Unique and persistent id per SDNode in the DAG.
unsigned int NumVTs
EntryToken - This is the marker used to indicate the start of a region.
Definition: ISDOpcodes.h:45
bool optForSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:464
void AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter)
Add a dbg_value SDNode.
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:369
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition: ISDOpcodes.h:39
unsigned getResNo() const
get the index which selects a specific result in the SDNode
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
bool bitsLT(EVT VT) const
bitsLT - Return true if this has less bits than VT.
Definition: ValueTypes.h:212
virtual void NodeUpdated(SDNode *N)
The node N that was updated.
bool isUndef() const
Return true if the type of the node type undefined.
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
Definition: ISDOpcodes.h:263
bool optForMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:461
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
bool isAllOnesValue() const
SDValue getExternalSymbol(const char *Sym, EVT VT)
MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:159
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:345
SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, const SDLoc &DL)
Returns sum of the base pointer and offset.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist)
Returns true if N is a predecessor of any node in Worklist.
SDValue getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return an operation which will any-extend the low lanes of the operand into the specified vector type...
const Triple & getTargetTriple() const
bool isKnownNeverZero(SDValue Op) const
Test whether the given SDValue is known to never be positive or negative zero.
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:324
APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
Definition: APInt.cpp:1251
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:388
bool isVector() const
isVector - Return true if this is a vector value type.
Definition: ValueTypes.h:133
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
opStatus divide(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:942
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition: APInt.cpp:1865
The address of a basic block.
Definition: Constants.h:822
void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block...
bool isNegative() const
Return true if the value is negative.
A description of a memory reference used in the backend.
void clear()
clear - Clear all bits.
Definition: BitVector.h:188
SDValue FoldSymbolOffset(unsigned Opcode, EVT VT, const GlobalAddressSDNode *GA, const SDNode *N2)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT, unsigned SrcAS, unsigned DestAS)
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
struct fuzzer::@269 Flags
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it...
Definition: Allocator.h:192
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:143
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, APInt &KnownZero, APInt &KnownOne)
Compute known bits from the range metadata.
Shift and rotation operations.
Definition: ISDOpcodes.h:344
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef...
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:56
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:327
SDValue getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO)
virtual void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.cpp:1122
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:190
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:32
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
unsigned getAddressSpace() const
MachinePointerInfo getWithOffset(int64_t O) const
SimpleValueType SimpleTy
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, unsigned AS)
APInt bitcastToAPInt() const
Definition: APFloat.h:1012
void AddInteger(signed I)
Definition: FoldingSet.cpp:61
The memory access is dereferenceable (i.e., doesn't trap).
EVT getScalarType() const
getScalarType - If this is a vector type, return the element type, otherwise return this...
Definition: ValueTypes.h:233
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
Definition: ISDOpcodes.h:719
SynchronizationScope
Definition: Instructions.h:50
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
allnodes_const_iterator allnodes_end() const
Definition: SelectionDAG.h:362
bool bitsGE(EVT VT) const
bitsGE - Return true if this has no less bits than VT.
Definition: ValueTypes.h:206
int getMaskElt(unsigned Idx) const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
This is an SDNode representing atomic operations.
SDValue FoldConstantVectorArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, const SDNodeFlags *Flags=nullptr)
MDNode * getExpression() const
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
ELFYAML::ELF_STO Other
Definition: ELFYAML.cpp:662
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
Definition: ValueTypes.h:123
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
iterator_range< allnodes_iterator > allnodes()
Definition: SelectionDAG.h:370
unsigned getEVTAlignment(EVT MemoryVT) const
Compute the default alignment value for the given type.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:60
bool getBoolValue() const
Convert APInt to a boolean value.
Definition: APInt.h:405
AtomicOrdering
Atomic ordering for LLVM's memory model.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
EVT getVectorElementType() const
getVectorElementType - Given a vector type, return the type of each element.
Definition: ValueTypes.h:239
unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and a assign a unique node id for each node in the DAG based on th...
static void VerifySDNode(SDNode *N)
VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector...
auto reverse(ContainerTy &&C, typename std::enable_if< has_rbegin< ContainerTy >::value >::type *=nullptr) -> decltype(make_range(C.rbegin(), C.rend()))
Definition: STLExtras.h:241
APInt shl(unsigned shiftAmt) const
Left-shift function.
Definition: APInt.h:850
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, const SDLoc &dl)
getMemsetValue - Vectorized representation of the memset value operand.
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, unsigned Align, bool isVol, MachinePointerInfo DstPtrInfo)
Lower the call to 'memset' intrinsic function into a series of store operations.
#define F(x, y, z)
Definition: MD5.cpp:51
void checkForCycles(const SelectionDAG *DAG, bool force=false)
unsigned getIROrder() const
Return the node ordering.
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:410
SDValue getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO)
bool isIndirect() const
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition: APFloat.cpp:4139
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDValue getRegisterMask(const uint32_t *RegMask)
DAGUpdateListener *const Next
Definition: SelectionDAG.h:216
MachineBasicBlock * MBB
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:363
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:200
Function Alias Analysis false
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition: APInt.h:1101
DbgValueKind getKind() const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
opStatus subtract(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:932
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:220
MachineConstantPoolValue * getMachineCPVal() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
Definition: SelectionDAG.h:737
static GCRegistry::Add< OcamlGC > B("ocaml","ocaml 3.10-compatible GC")
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition: APFloat.h:153
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, bool IsTruncating=false, bool IsCompressing=false)
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:842
const APInt & getAPIntValue() const
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification, or lowering of the constant.
Definition: ISDOpcodes.h:125
EVT getMemoryVT() const
Return the type of the in-memory value.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:487
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
Definition: ISDOpcodes.h:427
const ConstantInt * getConstantIntValue() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements. ...
Definition: SelectionDAG.h:647
void setIROrder(unsigned Order)
Set the node ordering.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:135
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself...
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, const Value *PtrVal, unsigned Alignment, AtomicOrdering Ordering, SynchronizationScope SynchScope)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands...
void init(MachineFunction &mf)
Prepare this SelectionDAG to process code in the given MachineFunction.
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:151
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:141
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:328
bool bitsLE(EVT VT) const
bitsLE - Return true if this has no more bits than VT.
Definition: ValueTypes.h:218
Expected< const typename ELFT::Sym * > getSymbol(typename ELFT::SymRange Symbols, uint32_t Index)
Definition: Object/ELF.h:236
Maximum length of the test input libFuzzer tries to guess a good value based on the corpus and reports it always prefer smaller inputs during the corpus shuffle When libFuzzer itself reports a bug this exit code will be used If indicates the maximal total time in seconds to run the fuzzer minimizes the provided crash input Use with etc Experimental Use value profile to guide fuzzing Number of simultaneous worker processes to run the jobs If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
unsigned char getTargetFlags() const
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition: APInt.cpp:501
UNDEF - An undefined node.
Definition: ISDOpcodes.h:178
This class is used to represent ISD::STORE nodes.
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:453
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList)
AddNodeIDValueTypes - Value type lists are intern'd so we can represent them solely with their pointe...
static const fltSemantics & IEEEsingle()
Definition: APFloat.cpp:100
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Definition: ISDOpcodes.h:274
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
This corresponds to the llvm.lifetime.
Definition: ISDOpcodes.h:743
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
Definition: APInt.h:518
virtual const SelectionDAGTargetInfo * getSelectionDAGInfo() const
SDNode * getNode() const
get the SDNode which holds the desired result
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
Definition: FoldingSet.h:316
The memory access is volatile.
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
MinAlign - A and B are either alignments or offsets.
Definition: MathExtras.h:589
bool hasPredecessor(const SDNode *N) const
Return true if N is a predecessor of this node.
unsigned getScalarSizeInBits() const
Definition: ValueTypes.h:262
unsigned getStoreSize() const
getStoreSize - Return the number of bytes overwritten by a store of the specified value type...
Definition: ValueTypes.h:268
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
#define P(N)
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:166
void clearSign()
Definition: APFloat.h:976
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, unsigned Align, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
bool isZero() const
Return true if the value is positive or negative zero.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:283
APInt trunc(unsigned width) const
Truncate to new width.
Definition: APInt.cpp:916
bool hasFloatingPointExceptions() const
Return true if target supports floating point exceptions.
bool isMachineConstantPoolEntry() const
MVT - Machine Value Type.
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC, SDVTList VTList, ArrayRef< SDValue > OpList)
const SDValue & getOperand(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
Simple binary floating point operators.
Definition: ISDOpcodes.h:246
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, bool Vol=false, bool ReadMem=true, bool WriteMem=true, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
static bool doNotCSE(SDNode *N)
doNotCSE - Return true if CSE should not be performed for this node.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition: APInt.h:1135
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void DropOperands()
Release the operands and set this node to have zero operands.
virtual EVT getOptimalMemOpType(uint64_t, unsigned, unsigned, bool, bool, bool, MachineFunction &) const
Returns the target specific optimal type for load and store operations as a result of memset...
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
This is an important base class in LLVM.
Definition: Constant.h:42
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
Definition: SmallVector.h:115
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:818
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
Definition: ISDOpcodes.h:279
const Constant * getConstVal() const
This file contains the declarations for the subclasses of Constant, which represent the different fla...
LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void * Allocate(size_t Size, size_t Alignment)
Allocate space at the specified alignment.
Definition: Allocator.h:212
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:228
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline...
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
const unsigned int integerPartWidth
Definition: APInt.h:40
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:368
static const fltSemantics & IEEEhalf()
Definition: APFloat.cpp:97
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:587
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
unsigned getScalarValueSizeInBits() const
void AddBoolean(bool B)
Definition: FoldingSet.h:336
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition: APInt.h:484
bool sle(const APInt &RHS) const
Signed less or equal comparison.
Definition: APInt.h:1067
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef< SDValue > Ops)
AddNodeIDOperands - Various routines for adding operands to the NodeID data.
This class provides iterator support for SDUse operands that use a specific SDNode.
BumpPtrAllocator & getAlloc()
Definition: SelectionDAG.h:132
uint32_t Offset
ArrayRef< SDDbgValue * > GetDbgValues(const SDNode *SD)
Get the debug values which reference the given SDNode.
static ManagedStatic< std::set< EVT, EVT::compareRawBits > > EVTs
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1255
iterator begin() const
Definition: ArrayRef.h:129
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1119
unsigned getOpcode() const
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:939
TargetIndex - Like a constant pool entry, but with completely target-dependent semantics.
Definition: ISDOpcodes.h:144
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition: APInt.cpp:1854
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:57
unsigned char getTargetFlags() const
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
self_iterator getIterator()
Definition: ilist_node.h:81
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y)...
unsigned countPopulation() const
Count the number of bits set.
Definition: APInt.h:1397
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:628
ilist< SDNode >::size_type allnodes_size() const
Definition: SelectionDAG.h:366
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:136
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::LoadExtType, bool IsExpanding=false)
bool isVolatile() const
const SDValue & getValue() const
ConstantSDNode * isConstOrConstSplat(SDValue V)
Returns the SDNode if it is a constant splat BuildVector or constant int.
opStatus convertToInteger(integerPart *Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
Definition: APFloat.h:986
static bool isValueValidForType(EVT VT, const APFloat &Val)
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
Definition: GlobalValue.h:232
Bit counting operators with an undefined result for zero inputs.
Definition: ISDOpcodes.h:350
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo...
Definition: ISDOpcodes.h:705
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:485
LLVM_NODISCARD bool empty() const
Definition: SmallPtrSet.h:98
static ManagedStatic< sys::SmartMutex< true > > VTMutex
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:392
ArrayRef< SDDbgValue * > getSDDbgValues(const SDNode *Node)
Definition: SelectionDAG.h:138
EVT - Extended Value Type.
Definition: ValueTypes.h:31
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:213
std::vector< ArgListEntry > ArgListTy
static const fltSemantics & IEEEquad()
Definition: APFloat.cpp:106
const APFloat & getValueAPF() const
static const APInt * getValidShiftAmountConstant(SDValue V)
If a SHL/SRA/SRL node has a constant or splat constant shift amount that is less than the element bit...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isEqualTo(SDValue A, SDValue B) const
Test whether two SDValues are known to compare equal.
Abstract base class for all machine specific constantpool value subclasses.
This structure contains all information that is necessary for lowering calls.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements)
getVectorVT - Returns the EVT that represents a vector NumElements in length, where each element is o...
Definition: ValueTypes.h:70
SDDbgValue * getConstantDbgValue(MDNode *Var, MDNode *Expr, const Value *C, uint64_t Off, const DebugLoc &DL, unsigned O)
Constant.
This class contains a discriminated union of information about pointers in memory operands...
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:391
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
HANDLENODE node - Used as a handle for various purposes.
Definition: ISDOpcodes.h:659
void copySign(const APFloat &RHS)
Definition: APFloat.h:977
APInt rotr(unsigned rotateAmt) const
Rotate right by rotateAmt.
Definition: APInt.cpp:1262
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
Definition: SelectionDAG.h:378
const BlockAddress * getBlockAddress() const
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
SDValue CreateStackTemporary(EVT VT, unsigned minAlign=1)
Create a stack temporary, suitable for holding the specified value type.
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Definition: PointerUnion.h:115
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:594
void intersectFlagsWith(const SDNodeFlags *Flags)
Clear any flags in this node that aren't also set in Flags.
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition: APInt.cpp:1902
const MachinePointerInfo & getPointerInfo() const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
MachineMemOperand * MMO
Memory reference information.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
static const char *const Magic
Definition: Archive.cpp:25
The memory access writes data.
const SDValue & getOffset() const
bool bitsGT(EVT VT) const
bitsGT - Return true if this has more bits than VT.
Definition: ValueTypes.h:200
bool isOSDarwin() const
isOSDarwin - Is this a "Darwin" OS (OS X, iOS, or watchOS).
Definition: Triple.h:455
SDValue getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return an operation which will zero extend the low lanes of the operand into the specified vector typ...
ArrayRef< int > getMask() const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition: APInt.h:1083
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type...
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
Definition: APInt.cpp:703
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
Definition: DataLayout.cpp:709
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:50
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:689
SDValue expandVACopy(SDNode *Node)
Expand the specified ISD::VACOPY node as the Legalize pass would.
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef...
Iterator for intrusive lists based on ilist_node.
static bool isMemSrcFromString(SDValue Src, StringRef &Str)
isMemSrcFromString - Returns true if memcpy source is a string constant.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:425
const DebugLoc & getDebugLoc() const
This is the shared class of boolean and integer constants.
Definition: Constants.h:88
void dump() const
Dump this node, for debugging.
allnodes_const_iterator allnodes_begin() const
Definition: SelectionDAG.h:361
virtual void NodeDeleted(SDNode *N, SDNode *E)
The node N that was deleted and, if E is not null, an equivalent node E that replaced it...
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition: APInt.cpp:533
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side...
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false...
Definition: SmallPtrSet.h:375
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:285
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part...
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:166
X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and rounds it to a floating point val...
Definition: ISDOpcodes.h:482
SDNode * SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type...
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
ADDRSPACECAST - This operator converts between pointers of different address spaces.
Definition: ISDOpcodes.h:504
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:843
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:230
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Definition: SelectionDAG.h:639
SDNode * getSDNode() const
virtual const TargetLowering * getTargetLowering() const
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
const DataFlowGraph & G
Definition: RDFGraph.cpp:206
An SDNode that represents everything that will be needed to construct a MachineInstr.
const SDValue & getChain() const
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:625
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:347
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:382
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
This is an abstract virtual class for memory operations.
unsigned char getTargetFlags() const
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0)
Append the extracted elements from Start to Count out of the vector Op in Args.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:558
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset...
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
Definition: ISDOpcodes.h:769
int32_t exactLogBase2() const
Definition: APInt.h:1547
Represents one node in the SelectionDAG.
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:623
CondCode getSetCCInverse(CondCode Operation, bool isInteger)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
Type * getType() const
getType - get type of this MachineConstantPoolValue.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, unsigned Align, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
static GCRegistry::Add< ShadowStackGC > C("shadow-stack","Very portable GC for uncooperative code generators")
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
unsigned Log2_32(uint32_t Value)
Log2_32 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
Definition: MathExtras.h:513
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:586
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
Definition: APInt.h:550
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N)
Test whether the given value is a constant int or similar node.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SynchronizationScope SynchScope=CrossThread, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Class for arbitrary precision integers.
Definition: APInt.h:77
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
bool exceedsNaturalStackAlignment(unsigned Align) const
Returns true if the given alignment exceeds the natural stack alignment.
Definition: DataLayout.h:252
bool haveNoCommonBitsSet(SDValue A, SDValue B) const
Return true if A and B have no common bits set.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, SDNode *Cst1, SDNode *Cst2)
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:354
int64_t getSExtValue() const
op_iterator op_begin() const
static use_iterator use_end()
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:400
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:403
opStatus mod(const APFloat &RHS)
Definition: APFloat.h:948
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:130
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:464
CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, bool isInteger)
Return the result of a logical OR between different comparisons of identical values: ((X op1 Y) | (X ...
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:259
SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op)
Return the specified value casted to the target's desired shift amount type.
SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned char TargetFlags=0)
iterator_range< value_op_iterator > op_values() const
static const fltSemantics & IEEEdouble()
Definition: APFloat.cpp:103
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
Definition: APInt.h:503
Flags
Flags values. These may be or'd together.
The memory access reads data.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
CallLoweringInfo & setTailCall(bool Value=true)
unsigned char getTargetFlags() const
opStatus add(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:925
unsigned countLeadingOnes() const
Count the number of leading one bits.
Definition: APInt.cpp:676
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack...
ConstantFPSDNode * isConstOrConstSplatFP(SDValue V)
Returns the SDNode if it is a constant splat BuildVector or constant float.
opStatus
IEEE-754R 7: Default exception handling.
Definition: APFloat.h:172
These are IR-level optimization flags that may be propagated to SDNodes.
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:259
Represents a use of a SDNode.
unsigned getIROrder() const
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
Definition: SmallVector.h:119
uint64_t getConstantOperandVal(unsigned i) const
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:333
static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, llvm::SelectionDAG &DAG)
bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
static void checkForCyclesHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallPtrSetImpl< const SDNode * > &Checked, const llvm::SelectionDAG *DAG)
static bool shouldLowerMemFuncForSize(const MachineFunction &MF)
bool isUndef() const
APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition: APInt.cpp:1817
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:418
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:536
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
Definition: ValueTypes.h:256
void ReplaceAllUsesWith(SDValue From, SDValue Op)
Modify anything using 'From' to use 'To' instead.
#define I(x, y, z)
Definition: MD5.cpp:54
#define N
ArrayRef< SDUse > ops() const
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:135
bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
void Profile(FoldingSetNodeID &ID) const
Gather unique data for the node.
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
The memory access always returns the same value (or traps).
static std::pair< APInt, bool > FoldValue(unsigned Opcode, const APInt &C1, const APInt &C2)
op_iterator op_end() const
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const SDValue & getOffset() const
unsigned countTrailingOnes() const
Count the number of trailing one bits.
Definition: APInt.h:1385
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:287
intptr_t getRawBits() const
Definition: ValueTypes.h:346
Same for multiplication.
Definition: ISDOpcodes.h:243
bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing an...
static int isSignedOp(ISD::CondCode Opcode)
For an integer comparison, return 1 if the comparison is a signed operation and 2 if the result is an...
DebugLoc getDebugLoc() const
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:723
int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power of 2, return the log base 2 integer value.
int getNodeId() const
Return the unique node id.
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS, const AllocaInst *Alloca=nullptr)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
EVT getValueType() const
Return the ValueType of the referenced return value.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getCondCode(ISD::CondCode Cond)
SDNode * getNode() const
Convenience function for get().getNode().
bool getConstantStringInfo(const Value *V, StringRef &Str, uint64_t Offset=0, bool TrimAtNul=true)
This function computes the length of a null-terminated C string pointed to by V.
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo, unsigned Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SynchronizationScope SynchScope)
Gets a node for an atomic cmpxchg op.
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input...
const DebugLoc & getDebugLoc() const
Return the source location info.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition: ISDOpcodes.h:291
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
Definition: ValueTypes.h:118
static bool FindOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, bool AllowOverlap, unsigned DstAS, unsigned SrcAS, SelectionDAG &DAG, const TargetLowering &TLI)
Determines the optimal series of memory ops to replace the memset / memcpy.
SDNode * getNodeIfExists(unsigned Opcode, SDVTList VTs, ArrayRef< SDValue > Ops, const SDNodeFlags *Flags=nullptr)
Get the specified node if it's already available, or else return NULL.
SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned char TargetFlags=0)
This class is used to form a handle around another node that is persistent and is updated across invo...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
Definition: APFloat.h:949
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:107
unsigned getAlignment() const
bool operator<(int64_t V1, const APSInt &V2)
Definition: APSInt.h:326
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
uint64_t getOffset() const
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
LLVM Value Representation.
Definition: Value.h:71
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:249
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1007
MemSDNodeBitfields MemSDNodeBits
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isTruncatingStore() const
Return true if the op does a truncation before store.
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:239
SDVTList getSDVTList()
Definition: SelectionDAG.h:61
void setDebugLoc(DebugLoc dl)
Set source location info.
SDValue getValueType(EVT)
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:81
PREFETCH - This corresponds to a prefetch intrinsic.
Definition: ISDOpcodes.h:685
unsigned getResNo() const
void setHasDebugValue(bool b)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits...
uint64_t getSize() const
Return the size in bytes of the memory reference.
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:331
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.Val alone...
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
unsigned countLeadingZeros() const
The APInt version of the countLeadingZeros functions in MathExtras.h.
Definition: APInt.h:1343
Primary interface to the complete machine description for the target machine.
void add(SDDbgValue *V, const SDNode *Node, bool isParameter)
Definition: SelectionDAG.h:113
FoldingSetNodeIDRef Intern(BumpPtrAllocator &Allocator) const
Intern - Copy this node's data to a memory region allocated from the given allocator and return a Fol...
Definition: FoldingSet.cpp:176
SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:47
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:980
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:377
bool needsStackRealignment(const MachineFunction &MF) const
True if storage within the function requires the stack pointer to be aligned more than the normal cal...
ManagedStatic - This transparently changes the behavior of global statics to be lazily constructed on...
Definition: ManagedStatic.h:63
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
virtual SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, unsigned Align, bool isVolatile, MachinePointerInfo DstPtrInfo) const
Emit target-specific code that performs a memset.
unsigned getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition: ISDOpcodes.h:197
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, const TargetLowering &TLI, StringRef Str)
getMemsetStringVal - Similar to getMemsetValue.
Conversion operators.
Definition: ISDOpcodes.h:397
BooleanContent
Enum that describes how the target represents true/false values.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)
AddNodeIDOpcode - Add the node opcode to the NodeID data.
static APInt getNullValue(unsigned numBits)
Get the '0' value.
Definition: APInt.h:465
int * Ptr
static ManagedStatic< EVTArray > SimpleVTArray
APInt abs() const
Get the absolute value;.
Definition: APInt.h:1559
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:381
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
Definition: ISDOpcodes.h:698
MDNode * getVariable() const
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT SrcTy)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value...
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:406
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
bool isTargetMemoryOpcode() const
Test if this node has a target-specific memory-referencing opcode (in the <target>ISD namespace and g...
unsigned getAlignment() const
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal on this target.
void erase(const SDNode *Node)
Invalidate all DbgValues attached to the node and remove it from the Node-to-DbgValues map...
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
bool isBigEndian() const
Definition: DataLayout.h:221
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
SDDbgValue - Holds the information from a dbg_value node through SDISel.
static GCRegistry::Add< ErlangGC > A("erlang","erlang-compatible garbage collector")
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
Definition: ISDOpcodes.h:516
void setObjectAlignment(int ObjectIdx, unsigned Align)
setObjectAlignment - Change the alignment of the specified stack object.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
getIntegerVT - Returns the EVT that represents an integer with the given number of bits...
Definition: ValueTypes.h:61
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1051
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N)
If this is an SDNode with special info, add this info to the NodeID data.
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
static IntegerType * getInt8Ty(LLVMContext &C)
Definition: Type.cpp:167
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall, MachinePointerInfo DstPtrInfo)
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
Definition: ISDOpcodes.h:694
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
Definition: ISDOpcodes.h:321
static bool isSplatMask(const int *Mask, EVT VT)
unsigned getResNo() const
Convenience function for get().getResNo().
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:226
const SDNodeFlags * getFlags() const
This could be defined as a virtual function and implemented more simply and directly, but it is not to avoid creating a vtable for this class.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
SDValue expandVAArg(SDNode *Node)
Expand the specified ISD::VAARG node as the Legalize pass would.
This file describes how to lower LLVM code to machine code.
unsigned char getTargetFlags() const
unsigned getOrder() const
uint64_t getZExtValue() const
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:799
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:326
uint64_t integerPart
Definition: APInt.h:33
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs)
makeVTList - Return an instance of the SDVTList struct initialized with the specified members...
unsigned getVectorNumElements() const
getVectorNumElements - Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:248
SRCVALUE - This is a node type that holds a Value* that is used to make reference to a value in the L...
Definition: ISDOpcodes.h:641
void resize(size_type N)
Definition: SmallVector.h:352
This class is used to represent ISD::LOAD nodes.