98void SelectionDAG::DAGNodeDeletedListener::anchor() {}
99void SelectionDAG::DAGNodeInsertedListener::anchor() {}
101#define DEBUG_TYPE "selectiondag"
105 cl::desc(
"Gang up loads and stores generated by inlining of memcpy"));
108 cl::desc(
"Number limit for gluing ld/st of memcpy."),
113 cl::desc(
"DAG combiner limit number of steps when searching DAG "
114 "for predecessor nodes"));
152 if (
auto OptAPInt =
N->getOperand(0)->bitcastToAPInt()) {
154 N->getValueType(0).getVectorElementType().getSizeInBits();
155 SplatVal = OptAPInt->
trunc(EltSize);
165 unsigned SplatBitSize;
167 unsigned EltSize =
N->getValueType(0).getVectorElementType().getSizeInBits();
172 const bool IsBigEndian =
false;
173 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
174 EltSize, IsBigEndian) &&
175 EltSize == SplatBitSize;
183 while (
N->getOpcode() == ISD::BITCAST)
184 N =
N->getOperand(0).getNode();
193 unsigned i = 0, e =
N->getNumOperands();
196 while (i != e &&
N->getOperand(i).isUndef())
200 if (i == e)
return false;
212 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
213 if (OptAPInt->countr_one() < EltSize)
221 for (++i; i != e; ++i)
222 if (
N->getOperand(i) != NotZero && !
N->getOperand(i).isUndef())
229 while (
N->getOpcode() == ISD::BITCAST)
230 N =
N->getOperand(0).getNode();
239 bool IsAllUndef =
true;
252 if (
auto OptAPInt =
Op->bitcastToAPInt()) {
253 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
254 if (OptAPInt->countr_zero() < EltSize)
302 assert(
N->getValueType(0).isVector() &&
"Expected a vector!");
304 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
305 if (EltSize <= NewEltSize)
309 return (
N->getOperand(0).getValueType().getScalarSizeInBits() <=
314 return (
N->getOperand(0).getValueType().getScalarSizeInBits() <=
327 APInt C =
Op->getAsAPIntVal().trunc(EltSize);
328 if (
Signed &&
C.trunc(NewEltSize).sext(EltSize) !=
C)
330 if (!
Signed &&
C.trunc(NewEltSize).zext(EltSize) !=
C)
341 if (
N->getNumOperands() == 0)
347 return N->getOpcode() ==
ISD::FREEZE &&
N->getOperand(0).isUndef();
350template <
typename ConstNodeType>
352 std::function<
bool(ConstNodeType *)> Match,
353 bool AllowUndefs,
bool AllowTruncation) {
363 EVT SVT =
Op.getValueType().getScalarType();
364 for (
unsigned i = 0, e =
Op.getNumOperands(); i != e; ++i) {
365 if (AllowUndefs &&
Op.getOperand(i).isUndef()) {
372 if (!Cst || (!AllowTruncation && Cst->getValueType(0) != SVT) ||
387 bool AllowUndefs,
bool AllowTypeMismatch) {
388 if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
394 return Match(LHSCst, RHSCst);
397 if (LHS.getOpcode() != RHS.getOpcode() ||
403 for (
unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
406 bool LHSUndef = AllowUndefs && LHSOp.
isUndef();
407 bool RHSUndef = AllowUndefs && RHSOp.
isUndef();
410 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
412 if (!AllowTypeMismatch && (LHSOp.
getValueType() != SVT ||
415 if (!Match(LHSCst, RHSCst))
437 switch (VecReduceOpcode) {
440 case ISD::VECREDUCE_FADD:
441 case ISD::VECREDUCE_SEQ_FADD:
442 case ISD::VP_REDUCE_FADD:
443 case ISD::VP_REDUCE_SEQ_FADD:
445 case ISD::VECREDUCE_FMUL:
446 case ISD::VECREDUCE_SEQ_FMUL:
447 case ISD::VP_REDUCE_FMUL:
448 case ISD::VP_REDUCE_SEQ_FMUL:
450 case ISD::VECREDUCE_ADD:
451 case ISD::VP_REDUCE_ADD:
453 case ISD::VECREDUCE_MUL:
454 case ISD::VP_REDUCE_MUL:
456 case ISD::VECREDUCE_AND:
457 case ISD::VP_REDUCE_AND:
459 case ISD::VECREDUCE_OR:
460 case ISD::VP_REDUCE_OR:
462 case ISD::VECREDUCE_XOR:
463 case ISD::VP_REDUCE_XOR:
465 case ISD::VECREDUCE_SMAX:
466 case ISD::VP_REDUCE_SMAX:
468 case ISD::VECREDUCE_SMIN:
469 case ISD::VP_REDUCE_SMIN:
471 case ISD::VECREDUCE_UMAX:
472 case ISD::VP_REDUCE_UMAX:
474 case ISD::VECREDUCE_UMIN:
475 case ISD::VP_REDUCE_UMIN:
477 case ISD::VECREDUCE_FMAX:
478 case ISD::VP_REDUCE_FMAX:
480 case ISD::VECREDUCE_FMIN:
481 case ISD::VP_REDUCE_FMIN:
483 case ISD::VECREDUCE_FMAXIMUM:
484 case ISD::VP_REDUCE_FMAXIMUM:
485 return ISD::FMAXIMUM;
486 case ISD::VECREDUCE_FMINIMUM:
487 case ISD::VP_REDUCE_FMINIMUM:
488 return ISD::FMINIMUM;
496#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) \
499#include "llvm/IR/VPIntrinsics.def"
507#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
508#define VP_PROPERTY_BINARYOP return true;
509#define END_REGISTER_VP_SDNODE(VPSD) break;
510#include "llvm/IR/VPIntrinsics.def"
519 case ISD::VP_REDUCE_ADD:
520 case ISD::VP_REDUCE_MUL:
521 case ISD::VP_REDUCE_AND:
522 case ISD::VP_REDUCE_OR:
523 case ISD::VP_REDUCE_XOR:
524 case ISD::VP_REDUCE_SMAX:
525 case ISD::VP_REDUCE_SMIN:
526 case ISD::VP_REDUCE_UMAX:
527 case ISD::VP_REDUCE_UMIN:
528 case ISD::VP_REDUCE_FMAX:
529 case ISD::VP_REDUCE_FMIN:
530 case ISD::VP_REDUCE_FMAXIMUM:
531 case ISD::VP_REDUCE_FMINIMUM:
532 case ISD::VP_REDUCE_FADD:
533 case ISD::VP_REDUCE_FMUL:
534 case ISD::VP_REDUCE_SEQ_FADD:
535 case ISD::VP_REDUCE_SEQ_FMUL:
545#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...) \
548#include "llvm/IR/VPIntrinsics.def"
557#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
560#include "llvm/IR/VPIntrinsics.def"
570#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) case ISD::VPOPC:
571#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) return ISD::SDOPC;
572#define END_REGISTER_VP_SDNODE(VPOPC) break;
573#include "llvm/IR/VPIntrinsics.def"
582#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) break;
583#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) case ISD::SDOPC:
584#define END_REGISTER_VP_SDNODE(VPOPC) return ISD::VPOPC;
585#include "llvm/IR/VPIntrinsics.def"
632 bool isIntegerLike) {
657 bool IsInteger =
Type.isInteger();
662 unsigned Op = Op1 | Op2;
678 bool IsInteger =
Type.isInteger();
713 ID.AddPointer(VTList.
VTs);
719 for (
const auto &
Op :
Ops) {
720 ID.AddPointer(
Op.getNode());
721 ID.AddInteger(
Op.getResNo());
728 for (
const auto &
Op :
Ops) {
729 ID.AddPointer(
Op.getNode());
730 ID.AddInteger(
Op.getResNo());
743 switch (
N->getOpcode()) {
752 ID.AddPointer(
C->getConstantIntValue());
753 ID.AddBoolean(
C->isOpaque());
786 case ISD::PSEUDO_PROBE:
799 ID.AddInteger(CP->getAlign().value());
800 ID.AddInteger(CP->getOffset());
801 if (CP->isMachineConstantPoolEntry())
802 CP->getMachineCPVal()->addSelectionDAGCSEId(
ID);
804 ID.AddPointer(CP->getConstVal());
805 ID.AddInteger(CP->getTargetFlags());
817 ID.AddInteger(LD->getMemoryVT().getRawBits());
818 ID.AddInteger(LD->getRawSubclassData());
819 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
820 ID.AddInteger(LD->getMemOperand()->getFlags());
825 ID.AddInteger(ST->getMemoryVT().getRawBits());
826 ID.AddInteger(ST->getRawSubclassData());
827 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
828 ID.AddInteger(ST->getMemOperand()->getFlags());
839 case ISD::VP_LOAD_FF: {
841 ID.AddInteger(LD->getMemoryVT().getRawBits());
842 ID.AddInteger(LD->getRawSubclassData());
843 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
844 ID.AddInteger(LD->getMemOperand()->getFlags());
847 case ISD::VP_STORE: {
855 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: {
862 case ISD::EXPERIMENTAL_VP_STRIDED_STORE: {
869 case ISD::VP_GATHER: {
877 case ISD::VP_SCATTER: {
909 case ISD::MSCATTER: {
917 case ISD::ATOMIC_CMP_SWAP:
918 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
919 case ISD::ATOMIC_SWAP:
920 case ISD::ATOMIC_LOAD_ADD:
921 case ISD::ATOMIC_LOAD_SUB:
922 case ISD::ATOMIC_LOAD_AND:
923 case ISD::ATOMIC_LOAD_CLR:
924 case ISD::ATOMIC_LOAD_OR:
925 case ISD::ATOMIC_LOAD_XOR:
926 case ISD::ATOMIC_LOAD_NAND:
927 case ISD::ATOMIC_LOAD_MIN:
928 case ISD::ATOMIC_LOAD_MAX:
929 case ISD::ATOMIC_LOAD_UMIN:
930 case ISD::ATOMIC_LOAD_UMAX:
931 case ISD::ATOMIC_LOAD:
932 case ISD::ATOMIC_STORE: {
946 case ISD::ADDRSPACECAST: {
968 case ISD::MDNODE_SDNODE:
976 ID.AddInteger(MN->getRawSubclassData());
977 ID.AddInteger(MN->getPointerInfo().getAddrSpace());
978 ID.AddInteger(MN->getMemOperand()->getFlags());
979 ID.AddInteger(MN->getMemoryVT().getRawBits());
1002 if (
N->getValueType(0) == MVT::Glue)
1005 switch (
N->getOpcode()) {
1007 case ISD::HANDLENODE:
1013 for (
unsigned i = 1, e =
N->getNumValues(); i != e; ++i)
1014 if (
N->getValueType(i) == MVT::Glue)
1031 if (
Node.use_empty())
1046 while (!DeadNodes.
empty()) {
1055 DUL->NodeDeleted(
N,
nullptr);
1058 RemoveNodeFromCSEMaps(
N);
1089 RemoveNodeFromCSEMaps(
N);
1093 DeleteNodeNotInCSEMaps(
N);
1096void SelectionDAG::DeleteNodeNotInCSEMaps(
SDNode *
N) {
1097 assert(
N->getIterator() != AllNodes.begin() &&
1098 "Cannot delete the entry node!");
1099 assert(
N->use_empty() &&
"Cannot delete a node that is not dead!");
1108 assert(!(V->isVariadic() && isParameter));
1110 ByvalParmDbgValues.push_back(V);
1112 DbgValues.push_back(V);
1115 DbgValMap[
Node].push_back(V);
1119 DbgValMapType::iterator
I = DbgValMap.find(
Node);
1120 if (
I == DbgValMap.end())
1122 for (
auto &Val:
I->second)
1123 Val->setIsInvalidated();
1127void SelectionDAG::DeallocateNode(
SDNode *
N) {
1150void SelectionDAG::verifyNode(
SDNode *
N)
const {
1151 switch (
N->getOpcode()) {
1153 if (
N->isTargetOpcode())
1157 EVT VT =
N->getValueType(0);
1158 assert(
N->getNumValues() == 1 &&
"Too many results!");
1160 "Wrong return type!");
1161 assert(
N->getNumOperands() == 2 &&
"Wrong number of operands!");
1162 assert(
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType() &&
1163 "Mismatched operand types!");
1165 "Wrong operand type!");
1167 "Wrong return type size");
1171 assert(
N->getNumValues() == 1 &&
"Too many results!");
1172 assert(
N->getValueType(0).isVector() &&
"Wrong return type!");
1173 assert(
N->getNumOperands() ==
N->getValueType(0).getVectorNumElements() &&
1174 "Wrong number of operands!");
1175 EVT EltVT =
N->getValueType(0).getVectorElementType();
1176 for (
const SDUse &
Op :
N->ops()) {
1177 assert((
Op.getValueType() == EltVT ||
1178 (EltVT.
isInteger() &&
Op.getValueType().isInteger() &&
1179 EltVT.
bitsLE(
Op.getValueType()))) &&
1180 "Wrong operand type!");
1181 assert(
Op.getValueType() ==
N->getOperand(0).getValueType() &&
1182 "Operands must all have the same type");
1194void SelectionDAG::InsertNode(SDNode *
N) {
1195 AllNodes.push_back(
N);
1197 N->PersistentId = NextPersistentId++;
1201 DUL->NodeInserted(
N);
1208bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *
N) {
1209 bool Erased =
false;
1210 switch (
N->getOpcode()) {
1211 case ISD::HANDLENODE:
return false;
1214 "Cond code doesn't exist!");
1223 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
1229 Erased = MCSymbols.erase(MCSN->getMCSymbol());
1235 Erased = ExtendedValueTypeNodes.erase(VT);
1246 Erased = CSEMap.RemoveNode(
N);
1253 if (!Erased &&
N->getValueType(
N->getNumValues()-1) != MVT::Glue &&
1268SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *
N) {
1272 SDNode *Existing = CSEMap.GetOrInsertNode(
N);
1273 if (Existing !=
N) {
1284 DUL->NodeDeleted(
N, Existing);
1285 DeleteNodeNotInCSEMaps(
N);
1292 DUL->NodeUpdated(
N);
1299SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *
N,
SDValue Op,
1305 FoldingSetNodeID
ID;
1308 SDNode *
Node = FindNodeOrInsertPos(
ID, SDLoc(
N), InsertPos);
1310 Node->intersectFlagsWith(
N->getFlags());
1318SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *
N,
1325 FoldingSetNodeID
ID;
1328 SDNode *
Node = FindNodeOrInsertPos(
ID, SDLoc(
N), InsertPos);
1330 Node->intersectFlagsWith(
N->getFlags());
1343 FoldingSetNodeID
ID;
1346 SDNode *
Node = FindNodeOrInsertPos(
ID, SDLoc(
N), InsertPos);
1348 Node->intersectFlagsWith(
N->getFlags());
1361 : TM(tm), OptLevel(OL), EntryNode(
ISD::EntryToken, 0,
DebugLoc(),
1364 InsertNode(&EntryNode);
1375 SDAGISelPass = PassPtr;
1379 LibInfo = LibraryInfo;
1380 Context = &MF->getFunction().getContext();
1385 FnVarLocs = VarLocs;
1389 assert(!UpdateListeners &&
"Dangling registered DAGUpdateListeners");
1391 OperandRecycler.clear(OperandAllocator);
1399void SelectionDAG::allnodes_clear() {
1400 assert(&*AllNodes.begin() == &EntryNode);
1401 AllNodes.remove(AllNodes.begin());
1402 while (!AllNodes.empty())
1403 DeallocateNode(&AllNodes.front());
1405 NextPersistentId = 0;
1411 SDNode *
N = CSEMap.FindNodeOrInsertPos(
ID, InsertPos);
1413 switch (
N->getOpcode()) {
1418 "debug location. Use another overload.");
1425 const SDLoc &
DL,
void *&InsertPos) {
1426 SDNode *
N = CSEMap.FindNodeOrInsertPos(
ID, InsertPos);
1428 switch (
N->getOpcode()) {
1434 if (
N->getDebugLoc() !=
DL.getDebugLoc())
1441 if (
DL.getIROrder() &&
DL.getIROrder() <
N->getIROrder())
1442 N->setDebugLoc(
DL.getDebugLoc());
1451 OperandRecycler.clear(OperandAllocator);
1452 OperandAllocator.Reset();
1455 ExtendedValueTypeNodes.clear();
1456 ExternalSymbols.clear();
1457 TargetExternalSymbols.clear();
1463 EntryNode.UseList =
nullptr;
1464 InsertNode(&EntryNode);
1470 return VT.
bitsGT(
Op.getValueType())
1476std::pair<SDValue, SDValue>
1480 "Strict no-op FP extend/round not allowed.");
1487 return std::pair<SDValue, SDValue>(Res,
SDValue(Res.
getNode(), 1));
1491 return VT.
bitsGT(
Op.getValueType()) ?
1497 return VT.
bitsGT(
Op.getValueType()) ?
1503 return VT.
bitsGT(
Op.getValueType()) ?
1511 auto Type =
Op.getValueType();
1515 auto Size =
Op.getValueSizeInBits();
1526 auto Type =
Op.getValueType();
1530 auto Size =
Op.getValueSizeInBits();
1541 auto Type =
Op.getValueType();
1545 auto Size =
Op.getValueSizeInBits();
1559 return getNode(TLI->getExtendForContent(BType), SL, VT,
Op);
1563 EVT OpVT =
Op.getValueType();
1565 "Cannot getZeroExtendInReg FP types");
1567 "getZeroExtendInReg type should be vector iff the operand "
1571 "Vector element counts must match in getZeroExtendInReg");
1583 EVT OpVT =
Op.getValueType();
1585 "Cannot getVPZeroExtendInReg FP types");
1587 "getVPZeroExtendInReg type and operand type should be vector!");
1589 "Vector element counts must match in getZeroExtendInReg");
1628 return getNode(ISD::VP_XOR,
DL, VT, Val, TrueValue, Mask, EVL);
1639 return getNode(ISD::VP_ZERO_EXTEND,
DL, VT,
Op, Mask, EVL);
1641 return getNode(ISD::VP_TRUNCATE,
DL, VT,
Op, Mask, EVL);
1650 switch (TLI->getBooleanContents(OpVT)) {
1661 bool isT,
bool isO) {
1667 bool isT,
bool isO) {
1668 return getConstant(*ConstantInt::get(*Context, Val),
DL, VT, isT, isO);
1672 EVT VT,
bool isT,
bool isO) {
1689 EltVT = TLI->getTypeToTransformTo(*
getContext(), EltVT);
1695 Elt = ConstantInt::get(*
getContext(), NewVal);
1707 EVT ViaEltVT = TLI->getTypeToTransformTo(*
getContext(), EltVT);
1714 "Can only handle an even split!");
1718 for (
unsigned i = 0; i != Parts; ++i)
1720 NewVal.
extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits),
DL,
1721 ViaEltVT, isT, isO));
1726 unsigned ViaVecNumElts = VT.
getSizeInBits() / ViaEltSizeInBits;
1737 NewVal.
extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits),
DL,
1738 ViaEltVT, isT, isO));
1743 std::reverse(EltParts.
begin(), EltParts.
end());
1762 "APInt size does not match type size!");
1771 if ((
N = FindNodeOrInsertPos(
ID,
DL, IP)))
1776 N = newSDNode<ConstantSDNode>(isT, isO, Elt, VTs);
1777 CSEMap.InsertNode(
N, IP);
1789 bool isT,
bool isO) {
1797 IsTarget, IsOpaque);
1829 EVT VT,
bool isTarget) {
1850 if ((
N = FindNodeOrInsertPos(
ID,
DL, IP)))
1855 N = newSDNode<ConstantFPSDNode>(isTarget, Elt, VTs);
1856 CSEMap.InsertNode(
N, IP);
1870 if (EltVT == MVT::f32)
1872 if (EltVT == MVT::f64)
1874 if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1875 EltVT == MVT::f16 || EltVT == MVT::bf16) {
1886 EVT VT, int64_t
Offset,
bool isTargetGA,
1887 unsigned TargetFlags) {
1888 assert((TargetFlags == 0 || isTargetGA) &&
1889 "Cannot set target flags on target-independent globals");
1907 ID.AddInteger(TargetFlags);
1909 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
1912 auto *
N = newSDNode<GlobalAddressSDNode>(
1913 Opc,
DL.getIROrder(),
DL.getDebugLoc(), GV, VTs,
Offset, TargetFlags);
1914 CSEMap.InsertNode(
N, IP);
1928 auto *
N = newSDNode<DeactivationSymbolSDNode>(GV, VTs);
1929 CSEMap.InsertNode(
N, IP);
1941 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1944 auto *
N = newSDNode<FrameIndexSDNode>(FI, VTs, isTarget);
1945 CSEMap.InsertNode(
N, IP);
1951 unsigned TargetFlags) {
1952 assert((TargetFlags == 0 || isTarget) &&
1953 "Cannot set target flags on target-independent jump tables");
1959 ID.AddInteger(TargetFlags);
1961 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1964 auto *
N = newSDNode<JumpTableSDNode>(JTI, VTs, isTarget, TargetFlags);
1965 CSEMap.InsertNode(
N, IP);
1973 return getNode(ISD::JUMP_TABLE_DEBUG_INFO,
DL, MVT::Glue, Chain,
1979 bool isTarget,
unsigned TargetFlags) {
1980 assert((TargetFlags == 0 || isTarget) &&
1981 "Cannot set target flags on target-independent globals");
1990 ID.AddInteger(Alignment->value());
1993 ID.AddInteger(TargetFlags);
1995 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1998 auto *
N = newSDNode<ConstantPoolSDNode>(isTarget,
C, VTs,
Offset, *Alignment,
2000 CSEMap.InsertNode(
N, IP);
2009 bool isTarget,
unsigned TargetFlags) {
2010 assert((TargetFlags == 0 || isTarget) &&
2011 "Cannot set target flags on target-independent globals");
2018 ID.AddInteger(Alignment->value());
2020 C->addSelectionDAGCSEId(
ID);
2021 ID.AddInteger(TargetFlags);
2023 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2026 auto *
N = newSDNode<ConstantPoolSDNode>(isTarget,
C, VTs,
Offset, *Alignment,
2028 CSEMap.InsertNode(
N, IP);
2038 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2041 auto *
N = newSDNode<BasicBlockSDNode>(
MBB);
2042 CSEMap.InsertNode(
N, IP);
2049 ValueTypeNodes.size())
2056 N = newSDNode<VTSDNode>(VT);
2062 SDNode *&
N = ExternalSymbols[Sym];
2064 N = newSDNode<ExternalSymbolSDNode>(
false, Sym, 0,
getVTList(VT));
2073 N = newSDNode<MCSymbolSDNode>(Sym,
getVTList(VT));
2079 unsigned TargetFlags) {
2081 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
2083 N = newSDNode<ExternalSymbolSDNode>(
true, Sym, TargetFlags,
getVTList(VT));
2089 if ((
unsigned)
Cond >= CondCodeNodes.size())
2090 CondCodeNodes.resize(
Cond+1);
2092 if (!CondCodeNodes[
Cond]) {
2093 auto *
N = newSDNode<CondCodeSDNode>(
Cond);
2094 CondCodeNodes[
Cond] =
N;
2102 bool ConstantFold) {
2104 "APInt size does not match type size!");
2121 bool ConstantFold) {
2122 if (EC.isScalable())
2135 const APInt &StepVal) {
2159 "Must have the same number of vector elements as mask elements!");
2161 "Invalid VECTOR_SHUFFLE");
2169 int NElts = Mask.size();
2171 [&](
int M) {
return M < (NElts * 2) && M >= -1; }) &&
2172 "Index out of range");
2180 for (
int i = 0; i != NElts; ++i)
2181 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
2188 if (TLI->hasVectorBlend()) {
2197 for (
int i = 0; i < NElts; ++i) {
2198 if (MaskVec[i] <
Offset || MaskVec[i] >= (
Offset + NElts))
2202 if (UndefElements[MaskVec[i] -
Offset]) {
2208 if (!UndefElements[i])
2213 BlendSplat(N1BV, 0);
2215 BlendSplat(N2BV, NElts);
2220 bool AllLHS =
true, AllRHS =
true;
2222 for (
int i = 0; i != NElts; ++i) {
2223 if (MaskVec[i] >= NElts) {
2228 }
else if (MaskVec[i] >= 0) {
2232 if (AllLHS && AllRHS)
2234 if (AllLHS && !N2Undef)
2247 bool Identity =
true, AllSame =
true;
2248 for (
int i = 0; i != NElts; ++i) {
2249 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity =
false;
2250 if (MaskVec[i] != MaskVec[0]) AllSame =
false;
2252 if (Identity && NElts)
2261 while (V.getOpcode() == ISD::BITCAST)
2285 if (AllSame && SameNumElts) {
2286 EVT BuildVT = BV->getValueType(0);
2293 NewBV =
getNode(ISD::BITCAST, dl, VT, NewBV);
2303 for (
int i = 0; i != NElts; ++i)
2304 ID.AddInteger(MaskVec[i]);
2307 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
2313 int *MaskAlloc = OperandAllocator.Allocate<
int>(NElts);
2316 auto *
N = newSDNode<ShuffleVectorSDNode>(VTs, dl.
getIROrder(),
2318 createOperands(
N,
Ops);
2320 CSEMap.InsertNode(
N, IP);
2341 ID.AddInteger(Reg.id());
2343 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2346 auto *
N = newSDNode<RegisterSDNode>(Reg, VTs);
2347 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(
N, FLI, UA);
2348 CSEMap.InsertNode(
N, IP);
2356 ID.AddPointer(RegMask);
2358 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2361 auto *
N = newSDNode<RegisterMaskSDNode>(RegMask);
2362 CSEMap.InsertNode(
N, IP);
2377 ID.AddPointer(Label);
2379 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2384 createOperands(
N,
Ops);
2386 CSEMap.InsertNode(
N, IP);
2392 int64_t
Offset,
bool isTarget,
2393 unsigned TargetFlags) {
2401 ID.AddInteger(TargetFlags);
2403 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2406 auto *
N = newSDNode<BlockAddressSDNode>(
Opc, VTs, BA,
Offset, TargetFlags);
2407 CSEMap.InsertNode(
N, IP);
2418 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2421 auto *
N = newSDNode<SrcValueSDNode>(V);
2422 CSEMap.InsertNode(
N, IP);
2433 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2436 auto *
N = newSDNode<MDNodeSDNode>(MD);
2437 CSEMap.InsertNode(
N, IP);
2443 if (VT == V.getValueType())
2450 unsigned SrcAS,
unsigned DestAS) {
2455 ID.AddInteger(SrcAS);
2456 ID.AddInteger(DestAS);
2459 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
2463 VTs, SrcAS, DestAS);
2464 createOperands(
N,
Ops);
2466 CSEMap.InsertNode(
N, IP);
2478 EVT OpTy =
Op.getValueType();
2480 if (OpTy == ShTy || OpTy.
isVector())
return Op;
2489 EVT VT =
Node->getValueType(0);
2498 if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
2536 Align RedAlign = UseABI ?
DL.getABITypeAlign(Ty) :
DL.getPrefTypeAlign(Ty);
2538 if (TLI->isTypeLegal(VT) || !VT.
isVector())
2546 if (RedAlign > StackAlign) {
2549 unsigned NumIntermediates;
2550 TLI->getVectorTypeBreakdown(*
getContext(), VT, IntermediateVT,
2551 NumIntermediates, RegisterVT);
2553 Align RedAlign2 = UseABI ?
DL.getABITypeAlign(Ty) :
DL.getPrefTypeAlign(Ty);
2554 if (RedAlign2 < RedAlign)
2555 RedAlign = RedAlign2;
2560 RedAlign = std::min(RedAlign, StackAlign);
2575 false,
nullptr, StackID);
2590 "Don't know how to choose the maximum size when creating a stack "
2599 Align Align = std::max(
DL.getPrefTypeAlign(Ty1),
DL.getPrefTypeAlign(Ty2));
2607 auto GetUndefBooleanConstant = [&]() {
2609 TLI->getBooleanContents(OpVT) ==
2646 return GetUndefBooleanConstant();
2651 return GetUndefBooleanConstant();
2660 const APInt &C2 = N2C->getAPIntValue();
2662 const APInt &C1 = N1C->getAPIntValue();
2672 if (N1CFP && N2CFP) {
2677 return GetUndefBooleanConstant();
2682 return GetUndefBooleanConstant();
2688 return GetUndefBooleanConstant();
2693 return GetUndefBooleanConstant();
2698 return GetUndefBooleanConstant();
2704 return GetUndefBooleanConstant();
2731 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.
getSimpleVT()))
2733 return getSetCC(dl, VT, N2, N1, SwappedCond);
2734 }
else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2749 return GetUndefBooleanConstant();
2760 unsigned BitWidth =
Op.getScalarValueSizeInBits();
2768 unsigned Opc =
Op.getOpcode();
2777 return (NoFPClass & TestMask) == TestMask;
2779 case ISD::ARITH_FENCE:
2784 return Op->getFlags().hasNoNaNs();
2796 unsigned Depth)
const {
2804 const APInt &DemandedElts,
2805 unsigned Depth)
const {
2812 unsigned Depth )
const {
2818 unsigned Depth)
const {
2823 const APInt &DemandedElts,
2824 unsigned Depth)
const {
2825 EVT VT =
Op.getValueType();
2832 for (
unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
2833 if (!DemandedElts[EltIdx])
2837 KnownZeroElements.
setBit(EltIdx);
2839 return KnownZeroElements;
2849 unsigned Opcode = V.getOpcode();
2850 EVT VT = V.getValueType();
2853 "scalable demanded bits are ignored");
2865 UndefElts = V.getOperand(0).isUndef()
2874 APInt UndefLHS, UndefRHS;
2883 (DemandedElts & UndefLHS) == (DemandedElts & UndefRHS)) {
2884 UndefElts = UndefLHS | UndefRHS;
2897 return TLI->isSplatValueForTargetNode(V, DemandedElts, UndefElts, *
this,
2914 for (
unsigned i = 0; i != NumElts; ++i) {
2920 if (!DemandedElts[i])
2922 if (Scl && Scl !=
Op)
2933 for (
int i = 0; i != (int)NumElts; ++i) {
2939 if (!DemandedElts[i])
2941 if (M < (
int)NumElts)
2944 DemandedRHS.
setBit(M - NumElts);
2956 auto CheckSplatSrc = [&](
SDValue Src,
const APInt &SrcElts) {
2958 return (SrcElts.popcount() == 1) ||
2960 (SrcElts & SrcUndefs).
isZero());
2962 if (!DemandedLHS.
isZero())
2963 return CheckSplatSrc(V.getOperand(0), DemandedLHS);
2964 return CheckSplatSrc(V.getOperand(1), DemandedRHS);
2970 if (Src.getValueType().isScalableVector())
2972 uint64_t Idx = V.getConstantOperandVal(1);
2973 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2975 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
2977 UndefElts = UndefSrcElts.
extractBits(NumElts, Idx);
2988 if (Src.getValueType().isScalableVector())
2992 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
2994 UndefElts = UndefSrcElts.
trunc(NumElts);
2999 case ISD::BITCAST: {
3001 EVT SrcVT = Src.getValueType();
3011 if ((
BitWidth % SrcBitWidth) == 0) {
3013 unsigned Scale =
BitWidth / SrcBitWidth;
3015 APInt ScaledDemandedElts =
3017 for (
unsigned I = 0;
I != Scale; ++
I) {
3021 SubDemandedElts &= ScaledDemandedElts;
3025 if (!SubUndefElts.
isZero())
3039 EVT VT = V.getValueType();
3049 (AllowUndefs || !UndefElts);
3055 EVT VT = V.getValueType();
3056 unsigned Opcode = V.getOpcode();
3077 SplatIdx = (UndefElts & DemandedElts).
countr_one();
3092 if (!SVN->isSplat())
3094 int Idx = SVN->getSplatIndex();
3095 int NumElts = V.getValueType().getVectorNumElements();
3096 SplatIdx = Idx % NumElts;
3097 return V.getOperand(Idx / NumElts);
3109 if (LegalTypes && !TLI->isTypeLegal(SVT)) {
3112 LegalSVT = TLI->getTypeToTransformTo(*
getContext(), LegalSVT);
3113 if (LegalSVT.
bitsLT(SVT))
3121std::optional<ConstantRange>
3123 unsigned Depth)
const {
3126 "Unknown shift node");
3128 unsigned BitWidth = V.getScalarValueSizeInBits();
3131 const APInt &ShAmt = Cst->getAPIntValue();
3133 return std::nullopt;
3138 const APInt *MinAmt =
nullptr, *MaxAmt =
nullptr;
3139 for (
unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3140 if (!DemandedElts[i])
3144 MinAmt = MaxAmt =
nullptr;
3147 const APInt &ShAmt = SA->getAPIntValue();
3149 return std::nullopt;
3150 if (!MinAmt || MinAmt->
ugt(ShAmt))
3152 if (!MaxAmt || MaxAmt->ult(ShAmt))
3155 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
3156 "Failed to find matching min/max shift amounts");
3157 if (MinAmt && MaxAmt)
3167 return std::nullopt;
3170std::optional<unsigned>
3172 unsigned Depth)
const {
3175 "Unknown shift node");
3176 if (std::optional<ConstantRange> AmtRange =
3178 if (
const APInt *ShAmt = AmtRange->getSingleElement())
3179 return ShAmt->getZExtValue();
3180 return std::nullopt;
3183std::optional<unsigned>
3185 EVT VT = V.getValueType();
3192std::optional<unsigned>
3194 unsigned Depth)
const {
3197 "Unknown shift node");
3198 if (std::optional<ConstantRange> AmtRange =
3200 return AmtRange->getUnsignedMin().getZExtValue();
3201 return std::nullopt;
3204std::optional<unsigned>
3206 EVT VT = V.getValueType();
3213std::optional<unsigned>
3215 unsigned Depth)
const {
3218 "Unknown shift node");
3219 if (std::optional<ConstantRange> AmtRange =
3221 return AmtRange->getUnsignedMax().getZExtValue();
3222 return std::nullopt;
3225std::optional<unsigned>
3227 EVT VT = V.getValueType();
3238 EVT VT =
Op.getValueType();
3253 unsigned Depth)
const {
3254 unsigned BitWidth =
Op.getScalarValueSizeInBits();
3258 if (
auto OptAPInt =
Op->bitcastToAPInt()) {
3268 assert((!
Op.getValueType().isFixedLengthVector() ||
3269 NumElts ==
Op.getValueType().getVectorNumElements()) &&
3270 "Unexpected vector size");
3275 unsigned Opcode =
Op.getOpcode();
3283 "Expected SPLAT_VECTOR implicit truncation");
3290 unsigned ScalarSize =
Op.getOperand(0).getScalarValueSizeInBits();
3292 "Expected SPLAT_VECTOR_PARTS scalars to cover element width");
3299 const APInt &Step =
Op.getConstantOperandAPInt(0);
3308 const APInt MinNumElts =
3314 .
umul_ov(MinNumElts, Overflow);
3318 const APInt MaxValue = (MaxNumElts - 1).
umul_ov(Step, Overflow);
3326 assert(!
Op.getValueType().isScalableVector());
3329 for (
unsigned i = 0, e =
Op.getNumOperands(); i != e; ++i) {
3330 if (!DemandedElts[i])
3339 "Expected BUILD_VECTOR implicit truncation");
3363 assert(!
Op.getValueType().isScalableVector());
3366 APInt DemandedLHS, DemandedRHS;
3370 DemandedLHS, DemandedRHS))
3375 if (!!DemandedLHS) {
3383 if (!!DemandedRHS) {
3392 const APInt &Multiplier =
Op.getConstantOperandAPInt(0);
3397 if (
Op.getValueType().isScalableVector())
3401 EVT SubVectorVT =
Op.getOperand(0).getValueType();
3403 unsigned NumSubVectors =
Op.getNumOperands();
3404 for (
unsigned i = 0; i != NumSubVectors; ++i) {
3406 DemandedElts.
extractBits(NumSubVectorElts, i * NumSubVectorElts);
3407 if (!!DemandedSub) {
3419 if (
Op.getValueType().isScalableVector())
3426 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
3428 APInt DemandedSrcElts = DemandedElts;
3429 DemandedSrcElts.
clearBits(Idx, Idx + NumSubElts);
3432 if (!!DemandedSubElts) {
3437 if (!!DemandedSrcElts) {
3447 if (
Op.getValueType().isScalableVector() || Src.getValueType().isScalableVector())
3450 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3451 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
3456 if (
Op.getValueType().isScalableVector())
3460 if (DemandedElts != 1)
3470 case ISD::BITCAST: {
3471 if (
Op.getValueType().isScalableVector())
3491 if ((
BitWidth % SubBitWidth) == 0) {
3498 unsigned SubScale =
BitWidth / SubBitWidth;
3499 APInt SubDemandedElts(NumElts * SubScale, 0);
3500 for (
unsigned i = 0; i != NumElts; ++i)
3501 if (DemandedElts[i])
3502 SubDemandedElts.
setBit(i * SubScale);
3504 for (
unsigned i = 0; i != SubScale; ++i) {
3507 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
3508 Known.
insertBits(Known2, SubBitWidth * Shifts);
3513 if ((SubBitWidth %
BitWidth) == 0) {
3514 assert(
Op.getValueType().isVector() &&
"Expected bitcast to vector");
3519 unsigned SubScale = SubBitWidth /
BitWidth;
3520 APInt SubDemandedElts =
3525 for (
unsigned i = 0; i != NumElts; ++i)
3526 if (DemandedElts[i]) {
3527 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
3558 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3562 Op.getOperand(0), DemandedElts,
false,
Depth + 1);
3568 if (
Op->getFlags().hasNoSignedWrap() &&
3569 Op.getOperand(0) ==
Op.getOperand(1) &&
3596 unsigned SignBits1 =
3600 unsigned SignBits0 =
3606 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3609 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3610 if (
Op.getResNo() == 0)
3617 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3620 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3621 if (
Op.getResNo() == 0)
3674 if (
Op.getResNo() != 1)
3680 if (TLI->getBooleanContents(
Op.getValueType().isVector(),
false) ==
3689 unsigned OpNo =
Op->isStrictFPOpcode() ? 1 : 0;
3691 if (TLI->getBooleanContents(
Op.getOperand(OpNo).getValueType()) ==
3701 bool NUW =
Op->getFlags().hasNoUnsignedWrap();
3702 bool NSW =
Op->getFlags().hasNoSignedWrap();
3709 if (std::optional<unsigned> ShMinAmt =
3718 Op->getFlags().hasExact());
3721 if (std::optional<unsigned> ShMinAmt =
3729 Op->getFlags().hasExact());
3735 unsigned Amt =
C->getAPIntValue().urem(
BitWidth);
3750 unsigned Amt =
C->getAPIntValue().urem(
BitWidth);
3756 DemandedElts,
Depth + 1);
3777 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3780 unsigned LoBits =
Op.getOperand(0).getScalarValueSizeInBits();
3781 unsigned HiBits =
Op.getOperand(1).getScalarValueSizeInBits();
3784 Known = Known2.
concat(Known);
3798 if (
Op.getResNo() == 0)
3843 (Opcode == ISD::MGATHER)
3855 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3860 !
Op.getValueType().isScalableVector()) {
3873 for (
unsigned i = 0; i != NumElts; ++i) {
3874 if (!DemandedElts[i])
3884 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3903 }
else if (
Op.getResNo() == 0) {
3904 unsigned ScalarMemorySize = LD->getMemoryVT().getScalarSizeInBits();
3905 KnownBits KnownScalarMemory(ScalarMemorySize);
3906 if (
const MDNode *MD = LD->getRanges())
3917 Known = KnownScalarMemory;
3924 if (
Op.getValueType().isScalableVector())
3926 EVT InVT =
Op.getOperand(0).getValueType();
3938 if (
Op.getValueType().isScalableVector())
3940 EVT InVT =
Op.getOperand(0).getValueType();
3956 if (
Op.getValueType().isScalableVector())
3958 EVT InVT =
Op.getOperand(0).getValueType();
3978 Known.
Zero |= (~InMask);
3979 Known.
One &= (~Known.Zero);
3999 if ((NoFPClass & NegativeTestMask) == NegativeTestMask) {
4005 if ((NoFPClass & PositiveTestMask) == PositiveTestMask) {
4022 Op.getOpcode() ==
ISD::ADD, Flags.hasNoSignedWrap(),
4023 Flags.hasNoUnsignedWrap(), Known, Known2);
4030 if (
Op.getResNo() == 1) {
4032 if (TLI->getBooleanContents(
Op.getOperand(0).getValueType()) ==
4041 "We only compute knownbits for the difference here.");
4048 Borrow = Borrow.
trunc(1);
4062 if (
Op.getResNo() == 1) {
4064 if (TLI->getBooleanContents(
Op.getOperand(0).getValueType()) ==
4073 assert(
Op.getResNo() == 0 &&
"We only compute knownbits for the sum here.");
4083 Carry = Carry.
trunc(1);
4119 const unsigned Index =
Op.getConstantOperandVal(1);
4120 const unsigned EltBitWidth =
Op.getValueSizeInBits();
4127 Known = Known.
trunc(EltBitWidth);
4143 Known = Known.
trunc(EltBitWidth);
4149 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
4159 if (
Op.getValueType().isScalableVector())
4168 bool DemandedVal =
true;
4169 APInt DemandedVecElts = DemandedElts;
4171 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
4172 unsigned EltIdx = CEltNo->getZExtValue();
4173 DemandedVal = !!DemandedElts[EltIdx];
4181 if (!!DemandedVecElts) {
4199 Known = Known2.
abs();
4232 if (CstLow && CstHigh) {
4237 const APInt &ValueHigh = CstHigh->getAPIntValue();
4238 if (ValueLow.
sle(ValueHigh)) {
4241 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
4264 if (IsMax && CstLow) {
4292 case ISD::ATOMIC_LOAD: {
4294 if (
Op.getResNo() == 0) {
4296 unsigned ScalarMemorySize = AT->getMemoryVT().getScalarSizeInBits();
4297 KnownBits KnownScalarMemory(ScalarMemorySize);
4298 if (
const MDNode *MD = AT->getRanges())
4301 switch (AT->getExtensionType()) {
4309 switch (TLI->getExtendForAtomicOps()) {
4322 Known = KnownScalarMemory;
4329 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
4330 if (
Op.getResNo() == 1) {
4335 if (TLI->getBooleanContents(
Op.getValueType().isVector(),
false) ==
4342 case ISD::ATOMIC_CMP_SWAP:
4343 case ISD::ATOMIC_SWAP:
4344 case ISD::ATOMIC_LOAD_ADD:
4345 case ISD::ATOMIC_LOAD_SUB:
4346 case ISD::ATOMIC_LOAD_AND:
4347 case ISD::ATOMIC_LOAD_CLR:
4348 case ISD::ATOMIC_LOAD_OR:
4349 case ISD::ATOMIC_LOAD_XOR:
4350 case ISD::ATOMIC_LOAD_NAND:
4351 case ISD::ATOMIC_LOAD_MIN:
4352 case ISD::ATOMIC_LOAD_MAX:
4353 case ISD::ATOMIC_LOAD_UMIN:
4354 case ISD::ATOMIC_LOAD_UMAX: {
4356 if (
Op.getResNo() == 0) {
4358 unsigned MemBits = AT->getMemoryVT().getScalarSizeInBits();
4380 if (
Op.getValueType().isScalableVector())
4384 TLI->computeKnownBitsForTargetNode(
Op, Known, DemandedElts, *
this,
Depth);
4526 return C->getAPIntValue().zextOrTrunc(
BitWidth).isPowerOf2();
4534 if (
C &&
C->getAPIntValue() == 1)
4544 if (
C &&
C->getAPIntValue().isSignMask())
4556 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
4557 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
4565 if (
C->getAPIntValue().zextOrTrunc(
BitWidth).isPowerOf2())
4603 return C1->getValueAPF().getExactLog2Abs() >= 0;
4612 EVT VT =
Op.getValueType();
4624 unsigned Depth)
const {
4625 EVT VT =
Op.getValueType();
4630 unsigned FirstAnswer = 1;
4633 const APInt &Val =
C->getAPIntValue();
4643 unsigned Opcode =
Op.getOpcode();
4648 return VTBits-Tmp+1;
4662 unsigned NumSrcBits =
Op.getOperand(0).getValueSizeInBits();
4664 if (NumSrcSignBits > (NumSrcBits - VTBits))
4665 return NumSrcSignBits - (NumSrcBits - VTBits);
4671 for (
unsigned i = 0, e =
Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
4672 if (!DemandedElts[i])
4679 APInt T =
C->getAPIntValue().trunc(VTBits);
4680 Tmp2 =
T.getNumSignBits();
4684 if (
SrcOp.getValueSizeInBits() != VTBits) {
4686 "Expected BUILD_VECTOR implicit truncation");
4687 unsigned ExtraBits =
SrcOp.getValueSizeInBits() - VTBits;
4688 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
4691 Tmp = std::min(Tmp, Tmp2);
4702 Tmp = std::min(Tmp, Tmp2);
4709 APInt DemandedLHS, DemandedRHS;
4713 DemandedLHS, DemandedRHS))
4716 Tmp = std::numeric_limits<unsigned>::max();
4719 if (!!DemandedRHS) {
4721 Tmp = std::min(Tmp, Tmp2);
4726 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
4730 case ISD::BITCAST: {
4742 if (VTBits == SrcBits)
4748 if ((SrcBits % VTBits) == 0) {
4751 unsigned Scale = SrcBits / VTBits;
4752 APInt SrcDemandedElts =
4762 for (
unsigned i = 0; i != NumElts; ++i)
4763 if (DemandedElts[i]) {
4764 unsigned SubOffset = i % Scale;
4765 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
4766 SubOffset = SubOffset * VTBits;
4767 if (Tmp <= SubOffset)
4769 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
4779 return VTBits - Tmp + 1;
4781 Tmp = VTBits -
Op.getOperand(0).getScalarValueSizeInBits();
4788 return std::max(Tmp, Tmp2);
4793 EVT SrcVT = Src.getValueType();
4801 if (std::optional<unsigned> ShAmt =
4803 Tmp = std::min(Tmp + *ShAmt, VTBits);
4806 if (std::optional<ConstantRange> ShAmtRange =
4808 unsigned MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
4809 unsigned MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
4817 EVT ExtVT = Ext.getValueType();
4818 SDValue Extendee = Ext.getOperand(0);
4820 unsigned SizeDifference =
4822 if (SizeDifference <= MinShAmt) {
4823 Tmp = SizeDifference +
4826 return Tmp - MaxShAmt;
4832 return Tmp - MaxShAmt;
4842 FirstAnswer = std::min(Tmp, Tmp2);
4852 if (Tmp == 1)
return 1;
4854 return std::min(Tmp, Tmp2);
4857 if (Tmp == 1)
return 1;
4859 return std::min(Tmp, Tmp2);
4871 if (CstLow && CstHigh) {
4876 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
4877 return std::min(Tmp, Tmp2);
4886 return std::min(Tmp, Tmp2);
4894 return std::min(Tmp, Tmp2);
4898 if (
Op.getResNo() == 0 &&
Op.getOperand(0) ==
Op.getOperand(1))
4909 if (
Op.getResNo() != 1)
4915 if (TLI->getBooleanContents(VT.
isVector(),
false) ==
4923 unsigned OpNo =
Op->isStrictFPOpcode() ? 1 : 0;
4925 if (TLI->getBooleanContents(
Op.getOperand(OpNo).getValueType()) ==
4940 unsigned RotAmt =
C->getAPIntValue().urem(VTBits);
4944 RotAmt = (VTBits - RotAmt) % VTBits;
4948 if (Tmp > (RotAmt + 1))
return (Tmp - RotAmt);
4955 if (Tmp == 1)
return 1;
4960 if (CRHS->isAllOnes()) {
4966 if ((Known.
Zero | 1).isAllOnes())
4976 if (Tmp2 == 1)
return 1;
4980 return std::min(Tmp, Tmp2) - 1;
4983 if (Tmp2 == 1)
return 1;
4988 if (CLHS->isZero()) {
4993 if ((Known.
Zero | 1).isAllOnes())
5007 if (Tmp == 1)
return 1;
5008 return std::min(Tmp, Tmp2) - 1;
5012 if (SignBitsOp0 == 1)
5015 if (SignBitsOp1 == 1)
5017 unsigned OutValidBits =
5018 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
5019 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
5027 return std::min(Tmp, Tmp2);
5036 unsigned NumSrcBits =
Op.getOperand(0).getScalarValueSizeInBits();
5038 if (NumSrcSignBits > (NumSrcBits - VTBits))
5039 return NumSrcSignBits - (NumSrcBits - VTBits);
5046 const int BitWidth =
Op.getValueSizeInBits();
5047 const int Items =
Op.getOperand(0).getValueSizeInBits() /
BitWidth;
5051 const int rIndex = Items - 1 -
Op.getConstantOperandVal(1);
5066 bool DemandedVal =
true;
5067 APInt DemandedVecElts = DemandedElts;
5069 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
5070 unsigned EltIdx = CEltNo->getZExtValue();
5071 DemandedVal = !!DemandedElts[EltIdx];
5074 Tmp = std::numeric_limits<unsigned>::max();
5080 Tmp = std::min(Tmp, Tmp2);
5082 if (!!DemandedVecElts) {
5084 Tmp = std::min(Tmp, Tmp2);
5086 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5097 const unsigned BitWidth =
Op.getValueSizeInBits();
5098 const unsigned EltBitWidth =
Op.getOperand(0).getScalarValueSizeInBits();
5111 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
5121 if (Src.getValueType().isScalableVector())
5124 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
5125 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
5133 Tmp = std::numeric_limits<unsigned>::max();
5134 EVT SubVectorVT =
Op.getOperand(0).getValueType();
5136 unsigned NumSubVectors =
Op.getNumOperands();
5137 for (
unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
5139 DemandedElts.
extractBits(NumSubVectorElts, i * NumSubVectorElts);
5143 Tmp = std::min(Tmp, Tmp2);
5145 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5156 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
5158 APInt DemandedSrcElts = DemandedElts;
5159 DemandedSrcElts.
clearBits(Idx, Idx + NumSubElts);
5161 Tmp = std::numeric_limits<unsigned>::max();
5162 if (!!DemandedSubElts) {
5167 if (!!DemandedSrcElts) {
5169 Tmp = std::min(Tmp, Tmp2);
5171 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5176 if (
const MDNode *Ranges = LD->getRanges()) {
5177 if (DemandedElts != 1)
5182 switch (LD->getExtensionType()) {
5202 case ISD::ATOMIC_CMP_SWAP:
5203 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
5204 case ISD::ATOMIC_SWAP:
5205 case ISD::ATOMIC_LOAD_ADD:
5206 case ISD::ATOMIC_LOAD_SUB:
5207 case ISD::ATOMIC_LOAD_AND:
5208 case ISD::ATOMIC_LOAD_CLR:
5209 case ISD::ATOMIC_LOAD_OR:
5210 case ISD::ATOMIC_LOAD_XOR:
5211 case ISD::ATOMIC_LOAD_NAND:
5212 case ISD::ATOMIC_LOAD_MIN:
5213 case ISD::ATOMIC_LOAD_MAX:
5214 case ISD::ATOMIC_LOAD_UMIN:
5215 case ISD::ATOMIC_LOAD_UMAX:
5216 case ISD::ATOMIC_LOAD: {
5219 if (
Op.getResNo() == 0) {
5220 Tmp = AT->getMemoryVT().getScalarSizeInBits();
5225 if (
Op->getOpcode() == ISD::ATOMIC_LOAD) {
5226 switch (AT->getExtensionType()) {
5230 return VTBits - Tmp + 1;
5232 return VTBits - Tmp;
5237 return VTBits - Tmp + 1;
5239 return VTBits - Tmp;
5246 if (
Op.getResNo() == 0) {
5249 unsigned ExtType = LD->getExtensionType();
5253 Tmp = LD->getMemoryVT().getScalarSizeInBits();
5254 return VTBits - Tmp + 1;
5256 Tmp = LD->getMemoryVT().getScalarSizeInBits();
5257 return VTBits - Tmp;
5259 if (
const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
5262 Type *CstTy = Cst->getType();
5267 for (
unsigned i = 0; i != NumElts; ++i) {
5268 if (!DemandedElts[i])
5273 Tmp = std::min(Tmp,
Value.getNumSignBits());
5277 APInt Value = CFP->getValueAPF().bitcastToAPInt();
5278 Tmp = std::min(Tmp,
Value.getNumSignBits());
5302 TLI->ComputeNumSignBitsForTargetNode(
Op, DemandedElts, *
this,
Depth);
5304 FirstAnswer = std::max(FirstAnswer, NumBits);
5315 unsigned Depth)
const {
5317 return Op.getScalarValueSizeInBits() - SignBits + 1;
5321 const APInt &DemandedElts,
5322 unsigned Depth)
const {
5324 return Op.getScalarValueSizeInBits() - SignBits + 1;
5328 unsigned Depth)
const {
5333 EVT VT =
Op.getValueType();
5341 const APInt &DemandedElts,
5343 unsigned Depth)
const {
5344 unsigned Opcode =
Op.getOpcode();
5373 for (
unsigned i = 0, e =
Op.getNumOperands(); i < e; ++i) {
5374 if (!DemandedElts[i])
5384 if (Src.getValueType().isScalableVector())
5387 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
5388 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
5394 if (
Op.getValueType().isScalableVector())
5399 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
5401 APInt DemandedSrcElts = DemandedElts;
5402 DemandedSrcElts.
clearBits(Idx, Idx + NumSubElts);
5416 EVT SrcVT = Src.getValueType();
5420 IndexC->getZExtValue());
5435 if (DemandedElts[IndexC->getZExtValue()] &&
5438 APInt InVecDemandedElts = DemandedElts;
5439 InVecDemandedElts.
clearBit(IndexC->getZExtValue());
5440 if (!!InVecDemandedElts &&
5465 APInt DemandedLHS, DemandedRHS;
5468 DemandedElts, DemandedLHS, DemandedRHS,
5471 if (!DemandedLHS.
isZero() &&
5475 if (!DemandedRHS.
isZero() &&
5523 return isGuaranteedNotToBeUndefOrPoison(V, DemandedElts,
5524 PoisonOnly, Depth + 1);
5536 return TLI->isGuaranteedNotToBeUndefOrPoisonForTargetNode(
5549 return isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly, Depth + 1);
5555 unsigned Depth)
const {
5556 EVT VT =
Op.getValueType();
5566 unsigned Depth)
const {
5567 if (ConsiderFlags &&
Op->hasPoisonGeneratingFlags())
5570 unsigned Opcode =
Op.getOpcode();
5650 if (
Op.getOperand(0).getValueType().isInteger())
5657 unsigned CCOp = Opcode ==
ISD::SETCC ? 2 : 4;
5659 return (
unsigned)CCCode & 0x10U;
5679 case ISD::FP_EXTEND:
5705 EVT VecVT =
Op.getOperand(0).getValueType();
5714 for (
auto [Idx, Elt] :
enumerate(SVN->getMask()))
5715 if (Elt < 0 && DemandedElts[Idx])
5727 return TLI->canCreateUndefOrPoisonForTargetNode(
5737 unsigned Opcode =
Op.getOpcode();
5739 return Op->getFlags().hasDisjoint() ||
5752 unsigned Depth)
const {
5753 EVT VT =
Op.getValueType();
5766 bool SNaN,
unsigned Depth)
const {
5767 assert(!DemandedElts.
isZero() &&
"No demanded elements");
5778 return !
C->getValueAPF().isNaN() ||
5779 (SNaN && !
C->getValueAPF().isSignaling());
5782 unsigned Opcode =
Op.getOpcode();
5815 case ISD::FROUNDEVEN:
5821 case ISD::FNEARBYINT:
5835 case ISD::FP_EXTEND:
5857 case ISD::FMINIMUMNUM:
5858 case ISD::FMAXIMUMNUM: {
5864 case ISD::FMINNUM_IEEE:
5865 case ISD::FMAXNUM_IEEE: {
5876 case ISD::FMAXIMUM: {
5884 EVT SrcVT = Src.getValueType();
5888 Idx->getZExtValue());
5895 if (Src.getValueType().isFixedLengthVector()) {
5896 unsigned Idx =
Op.getConstantOperandVal(1);
5897 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
5898 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
5908 unsigned Idx =
Op.getConstantOperandVal(2);
5914 APInt DemandedMask =
5916 APInt DemandedSrcElts = DemandedElts & ~DemandedMask;
5919 bool NeverNaN =
true;
5920 if (!DemandedSrcElts.
isZero())
5923 if (NeverNaN && !DemandedSubElts.
isZero())
5932 unsigned NumElts =
Op.getNumOperands();
5933 for (
unsigned I = 0;
I != NumElts; ++
I)
5934 if (DemandedElts[
I] &&
5951 return TLI->isKnownNeverNaNForTargetNode(
Op, DemandedElts, *
this, SNaN,
5960 assert(
Op.getValueType().isFloatingPoint() &&
5961 "Floating point type expected");
5972 assert(!
Op.getValueType().isFloatingPoint() &&
5973 "Floating point types unsupported - use isKnownNeverZeroFloat");
5982 switch (
Op.getOpcode()) {
5996 if (
Op->getFlags().hasNoSignedWrap() ||
Op->getFlags().hasNoUnsignedWrap())
6000 if (ValKnown.
One[0])
6060 if (
Op->getFlags().hasExact())
6076 if (
Op->getFlags().hasExact())
6081 if (
Op->getFlags().hasNoUnsignedWrap())
6092 std::optional<bool> ne =
6099 if (
Op->getFlags().hasNoSignedWrap() ||
Op->getFlags().hasNoUnsignedWrap())
6110 const APInt &Multiplier =
Op.getConstantOperandAPInt(0);
6124 return !C1->isNegative();
6126 switch (
Op.getOpcode()) {
6141 if (
A ==
B)
return true;
6146 if (CA->isZero() && CB->isZero())
return true;
6181 NotOperand = NotOperand->getOperand(0);
6183 if (
Other == NotOperand)
6186 return NotOperand ==
Other->getOperand(0) ||
6187 NotOperand ==
Other->getOperand(1);
6193 A =
A->getOperand(0);
6196 B =
B->getOperand(0);
6199 return MatchNoCommonBitsPattern(
A->getOperand(0),
A->getOperand(1),
B) ||
6200 MatchNoCommonBitsPattern(
A->getOperand(1),
A->getOperand(0),
B);
6206 assert(
A.getValueType() ==
B.getValueType() &&
6207 "Values must have the same type");
6229 "BUILD_VECTOR cannot be used with scalable types");
6231 "Incorrect element count in BUILD_VECTOR!");
6239 bool IsIdentity =
true;
6240 for (
int i = 0; i !=
NumOps; ++i) {
6243 (IdentitySrc &&
Ops[i].getOperand(0) != IdentitySrc) ||
6245 Ops[i].getConstantOperandAPInt(1) != i) {
6249 IdentitySrc =
Ops[i].getOperand(0);
6262 assert(!
Ops.empty() &&
"Can't concatenate an empty list of vectors!");
6265 return Ops[0].getValueType() ==
Op.getValueType();
6267 "Concatenation of vectors with inconsistent value types!");
6270 "Incorrect element count in vector concatenation!");
6272 if (
Ops.size() == 1)
6283 bool IsIdentity =
true;
6284 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i) {
6286 unsigned IdentityIndex = i *
Op.getValueType().getVectorMinNumElements();
6288 Op.getOperand(0).getValueType() != VT ||
6289 (IdentitySrc &&
Op.getOperand(0) != IdentitySrc) ||
6290 Op.getConstantOperandVal(1) != IdentityIndex) {
6294 assert((!IdentitySrc || IdentitySrc ==
Op.getOperand(0)) &&
6295 "Unexpected identity source vector for concat of extracts");
6296 IdentitySrc =
Op.getOperand(0);
6299 assert(IdentitySrc &&
"Failed to set source vector of extracts");
6315 EVT OpVT =
Op.getValueType();
6331 SVT = (SVT.
bitsLT(
Op.getValueType()) ?
Op.getValueType() : SVT);
6355 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
6358 auto *
N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
6359 CSEMap.InsertNode(
N, IP);
6371 Flags = Inserter->getFlags();
6372 return getNode(Opcode,
DL, VT, N1, Flags);
6390 case ISD::FP_EXTEND:
6393 case ISD::FP_TO_FP16:
6394 case ISD::FP_TO_BF16:
6401 case ISD::FP16_TO_FP:
6402 case ISD::BF16_TO_FP:
6423 "STEP_VECTOR can only be used with scalable types");
6426 "Unexpected step operand");
6445 case ISD::FP_EXTEND:
6447 "Invalid FP cast!");
6451 "Vector element count mismatch!");
6469 "Invalid SIGN_EXTEND!");
6471 "SIGN_EXTEND result type type should be vector iff the operand "
6476 "Vector element count mismatch!");
6499 unsigned NumSignExtBits =
6510 "Invalid ZERO_EXTEND!");
6512 "ZERO_EXTEND result type type should be vector iff the operand "
6517 "Vector element count mismatch!");
6555 "Invalid ANY_EXTEND!");
6557 "ANY_EXTEND result type type should be vector iff the operand "
6562 "Vector element count mismatch!");
6587 "Invalid TRUNCATE!");
6589 "TRUNCATE result type type should be vector iff the operand "
6594 "Vector element count mismatch!");
6621 assert(VT.
isVector() &&
"This DAG node is restricted to vector types.");
6623 "The input must be the same size or smaller than the result.");
6626 "The destination vector type must have fewer lanes than the input.");
6636 "BSWAP types must be a multiple of 16 bits!");
6650 "Cannot BITCAST between types of different sizes!");
6652 if (OpOpcode == ISD::BITCAST)
6663 "Illegal SCALAR_TO_VECTOR node!");
6678 if (OpOpcode == ISD::FNEG)
6682 if (OpOpcode == ISD::FNEG)
6697 case ISD::VECREDUCE_ADD:
6699 return getNode(ISD::VECREDUCE_XOR,
DL, VT, N1);
6701 case ISD::VECREDUCE_SMIN:
6702 case ISD::VECREDUCE_UMAX:
6704 return getNode(ISD::VECREDUCE_OR,
DL, VT, N1);
6706 case ISD::VECREDUCE_SMAX:
6707 case ISD::VECREDUCE_UMIN:
6709 return getNode(ISD::VECREDUCE_AND,
DL, VT, N1);
6720 "Wrong operand type!");
6727 if (VT != MVT::Glue) {
6731 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
6732 E->intersectFlagsWith(Flags);
6736 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
6738 createOperands(
N,
Ops);
6739 CSEMap.InsertNode(
N, IP);
6741 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
6742 createOperands(
N,
Ops);
6776 if (!C2.getBoolValue())
6780 if (!C2.getBoolValue())
6784 if (!C2.getBoolValue())
6788 if (!C2.getBoolValue())
6808 return std::nullopt;
6813 bool IsUndef1,
const APInt &C2,
6815 if (!(IsUndef1 || IsUndef2))
6823 return std::nullopt;
6831 if (!TLI->isOffsetFoldingLegal(GA))
6836 int64_t
Offset = C2->getSExtValue();
6856 assert(
Ops.size() == 2 &&
"Div/rem should have 2 operands");
6863 [](
SDValue V) { return V.isUndef() ||
6864 isNullConstant(V); });
6902 const APInt &Val =
C->getAPIntValue();
6906 C->isTargetOpcode(),
C->isOpaque());
6913 C->isTargetOpcode(),
C->isOpaque());
6918 C->isTargetOpcode(),
C->isOpaque());
6920 C->isTargetOpcode(),
C->isOpaque());
6948 case ISD::FP16_TO_FP:
6949 case ISD::BF16_TO_FP: {
6966 if (VT == MVT::f16 &&
C->getValueType(0) == MVT::i16)
6968 if (VT == MVT::f32 &&
C->getValueType(0) == MVT::i32)
6970 if (VT == MVT::f64 &&
C->getValueType(0) == MVT::i64)
6972 if (VT == MVT::f128 &&
C->getValueType(0) == MVT::i128)
7006 case ISD::FP_EXTEND: {
7025 case ISD::FP_TO_FP16:
7026 case ISD::FP_TO_BF16: {
7033 return getConstant(V.bitcastToAPInt().getZExtValue(),
DL, VT);
7036 if (VT == MVT::i16 &&
C->getValueType(0) == MVT::f16)
7039 if (VT == MVT::i16 &&
C->getValueType(0) == MVT::bf16)
7042 if (VT == MVT::i32 &&
C->getValueType(0) == MVT::f32)
7045 if (VT == MVT::i64 &&
C->getValueType(0) == MVT::f64)
7046 return getConstant(V.bitcastToAPInt().getZExtValue(),
DL, VT);
7052 if (Opcode == ISD::BITCAST)
7063 if (C1->isOpaque() || C2->isOpaque())
7066 std::optional<APInt> FoldAttempt =
7067 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
7073 "Can't fold vectors ops with scalar operands");
7081 if (TLI->isCommutativeBinOp(Opcode))
7097 const APInt &Val = C1->getAPIntValue();
7098 return SignExtendInReg(Val, VT);
7111 ScalarOps.
push_back(SignExtendInReg(Val, OpVT));
7119 SignExtendInReg(
Ops[0].getConstantOperandAPInt(0),
7130 if (C1 && C2 && C3) {
7131 if (C1->isOpaque() || C2->isOpaque() || C3->isOpaque())
7133 const APInt &V1 = C1->getAPIntValue(), &V2 = C2->getAPIntValue(),
7134 &V3 = C3->getAPIntValue();
7150 if (C1 && C2 && C3) {
7171 Ops[0].getValueType() == VT &&
Ops[1].getValueType() == VT &&
7172 (
Ops[0].getOpcode() == ISD::BITCAST ||
7173 Ops[1].getOpcode() == ISD::BITCAST)) {
7184 if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) &&
7185 BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) {
7189 Opcode, RawBits1[
I], UndefElts1[
I], RawBits2[
I], UndefElts2[
I]);
7200 BVEltVT = BV1->getOperand(0).getValueType();
7203 BVEltVT = BV2->getOperand(0).getValueType();
7209 DstBits, RawBits, DstUndefs,
7212 for (
unsigned I = 0, E = DstBits.
size();
I != E; ++
I) {
7230 ?
Ops[0].getConstantOperandAPInt(0) * RHSVal
7231 :
Ops[0].getConstantOperandAPInt(0) << RHSVal;
7236 auto IsScalarOrSameVectorSize = [NumElts](
const SDValue &
Op) {
7237 return !
Op.getValueType().isVector() ||
7238 Op.getValueType().getVectorElementCount() == NumElts;
7241 auto IsBuildVectorSplatVectorOrUndef = [](
const SDValue &
Op) {
7267 LegalSVT = TLI->getTypeToTransformTo(*
getContext(), LegalSVT);
7279 for (
unsigned I = 0;
I != NumVectorElts;
I++) {
7282 EVT InSVT =
Op.getValueType().getScalarType();
7325 if (LegalSVT != SVT)
7326 ScalarResult =
getNode(ExtendCode,
DL, LegalSVT, ScalarResult);
7340 if (
Ops.size() != 2)
7351 if (N1CFP && N2CFP) {
7381 case ISD::FMINIMUMNUM:
7383 case ISD::FMAXIMUMNUM:
7402 if (N1C && N1C->getValueAPF().isNegZero() && N2.
isUndef())
7425 if (SrcEltVT == DstEltVT)
7433 if (SrcBitSize == DstBitSize) {
7438 if (
Op.getValueType() != SrcEltVT)
7481 for (
unsigned I = 0, E = RawBits.
size();
I != E; ++
I) {
7482 if (UndefElements[
I])
7503 ID.AddInteger(
A.value());
7506 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
7510 newSDNode<AssertAlignSDNode>(
DL.getIROrder(),
DL.getDebugLoc(), VTs,
A);
7511 createOperands(
N, {Val});
7513 CSEMap.InsertNode(
N, IP);
7525 Flags = Inserter->getFlags();
7526 return getNode(Opcode,
DL, VT, N1, N2, Flags);
7531 if (!TLI->isCommutativeBinOp(Opcode))
7540 if ((N1C && !N2C) || (N1CFP && !N2CFP))
7554 "Operand is DELETED_NODE!");
7570 N2.
getValueType() == MVT::Other &&
"Invalid token factor!");
7574 if (N1 == N2)
return N1;
7590 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7592 N1.
getValueType() == VT &&
"Binary operator types must match!");
7595 if (N2CV && N2CV->
isZero())
7605 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7607 N1.
getValueType() == VT &&
"Binary operator types must match!");
7617 if (N2CV && N2CV->
isZero())
7631 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7633 N1.
getValueType() == VT &&
"Binary operator types must match!");
7636 if (N2C && (N1.
getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
7638 const APInt &N2CImm = N2C->getAPIntValue();
7652 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7654 N1.
getValueType() == VT &&
"Binary operator types must match!");
7667 "Types of operands of UCMP/SCMP must match");
7669 "Operands and return type of must both be scalars or vectors");
7673 "Result and operands must have the same number of elements");
7679 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7681 N1.
getValueType() == VT &&
"Binary operator types must match!");
7685 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7687 N1.
getValueType() == VT &&
"Binary operator types must match!");
7693 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7695 N1.
getValueType() == VT &&
"Binary operator types must match!");
7701 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7703 N1.
getValueType() == VT &&
"Binary operator types must match!");
7714 N1.
getValueType() == VT &&
"Binary operator types must match!");
7722 "Invalid FCOPYSIGN!");
7725 if (N2C && (N1.
getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
7727 const APInt &ShiftImm = N2C->getAPIntValue();
7739 "Shift operators return type must be the same as their first arg");
7741 "Shifts only work on integers");
7743 "Vector shift amounts must be in the same as their first arg");
7750 "Invalid use of small shift amount with oversized value!");
7757 if (N2CV && N2CV->
isZero())
7763 (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
7769 "AssertNoFPClass is used for a non-floating type");
7774 "FPClassTest value too large");
7783 "Cannot *_EXTEND_INREG FP types");
7785 "AssertSExt/AssertZExt type should be the vector element type "
7786 "rather than the vector type!");
7795 "Cannot *_EXTEND_INREG FP types");
7797 "SIGN_EXTEND_INREG type should be vector iff the operand "
7801 "Vector element counts must match in SIGN_EXTEND_INREG");
7803 if (
EVT == VT)
return N1;
7811 "FP_TO_*INT_SAT type should be vector iff the operand type is "
7815 "Vector element counts must match in FP_TO_*INT_SAT");
7817 "Type to saturate to must be a scalar.");
7824 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
7825 element type of the vector.");
7847 N2C->getZExtValue() % Factor);
7856 "BUILD_VECTOR used for scalable vectors");
7879 if (N1Op2C && N2C) {
7909 assert(N2C && (
unsigned)N2C->getZExtValue() < 2 &&
"Bad EXTRACT_ELEMENT!");
7913 "Wrong types for EXTRACT_ELEMENT!");
7924 unsigned Shift = ElementSize * N2C->getZExtValue();
7925 const APInt &Val = N1C->getAPIntValue();
7932 "Extract subvector VTs must be vectors!");
7934 "Extract subvector VTs must have the same element type!");
7936 "Cannot extract a scalable vector from a fixed length vector!");
7939 "Extract subvector must be from larger vector to smaller vector!");
7940 assert(N2C &&
"Extract subvector index must be a constant");
7944 "Extract subvector overflow!");
7945 assert(N2C->getAPIntValue().getBitWidth() ==
7947 "Constant index for EXTRACT_SUBVECTOR has an invalid size");
7949 "Extract index is not a multiple of the output vector length");
7964 return N1.
getOperand(N2C->getZExtValue() / Factor);
8005 if (TLI->isCommutativeBinOp(Opcode)) {
8084 if (VT != MVT::Glue) {
8088 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
8089 E->intersectFlagsWith(Flags);
8093 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
8095 createOperands(
N,
Ops);
8096 CSEMap.InsertNode(
N, IP);
8098 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
8099 createOperands(
N,
Ops);
8112 Flags = Inserter->getFlags();
8113 return getNode(Opcode,
DL, VT, N1, N2, N3, Flags);
8122 "Operand is DELETED_NODE!");
8141 "SETCC operands must have the same type!");
8143 "SETCC type should be vector iff the operand type is vector!");
8146 "SETCC vector element counts must match!");
8166 "INSERT_VECTOR_ELT vector type mismatch");
8168 "INSERT_VECTOR_ELT scalar fp/int mismatch");
8171 "INSERT_VECTOR_ELT fp scalar type mismatch");
8174 "INSERT_VECTOR_ELT int scalar size mismatch");
8220 "Dest and insert subvector source types must match!");
8222 "Insert subvector VTs must be vectors!");
8224 "Insert subvector VTs must have the same element type!");
8226 "Cannot insert a scalable vector into a fixed length vector!");
8229 "Insert subvector must be from smaller vector to larger vector!");
8231 "Insert subvector index must be constant");
8235 "Insert subvector overflow!");
8238 "Constant index for INSERT_SUBVECTOR has an invalid size");
8282 case ISD::VP_TRUNCATE:
8283 case ISD::VP_SIGN_EXTEND:
8284 case ISD::VP_ZERO_EXTEND:
8293 assert(VT == VecVT &&
"Vector and result type don't match.");
8295 "All inputs must be vectors.");
8296 assert(VecVT == PassthruVT &&
"Vector and passthru types don't match.");
8298 "Vector and mask must have same number of elements.");
8305 case ISD::PARTIAL_REDUCE_UMLA:
8306 case ISD::PARTIAL_REDUCE_SMLA:
8307 case ISD::PARTIAL_REDUCE_SUMLA:
8308 case ISD::PARTIAL_REDUCE_FMLA: {
8313 "Expected the second and third operands of the PARTIAL_REDUCE_MLA "
8314 "node to have the same type!");
8316 "Expected the first operand of the PARTIAL_REDUCE_MLA node to have "
8317 "the same type as its result!");
8320 "Expected the element count of the second and third operands of the "
8321 "PARTIAL_REDUCE_MLA node to be a positive integer multiple of the "
8322 "element count of the first operand and the result!");
8324 "Expected the second and third operands of the PARTIAL_REDUCE_MLA "
8325 "node to have an element type which is the same as or smaller than "
8326 "the element type of the first operand and result!");
8348 if (VT != MVT::Glue) {
8352 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
8353 E->intersectFlagsWith(Flags);
8357 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
8359 createOperands(
N,
Ops);
8360 CSEMap.InsertNode(
N, IP);
8362 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
8363 createOperands(
N,
Ops);
8383 Flags = Inserter->getFlags();
8384 return getNode(Opcode,
DL, VT, N1, N2, N3, N4, Flags);
8399 Flags = Inserter->getFlags();
8400 return getNode(Opcode,
DL, VT, N1, N2, N3, N4, N5, Flags);
8417 if (FI->getIndex() < 0)
8432 assert(
C->getAPIntValue().getBitWidth() == 8);
8437 return DAG.
getConstant(Val, dl, VT,
false, IsOpaque);
8442 assert(
Value.getValueType() == MVT::i8 &&
"memset with non-byte fill value?");
8458 if (VT !=
Value.getValueType())
8471 if (Slice.Array ==
nullptr) {
8474 return DAG.
getNode(ISD::BITCAST, dl, VT,
8480 unsigned NumVTBytes = NumVTBits / 8;
8481 unsigned NumBytes = std::min(NumVTBytes,
unsigned(Slice.Length));
8483 APInt Val(NumVTBits, 0);
8485 for (
unsigned i = 0; i != NumBytes; ++i)
8488 for (
unsigned i = 0; i != NumBytes; ++i)
8489 Val |= (
uint64_t)(
unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
8508 APInt(
Base.getValueSizeInBits().getFixedValue(),
8509 Offset.getKnownMinValue()));
8521 if (TLI->shouldPreservePtrArith(this->getMachineFunction().getFunction(),
8536 else if (Src->isAnyAdd() &&
8540 SrcDelta = Src.getConstantOperandVal(1);
8546 SrcDelta +
G->getOffset());
8562 assert(OutLoadChains.
size() &&
"Missing loads in memcpy inlining");
8563 assert(OutStoreChains.
size() &&
"Missing stores in memcpy inlining");
8565 for (
unsigned i = From; i < To; ++i) {
8567 GluedLoadChains.
push_back(OutLoadChains[i]);
8574 for (
unsigned i = From; i < To; ++i) {
8577 ST->getBasePtr(), ST->getMemoryVT(),
8578 ST->getMemOperand());
8600 std::vector<EVT> MemOps;
8601 bool DstAlignCanChange =
false;
8607 DstAlignCanChange =
true;
8609 if (!SrcAlign || Alignment > *SrcAlign)
8610 SrcAlign = Alignment;
8611 assert(SrcAlign &&
"SrcAlign must be set");
8615 bool isZeroConstant = CopyFromConstant && Slice.Array ==
nullptr;
8617 const MemOp Op = isZeroConstant
8621 *SrcAlign, isVol, CopyFromConstant);
8627 if (DstAlignCanChange) {
8628 Type *Ty = MemOps[0].getTypeForEVT(
C);
8629 Align NewAlign =
DL.getABITypeAlign(Ty);
8635 if (!
TRI->hasStackRealignment(MF))
8637 NewAlign = std::min(NewAlign, *StackAlign);
8639 if (NewAlign > Alignment) {
8643 Alignment = NewAlign;
8653 BatchAA && SrcVal &&
8661 unsigned NumMemOps = MemOps.size();
8663 for (
unsigned i = 0; i != NumMemOps; ++i) {
8668 if (VTSize >
Size) {
8671 assert(i == NumMemOps-1 && i != 0);
8672 SrcOff -= VTSize -
Size;
8673 DstOff -= VTSize -
Size;
8676 if (CopyFromConstant &&
8684 if (SrcOff < Slice.Length) {
8686 SubSlice.
move(SrcOff);
8689 SubSlice.
Array =
nullptr;
8691 SubSlice.
Length = VTSize;
8694 if (
Value.getNode()) {
8698 DstPtrInfo.
getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
8703 if (!Store.getNode()) {
8712 bool isDereferenceable =
8715 if (isDereferenceable)
8730 DstPtrInfo.
getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
8740 unsigned NumLdStInMemcpy = OutStoreChains.
size();
8742 if (NumLdStInMemcpy) {
8748 for (
unsigned i = 0; i < NumLdStInMemcpy; ++i) {
8754 if (NumLdStInMemcpy <= GluedLdStLimit) {
8756 NumLdStInMemcpy, OutLoadChains,
8759 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
8760 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
8761 unsigned GlueIter = 0;
8763 for (
unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
8764 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
8765 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
8768 OutLoadChains, OutStoreChains);
8769 GlueIter += GluedLdStLimit;
8773 if (RemainingLdStInMemcpy) {
8775 RemainingLdStInMemcpy, OutLoadChains,
8787 bool isVol,
bool AlwaysInline,
8801 std::vector<EVT> MemOps;
8802 bool DstAlignCanChange =
false;
8808 DstAlignCanChange =
true;
8810 if (!SrcAlign || Alignment > *SrcAlign)
8811 SrcAlign = Alignment;
8812 assert(SrcAlign &&
"SrcAlign must be set");
8822 if (DstAlignCanChange) {
8823 Type *Ty = MemOps[0].getTypeForEVT(
C);
8824 Align NewAlign =
DL.getABITypeAlign(Ty);
8830 if (!
TRI->hasStackRealignment(MF))
8832 NewAlign = std::min(NewAlign, *StackAlign);
8834 if (NewAlign > Alignment) {
8838 Alignment = NewAlign;
8852 unsigned NumMemOps = MemOps.size();
8853 for (
unsigned i = 0; i < NumMemOps; i++) {
8858 bool isDereferenceable =
8861 if (isDereferenceable)
8867 SrcPtrInfo.
getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags, NewAAInfo);
8874 for (
unsigned i = 0; i < NumMemOps; i++) {
8880 Chain, dl, LoadValues[i],
8882 DstPtrInfo.
getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
8922 std::vector<EVT> MemOps;
8923 bool DstAlignCanChange =
false;
8930 DstAlignCanChange =
true;
8936 MemOp::Set(
Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
8940 if (DstAlignCanChange) {
8943 Align NewAlign =
DL.getABITypeAlign(Ty);
8949 if (!
TRI->hasStackRealignment(MF))
8951 NewAlign = std::min(NewAlign, *StackAlign);
8953 if (NewAlign > Alignment) {
8957 Alignment = NewAlign;
8963 unsigned NumMemOps = MemOps.size();
8966 EVT LargestVT = MemOps[0];
8967 for (
unsigned i = 1; i < NumMemOps; i++)
8968 if (MemOps[i].bitsGT(LargestVT))
8969 LargestVT = MemOps[i];
8976 for (
unsigned i = 0; i < NumMemOps; i++) {
8979 if (VTSize >
Size) {
8982 assert(i == NumMemOps-1 && i != 0);
8983 DstOff -= VTSize -
Size;
8990 if (VT.
bitsLT(LargestVT)) {
9005 SDValue TailValue = DAG.
getNode(ISD::BITCAST, dl, SVT, MemSetValue);
9010 assert(
Value.getValueType() == VT &&
"Value with wrong type.");
9037 bool AllowReturnsFirstArg) {
9043 AllowReturnsFirstArg &&
9047std::pair<SDValue, SDValue>
9050 const char *LibCallName = TLI->getLibcallName(RTLIB::MEMCMP);
9067 TLI->getLibcallCallingConv(RTLIB::MEMCMP),
9073 return TLI->LowerCallTo(CLI);
9080 const char *LibCallName = TLI->getLibcallName(RTLIB::STRLEN);
9100 return TLI->LowerCallTo(CLI);
9105 Align Alignment,
bool isVol,
bool AlwaysInline,
const CallInst *CI,
9114 if (ConstantSize->
isZero())
9118 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
9119 isVol,
false, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
9120 if (Result.getNode())
9127 SDValue Result = TSI->EmitTargetCodeForMemcpy(
9128 *
this, dl, Chain, Dst, Src,
Size, Alignment, isVol, AlwaysInline,
9129 DstPtrInfo, SrcPtrInfo);
9130 if (Result.getNode())
9137 assert(ConstantSize &&
"AlwaysInline requires a constant size!");
9139 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
9140 isVol,
true, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
9155 Args.emplace_back(Dst, PtrTy);
9156 Args.emplace_back(Src, PtrTy);
9160 bool IsTailCall =
false;
9161 RTLIB::LibcallImpl MemCpyImpl = TLI->getMemcpyImpl();
9163 if (OverrideTailCall.has_value()) {
9164 IsTailCall = *OverrideTailCall;
9166 bool LowersToMemcpy = MemCpyImpl == RTLIB::impl_memcpy;
9173 TLI->getLibcallImplCallingConv(MemCpyImpl),
9174 Dst.getValueType().getTypeForEVT(*
getContext()),
9181 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
9182 return CallResult.second;
9187 Type *SizeTy,
unsigned ElemSz,
9194 Args.emplace_back(Dst, ArgTy);
9195 Args.emplace_back(Src, ArgTy);
9196 Args.emplace_back(
Size, SizeTy);
9198 RTLIB::Libcall LibraryCall =
9200 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
9214 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
9215 return CallResult.second;
9221 std::optional<bool> OverrideTailCall,
9231 if (ConstantSize->
isZero())
9235 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
9236 isVol,
false, DstPtrInfo, SrcPtrInfo, AAInfo);
9237 if (Result.getNode())
9245 TSI->EmitTargetCodeForMemmove(*
this, dl, Chain, Dst, Src,
Size,
9246 Alignment, isVol, DstPtrInfo, SrcPtrInfo);
9247 if (Result.getNode())
9260 Args.emplace_back(Dst, PtrTy);
9261 Args.emplace_back(Src, PtrTy);
9266 RTLIB::LibcallImpl MemmoveImpl = TLI->getLibcallImpl(RTLIB::MEMMOVE);
9268 bool IsTailCall =
false;
9269 if (OverrideTailCall.has_value()) {
9270 IsTailCall = *OverrideTailCall;
9272 bool LowersToMemmove = MemmoveImpl == RTLIB::impl_memmove;
9279 TLI->getLibcallImplCallingConv(MemmoveImpl),
9280 Dst.getValueType().getTypeForEVT(*
getContext()),
9287 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
9288 return CallResult.second;
9293 Type *SizeTy,
unsigned ElemSz,
9300 Args.emplace_back(Dst, IntPtrTy);
9301 Args.emplace_back(Src, IntPtrTy);
9302 Args.emplace_back(
Size, SizeTy);
9304 RTLIB::Libcall LibraryCall =
9306 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
9320 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
9321 return CallResult.second;
9326 bool isVol,
bool AlwaysInline,
9335 if (ConstantSize->
isZero())
9340 isVol,
false, DstPtrInfo, AAInfo);
9342 if (Result.getNode())
9349 SDValue Result = TSI->EmitTargetCodeForMemset(
9350 *
this, dl, Chain, Dst, Src,
Size, Alignment, isVol, AlwaysInline, DstPtrInfo);
9351 if (Result.getNode())
9358 assert(ConstantSize &&
"AlwaysInline requires a constant size!");
9361 isVol,
true, DstPtrInfo, AAInfo);
9363 "getMemsetStores must return a valid sequence when AlwaysInline");
9384 Args.emplace_back(
Size,
DL.getIntPtrType(Ctx));
9391 Args.emplace_back(Src, Src.getValueType().getTypeForEVT(Ctx));
9392 Args.emplace_back(
Size,
DL.getIntPtrType(Ctx));
9393 CLI.
setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
9394 Dst.getValueType().getTypeForEVT(Ctx),
9396 TLI->getPointerTy(
DL)),
9400 RTLIB::LibcallImpl MemsetImpl = TLI->getLibcallImpl(RTLIB::MEMSET);
9401 bool LowersToMemset = MemsetImpl == RTLIB::impl_memset;
9412 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
9413 return CallResult.second;
9418 Type *SizeTy,
unsigned ElemSz,
9425 Args.emplace_back(
Size, SizeTy);
9427 RTLIB::Libcall LibraryCall =
9429 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
9443 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
9444 return CallResult.second;
9454 ID.AddInteger(getSyntheticNodeSubclassData<AtomicSDNode>(
9455 dl.
getIROrder(), Opcode, VTList, MemVT, MMO, ExtType));
9460 E->refineAlignment(MMO);
9461 E->refineRanges(MMO);
9466 VTList, MemVT, MMO, ExtType);
9467 createOperands(
N,
Ops);
9469 CSEMap.InsertNode(
N, IP);
9480 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
9481 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
9491 assert((Opcode == ISD::ATOMIC_LOAD_ADD || Opcode == ISD::ATOMIC_LOAD_SUB ||
9492 Opcode == ISD::ATOMIC_LOAD_AND || Opcode == ISD::ATOMIC_LOAD_CLR ||
9493 Opcode == ISD::ATOMIC_LOAD_OR || Opcode == ISD::ATOMIC_LOAD_XOR ||
9494 Opcode == ISD::ATOMIC_LOAD_NAND || Opcode == ISD::ATOMIC_LOAD_MIN ||
9495 Opcode == ISD::ATOMIC_LOAD_MAX || Opcode == ISD::ATOMIC_LOAD_UMIN ||
9496 Opcode == ISD::ATOMIC_LOAD_UMAX || Opcode == ISD::ATOMIC_LOAD_FADD ||
9497 Opcode == ISD::ATOMIC_LOAD_FSUB || Opcode == ISD::ATOMIC_LOAD_FMAX ||
9498 Opcode == ISD::ATOMIC_LOAD_FMIN ||
9499 Opcode == ISD::ATOMIC_LOAD_FMINIMUM ||
9500 Opcode == ISD::ATOMIC_LOAD_FMAXIMUM ||
9501 Opcode == ISD::ATOMIC_LOAD_UINC_WRAP ||
9502 Opcode == ISD::ATOMIC_LOAD_UDEC_WRAP ||
9503 Opcode == ISD::ATOMIC_LOAD_USUB_COND ||
9504 Opcode == ISD::ATOMIC_LOAD_USUB_SAT || Opcode == ISD::ATOMIC_SWAP ||
9505 Opcode == ISD::ATOMIC_STORE) &&
9506 "Invalid Atomic Op");
9521 return getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, VTs,
Ops, MMO, ExtType);
9526 if (
Ops.size() == 1)
9541 if (
Size.hasValue() && !
Size.getValue())
9546 MF.getMachineMemOperand(PtrInfo, Flags,
Size, Alignment, AAInfo);
9557 Opcode == ISD::PREFETCH ||
9558 (Opcode <= (
unsigned)std::numeric_limits<int>::max() &&
9560 "Opcode is not a memory-accessing opcode!");
9564 if (VTList.
VTs[VTList.
NumVTs-1] != MVT::Glue) {
9567 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
9568 Opcode, dl.
getIROrder(), VTList, MemVT, MMO));
9573 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9579 VTList, MemVT, MMO);
9580 createOperands(
N,
Ops);
9582 CSEMap.InsertNode(
N, IP);
9585 VTList, MemVT, MMO);
9586 createOperands(
N,
Ops);
9595 SDValue Chain,
int FrameIndex) {
9596 const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
9606 ID.AddInteger(FrameIndex);
9608 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
9613 createOperands(
N,
Ops);
9614 CSEMap.InsertNode(
N, IP);
9624 const unsigned Opcode = ISD::PSEUDO_PROBE;
9630 ID.AddInteger(Index);
9632 if (
SDNode *E = FindNodeOrInsertPos(
ID, Dl, IP))
9635 auto *
N = newSDNode<PseudoProbeSDNode>(
9637 createOperands(
N,
Ops);
9638 CSEMap.InsertNode(
N, IP);
9692 "Invalid chain type");
9704 Alignment, AAInfo, Ranges);
9705 return getLoad(AM, ExtType, VT, dl, Chain, Ptr,
Offset, MemVT, MMO);
9715 assert(VT == MemVT &&
"Non-extending load from different memory type!");
9719 "Should only be an extending load, not truncating!");
9721 "Cannot convert from FP to Int or Int -> FP!");
9723 "Cannot use an ext load to convert to or from a vector!");
9726 "Cannot use an ext load to change the number of vector elements!");
9733 "Range metadata and load type must match!");
9744 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
9745 dl.
getIROrder(), VTs, AM, ExtType, MemVT, MMO));
9750 E->refineAlignment(MMO);
9751 E->refineRanges(MMO);
9755 ExtType, MemVT, MMO);
9756 createOperands(
N,
Ops);
9758 CSEMap.InsertNode(
N, IP);
9772 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
9790 MemVT, Alignment, MMOFlags, AAInfo);
9805 assert(LD->getOffset().isUndef() &&
"Load is already a indexed load!");
9808 LD->getMemOperand()->getFlags() &
9811 LD->getChain(),
Base,
Offset, LD->getPointerInfo(),
9812 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
9831 MF.getMachineMemOperand(PtrInfo, MMOFlags,
Size, Alignment, AAInfo);
9832 return getStore(Chain, dl, Val, Ptr, MMO);
9845 bool IsTruncating) {
9849 IsTruncating =
false;
9850 }
else if (!IsTruncating) {
9851 assert(VT == SVT &&
"No-truncating store from different memory type!");
9854 "Should only be a truncating store, not extending!");
9857 "Cannot use trunc store to convert to or from a vector!");
9860 "Cannot use trunc store to change the number of vector elements!");
9871 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
9872 dl.
getIROrder(), VTs, AM, IsTruncating, SVT, MMO));
9876 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9881 IsTruncating, SVT, MMO);
9882 createOperands(
N,
Ops);
9884 CSEMap.InsertNode(
N, IP);
9897 "Invalid chain type");
9907 PtrInfo, MMOFlags, SVT.
getStoreSize(), Alignment, AAInfo);
9922 assert(ST->getOffset().isUndef() &&
"Store is already a indexed store!");
9924 ST->getMemoryVT(), ST->getMemOperand(), AM,
9925 ST->isTruncatingStore());
9933 const MDNode *Ranges,
bool IsExpanding) {
9944 Alignment, AAInfo, Ranges);
9945 return getLoadVP(AM, ExtType, VT, dl, Chain, Ptr,
Offset, Mask, EVL, MemVT,
9956 assert(Mask.getValueType().getVectorElementCount() ==
9958 "Vector width mismatch between mask and data");
9969 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>(
9970 dl.
getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
9975 E->refineAlignment(MMO);
9976 E->refineRanges(MMO);
9980 ExtType, IsExpanding, MemVT, MMO);
9981 createOperands(
N,
Ops);
9983 CSEMap.InsertNode(
N, IP);
9999 Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges,
10008 Mask, EVL, VT, MMO, IsExpanding);
10017 const AAMDNodes &AAInfo,
bool IsExpanding) {
10020 EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo,
nullptr,
10030 EVL, MemVT, MMO, IsExpanding);
10037 assert(LD->getOffset().isUndef() &&
"Load is already a indexed load!");
10040 LD->getMemOperand()->getFlags() &
10043 LD->getChain(),
Base,
Offset, LD->getMask(),
10044 LD->getVectorLength(), LD->getPointerInfo(),
10045 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(),
10046 nullptr, LD->isExpandingLoad());
10053 bool IsCompressing) {
10055 assert(Mask.getValueType().getVectorElementCount() ==
10057 "Vector width mismatch between mask and data");
10067 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
10068 dl.
getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
10071 void *IP =
nullptr;
10072 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10077 IsTruncating, IsCompressing, MemVT, MMO);
10078 createOperands(
N,
Ops);
10080 CSEMap.InsertNode(
N, IP);
10093 bool IsCompressing) {
10104 PtrInfo, MMOFlags, SVT.
getStoreSize(), Alignment, AAInfo);
10113 bool IsCompressing) {
10120 false, IsCompressing);
10123 "Should only be a truncating store, not extending!");
10126 "Cannot use trunc store to convert to or from a vector!");
10129 "Cannot use trunc store to change the number of vector elements!");
10133 SDValue Ops[] = {Chain, Val, Ptr, Undef, Mask, EVL};
10137 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
10141 void *IP =
nullptr;
10142 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10149 createOperands(
N,
Ops);
10151 CSEMap.InsertNode(
N, IP);
10162 assert(ST->getOffset().isUndef() &&
"Store is already an indexed store!");
10165 Offset, ST->getMask(), ST->getVectorLength()};
10168 ID.AddInteger(ST->getMemoryVT().getRawBits());
10169 ID.AddInteger(ST->getRawSubclassData());
10170 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
10171 ID.AddInteger(ST->getMemOperand()->getFlags());
10172 void *IP =
nullptr;
10173 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
10176 auto *
N = newSDNode<VPStoreSDNode>(
10178 ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand());
10179 createOperands(
N,
Ops);
10181 CSEMap.InsertNode(
N, IP);
10201 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedLoadSDNode>(
10202 DL.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
10205 void *IP =
nullptr;
10206 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10212 newSDNode<VPStridedLoadSDNode>(
DL.getIROrder(),
DL.getDebugLoc(), VTs, AM,
10213 ExtType, IsExpanding, MemVT, MMO);
10214 createOperands(
N,
Ops);
10215 CSEMap.InsertNode(
N, IP);
10226 bool IsExpanding) {
10229 Undef, Stride, Mask, EVL, VT, MMO, IsExpanding);
10238 Stride, Mask, EVL, MemVT, MMO, IsExpanding);
10247 bool IsTruncating,
bool IsCompressing) {
10257 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
10258 DL.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
10260 void *IP =
nullptr;
10261 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10265 auto *
N = newSDNode<VPStridedStoreSDNode>(
DL.getIROrder(),
DL.getDebugLoc(),
10266 VTs, AM, IsTruncating,
10267 IsCompressing, MemVT, MMO);
10268 createOperands(
N,
Ops);
10270 CSEMap.InsertNode(
N, IP);
10282 bool IsCompressing) {
10289 false, IsCompressing);
10292 "Should only be a truncating store, not extending!");
10295 "Cannot use trunc store to convert to or from a vector!");
10298 "Cannot use trunc store to change the number of vector elements!");
10302 SDValue Ops[] = {Chain, Val, Ptr, Undef, Stride, Mask, EVL};
10306 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
10309 void *IP =
nullptr;
10310 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10314 auto *
N = newSDNode<VPStridedStoreSDNode>(
DL.getIROrder(),
DL.getDebugLoc(),
10316 IsCompressing, SVT, MMO);
10317 createOperands(
N,
Ops);
10319 CSEMap.InsertNode(
N, IP);
10329 assert(
Ops.size() == 6 &&
"Incompatible number of operands");
10334 ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>(
10338 void *IP =
nullptr;
10339 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10345 VT, MMO, IndexType);
10346 createOperands(
N,
Ops);
10348 assert(
N->getMask().getValueType().getVectorElementCount() ==
10349 N->getValueType(0).getVectorElementCount() &&
10350 "Vector width mismatch between mask and data");
10351 assert(
N->getIndex().getValueType().getVectorElementCount().isScalable() ==
10352 N->getValueType(0).getVectorElementCount().isScalable() &&
10353 "Scalable flags of index and data do not match");
10355 N->getIndex().getValueType().getVectorElementCount(),
10356 N->getValueType(0).getVectorElementCount()) &&
10357 "Vector width mismatch between index and data");
10359 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10360 "Scale should be a constant power of 2");
10362 CSEMap.InsertNode(
N, IP);
10373 assert(
Ops.size() == 7 &&
"Incompatible number of operands");
10378 ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>(
10382 void *IP =
nullptr;
10383 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10388 VT, MMO, IndexType);
10389 createOperands(
N,
Ops);
10391 assert(
N->getMask().getValueType().getVectorElementCount() ==
10392 N->getValue().getValueType().getVectorElementCount() &&
10393 "Vector width mismatch between mask and data");
10395 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
10396 N->getValue().getValueType().getVectorElementCount().isScalable() &&
10397 "Scalable flags of index and data do not match");
10399 N->getIndex().getValueType().getVectorElementCount(),
10400 N->getValue().getValueType().getVectorElementCount()) &&
10401 "Vector width mismatch between index and data");
10403 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10404 "Scale should be a constant power of 2");
10406 CSEMap.InsertNode(
N, IP);
10421 "Unindexed masked load with an offset!");
10428 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
10429 dl.
getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
10432 void *IP =
nullptr;
10433 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10438 AM, ExtTy, isExpanding, MemVT, MMO);
10439 createOperands(
N,
Ops);
10441 CSEMap.InsertNode(
N, IP);
10452 assert(LD->getOffset().isUndef() &&
"Masked load is already a indexed load!");
10454 Offset, LD->getMask(), LD->getPassThru(),
10455 LD->getMemoryVT(), LD->getMemOperand(), AM,
10456 LD->getExtensionType(), LD->isExpandingLoad());
10464 bool IsCompressing) {
10466 "Invalid chain type");
10469 "Unindexed masked store with an offset!");
10476 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
10477 dl.
getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
10480 void *IP =
nullptr;
10481 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10487 IsTruncating, IsCompressing, MemVT, MMO);
10488 createOperands(
N,
Ops);
10490 CSEMap.InsertNode(
N, IP);
10501 assert(ST->getOffset().isUndef() &&
10502 "Masked store is already a indexed store!");
10504 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
10505 AM, ST->isTruncatingStore(), ST->isCompressingStore());
10513 assert(
Ops.size() == 6 &&
"Incompatible number of operands");
10518 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
10519 dl.
getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy));
10522 void *IP =
nullptr;
10523 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10529 VTs, MemVT, MMO, IndexType, ExtTy);
10530 createOperands(
N,
Ops);
10532 assert(
N->getPassThru().getValueType() ==
N->getValueType(0) &&
10533 "Incompatible type of the PassThru value in MaskedGatherSDNode");
10534 assert(
N->getMask().getValueType().getVectorElementCount() ==
10535 N->getValueType(0).getVectorElementCount() &&
10536 "Vector width mismatch between mask and data");
10537 assert(
N->getIndex().getValueType().getVectorElementCount().isScalable() ==
10538 N->getValueType(0).getVectorElementCount().isScalable() &&
10539 "Scalable flags of index and data do not match");
10541 N->getIndex().getValueType().getVectorElementCount(),
10542 N->getValueType(0).getVectorElementCount()) &&
10543 "Vector width mismatch between index and data");
10545 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10546 "Scale should be a constant power of 2");
10548 CSEMap.InsertNode(
N, IP);
10560 assert(
Ops.size() == 6 &&
"Incompatible number of operands");
10565 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
10566 dl.
getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc));
10569 void *IP =
nullptr;
10570 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10576 VTs, MemVT, MMO, IndexType, IsTrunc);
10577 createOperands(
N,
Ops);
10579 assert(
N->getMask().getValueType().getVectorElementCount() ==
10580 N->getValue().getValueType().getVectorElementCount() &&
10581 "Vector width mismatch between mask and data");
10583 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
10584 N->getValue().getValueType().getVectorElementCount().isScalable() &&
10585 "Scalable flags of index and data do not match");
10587 N->getIndex().getValueType().getVectorElementCount(),
10588 N->getValue().getValueType().getVectorElementCount()) &&
10589 "Vector width mismatch between index and data");
10591 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10592 "Scale should be a constant power of 2");
10594 CSEMap.InsertNode(
N, IP);
10605 assert(
Ops.size() == 7 &&
"Incompatible number of operands");
10610 ID.AddInteger(getSyntheticNodeSubclassData<MaskedHistogramSDNode>(
10611 dl.
getIROrder(), VTs, MemVT, MMO, IndexType));
10614 void *IP =
nullptr;
10615 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10621 VTs, MemVT, MMO, IndexType);
10622 createOperands(
N,
Ops);
10624 assert(
N->getMask().getValueType().getVectorElementCount() ==
10625 N->getIndex().getValueType().getVectorElementCount() &&
10626 "Vector width mismatch between mask and data");
10628 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10629 "Scale should be a constant power of 2");
10630 assert(
N->getInc().getValueType().isInteger() &&
"Non integer update value");
10632 CSEMap.InsertNode(
N, IP);
10647 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadFFSDNode>(
DL.getIROrder(),
10651 void *IP =
nullptr;
10652 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10656 auto *
N = newSDNode<VPLoadFFSDNode>(
DL.getIROrder(),
DL.getDebugLoc(), VTs,
10658 createOperands(
N,
Ops);
10660 CSEMap.InsertNode(
N, IP);
10675 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
10676 ISD::GET_FPENV_MEM, dl.
getIROrder(), VTs, MemVT, MMO));
10679 void *IP =
nullptr;
10680 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
10683 auto *
N = newSDNode<FPStateAccessSDNode>(ISD::GET_FPENV_MEM, dl.
getIROrder(),
10685 createOperands(
N,
Ops);
10687 CSEMap.InsertNode(
N, IP);
10702 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
10703 ISD::SET_FPENV_MEM, dl.
getIROrder(), VTs, MemVT, MMO));
10706 void *IP =
nullptr;
10707 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
10710 auto *
N = newSDNode<FPStateAccessSDNode>(ISD::SET_FPENV_MEM, dl.
getIROrder(),
10712 createOperands(
N,
Ops);
10714 CSEMap.InsertNode(
N, IP);
10725 if (
Cond.isUndef())
10760 return !Val || Val->getAPIntValue().uge(
X.getScalarValueSizeInBits());
10766 if (
X.getValueType().getScalarType() == MVT::i1)
10779 bool HasNan = (XC && XC->
getValueAPF().isNaN()) ||
10781 bool HasInf = (XC && XC->
getValueAPF().isInfinity()) ||
10784 if (Flags.hasNoNaNs() && (HasNan ||
X.isUndef() ||
Y.isUndef()))
10787 if (Flags.hasNoInfs() && (HasInf ||
X.isUndef() ||
Y.isUndef()))
10810 if (Opcode ==
ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
10825 switch (
Ops.size()) {
10826 case 0:
return getNode(Opcode,
DL, VT);
10836 return getNode(Opcode,
DL, VT, NewOps);
10843 Flags = Inserter->getFlags();
10851 case 0:
return getNode(Opcode,
DL, VT);
10852 case 1:
return getNode(Opcode,
DL, VT,
Ops[0], Flags);
10859 for (
const auto &
Op :
Ops)
10861 "Operand is DELETED_NODE!");
10878 "LHS and RHS of condition must have same type!");
10880 "True and False arms of SelectCC must have same type!");
10882 "select_cc node must be of same type as true and false value!");
10886 "Expected select_cc with vector result to have the same sized "
10887 "comparison type!");
10892 "LHS/RHS of comparison should match types!");
10898 Opcode = ISD::VP_XOR;
10903 Opcode = ISD::VP_AND;
10905 case ISD::VP_REDUCE_MUL:
10908 Opcode = ISD::VP_REDUCE_AND;
10910 case ISD::VP_REDUCE_ADD:
10913 Opcode = ISD::VP_REDUCE_XOR;
10915 case ISD::VP_REDUCE_SMAX:
10916 case ISD::VP_REDUCE_UMIN:
10920 Opcode = ISD::VP_REDUCE_AND;
10922 case ISD::VP_REDUCE_SMIN:
10923 case ISD::VP_REDUCE_UMAX:
10927 Opcode = ISD::VP_REDUCE_OR;
10935 if (VT != MVT::Glue) {
10938 void *IP =
nullptr;
10940 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10941 E->intersectFlagsWith(Flags);
10945 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
10946 createOperands(
N,
Ops);
10948 CSEMap.InsertNode(
N, IP);
10950 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
10951 createOperands(
N,
Ops);
10954 N->setFlags(Flags);
10965 Flags = Inserter->getFlags();
10979 Flags = Inserter->getFlags();
10989 for (
const auto &
Op :
Ops)
10991 "Operand is DELETED_NODE!");
11000 "Invalid add/sub overflow op!");
11002 Ops[0].getValueType() ==
Ops[1].getValueType() &&
11003 Ops[0].getValueType() == VTList.
VTs[0] &&
11004 "Binary operator types must match!");
11011 if (N2CV && N2CV->
isZero()) {
11042 "Invalid add/sub overflow op!");
11044 Ops[0].getValueType() ==
Ops[1].getValueType() &&
11045 Ops[0].getValueType() == VTList.
VTs[0] &&
11046 Ops[2].getValueType() == VTList.
VTs[1] &&
11047 "Binary operator types must match!");
11051 assert(VTList.
NumVTs == 2 &&
Ops.size() == 2 &&
"Invalid mul lo/hi op!");
11053 VTList.
VTs[0] ==
Ops[0].getValueType() &&
11054 VTList.
VTs[0] ==
Ops[1].getValueType() &&
11055 "Binary operator types must match!");
11061 unsigned OutWidth = Width * 2;
11062 APInt Val = LHS->getAPIntValue();
11065 Val = Val.
sext(OutWidth);
11066 Mul =
Mul.sext(OutWidth);
11068 Val = Val.
zext(OutWidth);
11069 Mul =
Mul.zext(OutWidth);
11080 case ISD::FFREXP: {
11081 assert(VTList.
NumVTs == 2 &&
Ops.size() == 1 &&
"Invalid ffrexp op!");
11083 VTList.
VTs[0] ==
Ops[0].getValueType() &&
"frexp type mismatch");
11091 DL, VTList.
VTs[1]);
11099 "Invalid STRICT_FP_EXTEND!");
11101 Ops[1].getValueType().isFloatingPoint() &&
"Invalid FP cast!");
11103 "STRICT_FP_EXTEND result type should be vector iff the operand "
11104 "type is vector!");
11107 Ops[1].getValueType().getVectorElementCount()) &&
11108 "Vector element count mismatch!");
11110 "Invalid fpext node, dst <= src!");
11113 assert(VTList.
NumVTs == 2 &&
Ops.size() == 3 &&
"Invalid STRICT_FP_ROUND!");
11115 "STRICT_FP_ROUND result type should be vector iff the operand "
11116 "type is vector!");
11119 Ops[1].getValueType().getVectorElementCount()) &&
11120 "Vector element count mismatch!");
11122 Ops[1].getValueType().isFloatingPoint() &&
11125 (
Ops[2]->getAsZExtVal() == 0 ||
Ops[2]->getAsZExtVal() == 1) &&
11126 "Invalid STRICT_FP_ROUND!");
11132 if (VTList.
VTs[VTList.
NumVTs-1] != MVT::Glue) {
11135 void *IP =
nullptr;
11136 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
11137 E->intersectFlagsWith(Flags);
11141 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTList);
11142 createOperands(
N,
Ops);
11143 CSEMap.InsertNode(
N, IP);
11145 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTList);
11146 createOperands(
N,
Ops);
11149 N->setFlags(Flags);
11196 return makeVTList(&(*EVTs.insert(VT).first), 1);
11205 void *IP =
nullptr;
11208 EVT *Array = Allocator.Allocate<
EVT>(2);
11211 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 2);
11212 VTListMap.InsertNode(Result, IP);
11214 return Result->getSDVTList();
11224 void *IP =
nullptr;
11227 EVT *Array = Allocator.Allocate<
EVT>(3);
11231 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 3);
11232 VTListMap.InsertNode(Result, IP);
11234 return Result->getSDVTList();
11245 void *IP =
nullptr;
11248 EVT *Array = Allocator.Allocate<
EVT>(4);
11253 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 4);
11254 VTListMap.InsertNode(Result, IP);
11256 return Result->getSDVTList();
11260 unsigned NumVTs = VTs.
size();
11262 ID.AddInteger(NumVTs);
11263 for (
unsigned index = 0; index < NumVTs; index++) {
11264 ID.AddInteger(VTs[index].getRawBits());
11267 void *IP =
nullptr;
11270 EVT *Array = Allocator.Allocate<
EVT>(NumVTs);
11272 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, NumVTs);
11273 VTListMap.InsertNode(Result, IP);
11275 return Result->getSDVTList();
11286 assert(
N->getNumOperands() == 1 &&
"Update with wrong number of operands");
11289 if (
Op ==
N->getOperand(0))
return N;
11292 void *InsertPos =
nullptr;
11293 if (
SDNode *Existing = FindModifiedNodeSlot(
N,
Op, InsertPos))
11298 if (!RemoveNodeFromCSEMaps(
N))
11299 InsertPos =
nullptr;
11302 N->OperandList[0].set(
Op);
11306 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
11311 assert(
N->getNumOperands() == 2 &&
"Update with wrong number of operands");
11314 if (Op1 ==
N->getOperand(0) && Op2 ==
N->getOperand(1))
11318 void *InsertPos =
nullptr;
11319 if (
SDNode *Existing = FindModifiedNodeSlot(
N, Op1, Op2, InsertPos))
11324 if (!RemoveNodeFromCSEMaps(
N))
11325 InsertPos =
nullptr;
11328 if (
N->OperandList[0] != Op1)
11329 N->OperandList[0].set(Op1);
11330 if (
N->OperandList[1] != Op2)
11331 N->OperandList[1].set(Op2);
11335 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
11355 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
11363 "Update with wrong number of operands");
11366 if (std::equal(
Ops.begin(),
Ops.end(),
N->op_begin()))
11370 void *InsertPos =
nullptr;
11371 if (
SDNode *Existing = FindModifiedNodeSlot(
N,
Ops, InsertPos))
11376 if (!RemoveNodeFromCSEMaps(
N))
11377 InsertPos =
nullptr;
11380 for (
unsigned i = 0; i !=
NumOps; ++i)
11381 if (
N->OperandList[i] !=
Ops[i])
11382 N->OperandList[i].set(
Ops[i]);
11386 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
11403 if (NewMemRefs.
empty()) {
11409 if (NewMemRefs.
size() == 1) {
11410 N->MemRefs = NewMemRefs[0];
11416 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.
size());
11418 N->MemRefs = MemRefsBuffer;
11419 N->NumMemRefs =
static_cast<int>(NewMemRefs.
size());
11491 New->setNodeId(-1);
11511 unsigned Order = std::min(
N->getIROrder(), OLoc.
getIROrder());
11512 N->setIROrder(Order);
11535 void *IP =
nullptr;
11536 if (VTs.
VTs[VTs.
NumVTs-1] != MVT::Glue) {
11540 return UpdateSDLocOnMergeSDNode(ON,
SDLoc(
N));
11543 if (!RemoveNodeFromCSEMaps(
N))
11548 N->ValueList = VTs.
VTs;
11558 if (Used->use_empty())
11559 DeadNodeSet.
insert(Used);
11564 MN->clearMemRefs();
11568 createOperands(
N,
Ops);
11572 if (!DeadNodeSet.
empty()) {
11574 for (
SDNode *
N : DeadNodeSet)
11575 if (
N->use_empty())
11581 CSEMap.InsertNode(
N, IP);
11586 unsigned OrigOpc =
Node->getOpcode();
11591#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
11592 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
11593#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
11594 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
11595#include "llvm/IR/ConstrainedOps.def"
11598 assert(
Node->getNumValues() == 2 &&
"Unexpected number of results!");
11606 for (
unsigned i = 1, e =
Node->getNumOperands(); i != e; ++i)
11607 Ops.push_back(
Node->getOperand(i));
11724 bool DoCSE = VTs.
VTs[VTs.
NumVTs-1] != MVT::Glue;
11726 void *IP =
nullptr;
11732 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
11738 N = newSDNode<MachineSDNode>(~Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
11739 createOperands(
N,
Ops);
11742 CSEMap.InsertNode(
N, IP);
11755 VT, Operand, SRIdxVal);
11765 VT, Operand, Subreg, SRIdxVal);
11773 bool AllowCommute) {
11776 Flags = Inserter->getFlags();
11783 bool AllowCommute) {
11784 if (VTList.
VTs[VTList.
NumVTs - 1] == MVT::Glue)
11790 void *IP =
nullptr;
11791 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP)) {
11792 E->intersectFlagsWith(Flags);
11801 if (AllowCommute && TLI->isCommutativeBinOp(Opcode))
11810 if (VTList.
VTs[VTList.
NumVTs - 1] != MVT::Glue) {
11813 void *IP =
nullptr;
11814 if (FindNodeOrInsertPos(
ID,
SDLoc(), IP))
11824 SDNode *
N,
unsigned R,
bool IsIndirect,
11827 "Expected inlined-at fields to agree");
11828 return new (DbgInfo->getAlloc())
11830 {}, IsIndirect,
DL, O,
11840 "Expected inlined-at fields to agree");
11841 return new (DbgInfo->getAlloc())
11854 "Expected inlined-at fields to agree");
11866 "Expected inlined-at fields to agree");
11867 return new (DbgInfo->getAlloc())
11869 Dependencies, IsIndirect,
DL, O,
11878 "Expected inlined-at fields to agree");
11879 return new (DbgInfo->getAlloc())
11881 {}, IsIndirect,
DL, O,
11889 unsigned O,
bool IsVariadic) {
11891 "Expected inlined-at fields to agree");
11892 return new (DbgInfo->getAlloc())
11893 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, Locs, Dependencies, IsIndirect,
11894 DL, O, IsVariadic);
11898 unsigned OffsetInBits,
unsigned SizeInBits,
11899 bool InvalidateDbg) {
11902 assert(FromNode && ToNode &&
"Can't modify dbg values");
11907 if (From == To || FromNode == ToNode)
11919 if (Dbg->isInvalidated())
11927 auto NewLocOps = Dbg->copyLocationOps();
11929 NewLocOps.begin(), NewLocOps.end(),
11931 bool Match = Op == FromLocOp;
11941 auto *Expr = Dbg->getExpression();
11947 if (
auto FI = Expr->getFragmentInfo())
11948 if (OffsetInBits + SizeInBits > FI->SizeInBits)
11957 auto AdditionalDependencies = Dbg->getAdditionalDependencies();
11960 Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
11961 Dbg->getDebugLoc(), std::max(ToNode->
getIROrder(), Dbg->getOrder()),
11962 Dbg->isVariadic());
11965 if (InvalidateDbg) {
11967 Dbg->setIsInvalidated();
11968 Dbg->setIsEmitted();
11974 "Transferred DbgValues should depend on the new SDNode");
11980 if (!
N.getHasDebugValue())
11983 auto GetLocationOperand = [](
SDNode *
Node,
unsigned ResNo) {
11991 if (DV->isInvalidated())
11993 switch (
N.getOpcode()) {
12003 Offset =
N.getConstantOperandVal(1);
12006 if (!RHSConstant && DV->isIndirect())
12013 auto *DIExpr = DV->getExpression();
12014 auto NewLocOps = DV->copyLocationOps();
12016 size_t OrigLocOpsSize = NewLocOps.size();
12017 for (
size_t i = 0; i < OrigLocOpsSize; ++i) {
12022 NewLocOps[i].getSDNode() != &
N)
12033 const auto *TmpDIExpr =
12041 NewLocOps.push_back(RHS);
12050 DV->isVariadic() || OrigLocOpsSize != NewLocOps.size();
12052 auto AdditionalDependencies = DV->getAdditionalDependencies();
12054 DV->getVariable(), DIExpr, NewLocOps, AdditionalDependencies,
12055 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder(), IsVariadic);
12057 DV->setIsInvalidated();
12058 DV->setIsEmitted();
12060 N0.
getNode()->dumprFull(
this);
12061 dbgs() <<
" into " << *DIExpr <<
'\n');
12068 TypeSize ToSize =
N.getValueSizeInBits(0);
12072 auto NewLocOps = DV->copyLocationOps();
12074 for (
size_t i = 0; i < NewLocOps.size(); ++i) {
12076 NewLocOps[i].getSDNode() != &
N)
12088 DV->getAdditionalDependencies(), DV->isIndirect(),
12089 DV->getDebugLoc(), DV->getOrder(), DV->isVariadic());
12092 DV->setIsInvalidated();
12093 DV->setIsEmitted();
12095 dbgs() <<
" into " << *DbgExpression <<
'\n');
12102 assert((!Dbg->getSDNodes().empty() ||
12105 return Op.getKind() == SDDbgOperand::FRAMEIX;
12107 "Salvaged DbgValue should depend on a new SDNode");
12116 "Expected inlined-at fields to agree");
12117 return new (DbgInfo->getAlloc())
SDDbgLabel(Label,
DL, O);
12132 while (UI != UE &&
N == UI->
getUser())
12140 :
SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
12153 "Cannot replace with this method!");
12154 assert(From != To.
getNode() &&
"Cannot replace uses of with self");
12169 RAUWUpdateListener Listener(*
this, UI, UE);
12174 RemoveNodeFromCSEMaps(
User);
12189 AddModifiedNodeToCSEMaps(
User);
12205 for (
unsigned i = 0, e = From->
getNumValues(); i != e; ++i)
12208 "Cannot use this version of ReplaceAllUsesWith!");
12216 for (
unsigned i = 0, e = From->
getNumValues(); i != e; ++i)
12218 assert((i < To->getNumValues()) &&
"Invalid To location");
12227 RAUWUpdateListener Listener(*
this, UI, UE);
12232 RemoveNodeFromCSEMaps(
User);
12248 AddModifiedNodeToCSEMaps(
User);
12265 for (
unsigned i = 0, e = From->
getNumValues(); i != e; ++i) {
12275 RAUWUpdateListener Listener(*
this, UI, UE);
12280 RemoveNodeFromCSEMaps(
User);
12286 bool To_IsDivergent =
false;
12300 AddModifiedNodeToCSEMaps(
User);
12313 if (From == To)
return;
12329 RAUWUpdateListener Listener(*
this, UI, UE);
12332 bool UserRemovedFromCSEMaps =
false;
12349 if (!UserRemovedFromCSEMaps) {
12350 RemoveNodeFromCSEMaps(
User);
12351 UserRemovedFromCSEMaps =
true;
12361 if (!UserRemovedFromCSEMaps)
12366 AddModifiedNodeToCSEMaps(
User);
12385bool operator<(
const UseMemo &L,
const UseMemo &R) {
12386 return (intptr_t)L.User < (intptr_t)R.User;
12393 SmallVectorImpl<UseMemo> &
Uses;
12395 void NodeDeleted(SDNode *
N, SDNode *
E)
override {
12396 for (UseMemo &Memo :
Uses)
12397 if (Memo.User ==
N)
12398 Memo.User =
nullptr;
12402 RAUOVWUpdateListener(SelectionDAG &d, SmallVectorImpl<UseMemo> &uses)
12403 : SelectionDAG::DAGUpdateListener(d),
Uses(uses) {}
12410 switch (
Node->getOpcode()) {
12422 if (TLI->isSDNodeAlwaysUniform(
N)) {
12423 assert(!TLI->isSDNodeSourceOfDivergence(
N, FLI, UA) &&
12424 "Conflicting divergence information!");
12427 if (TLI->isSDNodeSourceOfDivergence(
N, FLI, UA))
12429 for (
const auto &
Op :
N->ops()) {
12430 EVT VT =
Op.getValueType();
12433 if (VT != MVT::Other &&
Op.getNode()->isDivergent() &&
12445 if (
N->SDNodeBits.IsDivergent != IsDivergent) {
12446 N->SDNodeBits.IsDivergent = IsDivergent;
12449 }
while (!Worklist.
empty());
12452void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
12454 Order.reserve(AllNodes.size());
12456 unsigned NOps =
N.getNumOperands();
12459 Order.push_back(&
N);
12461 for (
size_t I = 0;
I != Order.size(); ++
I) {
12463 for (
auto *U :
N->users()) {
12464 unsigned &UnsortedOps = Degree[U];
12465 if (0 == --UnsortedOps)
12466 Order.push_back(U);
12471#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
12472void SelectionDAG::VerifyDAGDivergence() {
12473 std::vector<SDNode *> TopoOrder;
12474 CreateTopologicalOrder(TopoOrder);
12475 for (
auto *
N : TopoOrder) {
12477 "Divergence bit inconsistency detected");
12500 for (
unsigned i = 0; i != Num; ++i) {
12501 unsigned FromResNo = From[i].
getResNo();
12504 if (
Use.getResNo() == FromResNo) {
12506 Uses.push_back(Memo);
12513 RAUOVWUpdateListener Listener(*
this,
Uses);
12515 for (
unsigned UseIndex = 0, UseIndexEnd =
Uses.size();
12516 UseIndex != UseIndexEnd; ) {
12522 if (
User ==
nullptr) {
12528 RemoveNodeFromCSEMaps(
User);
12535 unsigned i =
Uses[UseIndex].Index;
12540 }
while (UseIndex != UseIndexEnd &&
Uses[UseIndex].
User ==
User);
12544 AddModifiedNodeToCSEMaps(
User);
12552 unsigned DAGSize = 0;
12568 unsigned Degree =
N.getNumOperands();
12571 N.setNodeId(DAGSize++);
12573 if (Q != SortedPos)
12574 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
12575 assert(SortedPos != AllNodes.end() &&
"Overran node list");
12579 N.setNodeId(Degree);
12591 unsigned Degree =
P->getNodeId();
12592 assert(Degree != 0 &&
"Invalid node degree");
12596 P->setNodeId(DAGSize++);
12597 if (
P->getIterator() != SortedPos)
12598 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(
P));
12599 assert(SortedPos != AllNodes.end() &&
"Overran node list");
12603 P->setNodeId(Degree);
12606 if (
Node.getIterator() == SortedPos) {
12610 dbgs() <<
"Overran sorted position:\n";
12612 dbgs() <<
"Checking if this is due to cycles\n";
12619 assert(SortedPos == AllNodes.end() &&
12620 "Topological sort incomplete!");
12622 "First node in topological sort is not the entry token!");
12623 assert(AllNodes.front().getNodeId() == 0 &&
12624 "First node in topological sort has non-zero id!");
12625 assert(AllNodes.front().getNumOperands() == 0 &&
12626 "First node in topological sort has operands!");
12627 assert(AllNodes.back().getNodeId() == (
int)DAGSize-1 &&
12628 "Last node in topologic sort has unexpected id!");
12629 assert(AllNodes.back().use_empty() &&
12630 "Last node in topologic sort has users!");
12637 SortedNodes.
clear();
12644 unsigned NumOperands =
N.getNumOperands();
12645 if (NumOperands == 0)
12649 RemainingOperands[&
N] = NumOperands;
12654 for (
unsigned i = 0U; i < SortedNodes.
size(); ++i) {
12655 const SDNode *
N = SortedNodes[i];
12656 for (
const SDNode *U :
N->users()) {
12659 if (U->getOpcode() == ISD::HANDLENODE)
12661 unsigned &NumRemOperands = RemainingOperands[U];
12662 assert(NumRemOperands &&
"Invalid number of remaining operands");
12664 if (!NumRemOperands)
12669 assert(SortedNodes.
size() == AllNodes.size() &&
"Node count mismatch");
12671 "First node in topological sort is not the entry token");
12672 assert(SortedNodes.
front()->getNumOperands() == 0 &&
12673 "First node in topological sort has operands");
12679 for (
SDNode *SD : DB->getSDNodes()) {
12682 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
12683 SD->setHasDebugValue(
true);
12685 DbgInfo->add(DB, isParameter);
12698 if (OldChain == NewMemOpChain || OldChain.
use_empty())
12699 return NewMemOpChain;
12702 OldChain, NewMemOpChain);
12705 return TokenFactor;
12724 if (OutFunction !=
nullptr)
12732 std::string ErrorStr;
12734 ErrorFormatter <<
"Undefined external symbol ";
12735 ErrorFormatter <<
'"' << Symbol <<
'"';
12745 return Const !=
nullptr && Const->isZero();
12754 return Const !=
nullptr && Const->isZero() && !Const->isNegative();
12759 return Const !=
nullptr && Const->isAllOnes();
12764 return Const !=
nullptr && Const->isOne();
12769 return Const !=
nullptr && Const->isMinSignedValue();
12773 unsigned OperandNo) {
12778 APInt Const = ConstV->getAPIntValue().trunc(V.getScalarValueSizeInBits());
12784 return Const.isZero();
12786 return Const.isOne();
12789 return Const.isAllOnes();
12791 return Const.isMinSignedValue();
12793 return Const.isMaxSignedValue();
12798 return OperandNo == 1 && Const.isZero();
12801 return OperandNo == 1 && Const.isOne();
12806 return ConstFP->isZero() &&
12807 (Flags.hasNoSignedZeros() || ConstFP->isNegative());
12809 return OperandNo == 1 && ConstFP->isZero() &&
12810 (Flags.hasNoSignedZeros() || !ConstFP->isNegative());
12812 return ConstFP->isExactlyValue(1.0);
12814 return OperandNo == 1 && ConstFP->isExactlyValue(1.0);
12816 case ISD::FMAXNUM: {
12818 EVT VT = V.getValueType();
12820 APFloat NeutralAF = !Flags.hasNoNaNs()
12822 : !Flags.hasNoInfs()
12825 if (Opcode == ISD::FMAXNUM)
12828 return ConstFP->isExactlyValue(NeutralAF);
12836 while (V.getOpcode() == ISD::BITCAST)
12842 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
12861 !DemandedElts[IndexC->getZExtValue()]) {
12880 unsigned NumBits = V.getScalarValueSizeInBits();
12883 return C && (
C->getAPIntValue().
countr_one() >= NumBits);
12887 bool AllowTruncation) {
12888 EVT VT =
N.getValueType();
12897 bool AllowTruncation) {
12904 EVT VecEltVT =
N->getValueType(0).getVectorElementType();
12906 EVT CVT = CN->getValueType(0);
12907 assert(CVT.
bitsGE(VecEltVT) &&
"Illegal splat_vector element extension");
12908 if (AllowTruncation || CVT == VecEltVT)
12915 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
12920 if (CN && (UndefElements.
none() || AllowUndefs)) {
12922 EVT NSVT =
N.getValueType().getScalarType();
12923 assert(CVT.
bitsGE(NSVT) &&
"Illegal build vector element extension");
12924 if (AllowTruncation || (CVT == NSVT))
12933 EVT VT =
N.getValueType();
12941 const APInt &DemandedElts,
12942 bool AllowUndefs) {
12949 BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
12951 if (CN && (UndefElements.
none() || AllowUndefs))
12966 return C &&
C->isZero();
12972 return C &&
C->isOne();
12977 return C &&
C->isExactlyValue(1.0);
12982 unsigned BitWidth =
N.getScalarValueSizeInBits();
12984 return C &&
C->isAllOnes() &&
C->getValueSizeInBits(0) ==
BitWidth;
12990 APInt(
C->getAPIntValue().getBitWidth(), 1));
12996 return C &&
C->isZero();
13001 return C &&
C->isZero();
13010 :
SDNode(
Opc, Order, dl, VTs), MemoryVT(memvt),
MMO(mmo) {
13020 (!
MMO->getType().isValid() ||
13034 std::vector<EVT> VTs;
13047const EVT *SDNode::getValueTypeList(
MVT VT) {
13048 static EVTArray SimpleVTArray;
13051 return &SimpleVTArray.VTs[VT.
SimpleTy];
13060 if (U.getResNo() ==
Value)
13098 return any_of(
N->op_values(),
13099 [
this](
SDValue Op) { return this == Op.getNode(); });
13113 unsigned Depth)
const {
13114 if (*
this == Dest)
return true;
13118 if (
Depth == 0)
return false;
13138 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
13144 if (Ld->isUnordered())
13145 return Ld->getChain().reachesChainWithoutSideEffects(Dest,
Depth-1);
13158 this->Flags &= Flags;
13164 bool AllowPartials) {
13179 unsigned CandidateBinOp =
Op.getOpcode();
13180 if (
Op.getValueType().isFloatingPoint()) {
13182 switch (CandidateBinOp) {
13184 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
13194 auto PartialReduction = [&](
SDValue Op,
unsigned NumSubElts) {
13195 if (!AllowPartials || !
Op)
13197 EVT OpVT =
Op.getValueType();
13200 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
13219 unsigned Stages =
Log2_32(
Op.getValueType().getVectorNumElements());
13221 for (
unsigned i = 0; i < Stages; ++i) {
13222 unsigned MaskEnd = (1 << i);
13224 if (
Op.getOpcode() != CandidateBinOp)
13225 return PartialReduction(PrevOp, MaskEnd);
13241 return PartialReduction(PrevOp, MaskEnd);
13244 for (
int Index = 0; Index < (int)MaskEnd; ++Index)
13245 if (Shuffle->
getMaskElt(Index) != (
int)(MaskEnd + Index))
13246 return PartialReduction(PrevOp, MaskEnd);
13253 while (
Op.getOpcode() == CandidateBinOp) {
13254 unsigned NumElts =
Op.getValueType().getVectorNumElements();
13263 if (NumSrcElts != (2 * NumElts))
13278 EVT VT =
N->getValueType(0);
13287 else if (NE > ResNE)
13290 if (
N->getNumValues() == 2) {
13293 EVT VT1 =
N->getValueType(1);
13297 for (i = 0; i != NE; ++i) {
13298 for (
unsigned j = 0, e =
N->getNumOperands(); j != e; ++j) {
13299 SDValue Operand =
N->getOperand(j);
13307 SDValue EltOp =
getNode(
N->getOpcode(), dl, {EltVT, EltVT1}, Operands);
13312 for (; i < ResNE; ++i) {
13324 assert(
N->getNumValues() == 1 &&
13325 "Can't unroll a vector with multiple results!");
13331 for (i= 0; i != NE; ++i) {
13332 for (
unsigned j = 0, e =
N->getNumOperands(); j != e; ++j) {
13333 SDValue Operand =
N->getOperand(j);
13341 Operands[j] = Operand;
13345 switch (
N->getOpcode()) {
13370 case ISD::ADDRSPACECAST: {
13373 ASC->getSrcAddressSpace(),
13374 ASC->getDestAddressSpace()));
13380 for (; i < ResNE; ++i)
13389 unsigned Opcode =
N->getOpcode();
13393 "Expected an overflow opcode");
13395 EVT ResVT =
N->getValueType(0);
13396 EVT OvVT =
N->getValueType(1);
13405 else if (NE > ResNE)
13417 for (
unsigned i = 0; i < NE; ++i) {
13418 SDValue Res =
getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
13441 if (LD->isVolatile() ||
Base->isVolatile())
13444 if (!LD->isSimple())
13446 if (LD->isIndexed() ||
Base->isIndexed())
13448 if (LD->getChain() !=
Base->getChain())
13450 EVT VT = LD->getMemoryVT();
13458 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *
this,
Offset))
13459 return (Dist * (int64_t)Bytes ==
Offset);
13468 int64_t GVOffset = 0;
13469 if (TLI->isGAPlusOffset(Ptr.
getNode(), GV, GVOffset)) {
13480 int FrameIdx = INT_MIN;
13481 int64_t FrameOffset = 0;
13483 FrameIdx = FI->getIndex();
13491 if (FrameIdx != INT_MIN) {
13496 return std::nullopt;
13506 "Split node must be a scalar type");
13511 return std::make_pair(
Lo,
Hi);
13520 LoVT = HiVT = TLI->getTypeToTransformTo(*
getContext(), VT);
13524 return std::make_pair(LoVT, HiVT);
13532 bool *HiIsEmpty)
const {
13542 "Mixing fixed width and scalable vectors when enveloping a type");
13547 *HiIsEmpty =
false;
13555 return std::make_pair(LoVT, HiVT);
13560std::pair<SDValue, SDValue>
13565 "Splitting vector with an invalid mixture of fixed and scalable "
13568 N.getValueType().getVectorMinNumElements() &&
13569 "More vector elements requested than available!");
13578 return std::make_pair(
Lo,
Hi);
13585 EVT VT =
N.getValueType();
13587 "Expecting the mask to be an evenly-sized vector");
13595 return std::make_pair(
Lo,
Hi);
13600 EVT VT =
N.getValueType();
13608 unsigned Start,
unsigned Count,
13610 EVT VT =
Op.getValueType();
13613 if (EltVT ==
EVT())
13616 for (
unsigned i = Start, e = Start +
Count; i != e; ++i) {
13628 return Val.MachineCPVal->getType();
13629 return Val.ConstVal->getType();
13633 unsigned &SplatBitSize,
13634 bool &HasAnyUndefs,
13635 unsigned MinSplatBits,
13636 bool IsBigEndian)
const {
13640 if (MinSplatBits > VecWidth)
13645 SplatValue =
APInt(VecWidth, 0);
13646 SplatUndef =
APInt(VecWidth, 0);
13653 assert(
NumOps > 0 &&
"isConstantSplat has 0-size build vector");
13656 for (
unsigned j = 0; j <
NumOps; ++j) {
13657 unsigned i = IsBigEndian ?
NumOps - 1 - j : j;
13659 unsigned BitPos = j * EltWidth;
13662 SplatUndef.
setBits(BitPos, BitPos + EltWidth);
13664 SplatValue.
insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
13666 SplatValue.
insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
13673 HasAnyUndefs = (SplatUndef != 0);
13676 while (VecWidth > 8) {
13681 unsigned HalfSize = VecWidth / 2;
13688 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
13689 MinSplatBits > HalfSize)
13692 SplatValue = HighValue | LowValue;
13693 SplatUndef = HighUndef & LowUndef;
13695 VecWidth = HalfSize;
13704 SplatBitSize = VecWidth;
13711 if (UndefElements) {
13712 UndefElements->
clear();
13719 for (
unsigned i = 0; i !=
NumOps; ++i) {
13720 if (!DemandedElts[i])
13723 if (
Op.isUndef()) {
13725 (*UndefElements)[i] =
true;
13726 }
else if (!Splatted) {
13728 }
else if (Splatted !=
Op) {
13734 unsigned FirstDemandedIdx = DemandedElts.
countr_zero();
13736 "Can only have a splat without a constant for all undefs.");
13753 if (UndefElements) {
13754 UndefElements->
clear();
13765 (*UndefElements)[
I] =
true;
13768 for (
unsigned SeqLen = 1; SeqLen <
NumOps; SeqLen *= 2) {
13769 Sequence.append(SeqLen,
SDValue());
13770 for (
unsigned I = 0;
I !=
NumOps; ++
I) {
13771 if (!DemandedElts[
I])
13773 SDValue &SeqOp = Sequence[
I % SeqLen];
13775 if (
Op.isUndef()) {
13780 if (SeqOp && !SeqOp.
isUndef() && SeqOp !=
Op) {
13786 if (!Sequence.empty())
13790 assert(Sequence.empty() &&
"Failed to empty non-repeating sequence pattern");
13831 const APFloat &APF = CN->getValueAPF();
13837 return IntVal.exactLogBase2();
13843 bool IsLittleEndian,
unsigned DstEltSizeInBits,
13851 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
13852 "Invalid bitcast scale");
13857 BitVector SrcUndeElements(NumSrcOps,
false);
13859 for (
unsigned I = 0;
I != NumSrcOps; ++
I) {
13861 if (
Op.isUndef()) {
13862 SrcUndeElements.
set(
I);
13867 assert((CInt || CFP) &&
"Unknown constant");
13868 SrcBitElements[
I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits)
13869 : CFP->getValueAPF().bitcastToAPInt();
13873 recastRawBits(IsLittleEndian, DstEltSizeInBits, RawBitElements,
13874 SrcBitElements, UndefElements, SrcUndeElements);
13879 unsigned DstEltSizeInBits,
13884 unsigned NumSrcOps = SrcBitElements.
size();
13885 unsigned SrcEltSizeInBits = SrcBitElements[0].getBitWidth();
13886 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
13887 "Invalid bitcast scale");
13888 assert(NumSrcOps == SrcUndefElements.
size() &&
13889 "Vector size mismatch");
13891 unsigned NumDstOps = (NumSrcOps * SrcEltSizeInBits) / DstEltSizeInBits;
13892 DstUndefElements.
clear();
13893 DstUndefElements.
resize(NumDstOps,
false);
13897 if (SrcEltSizeInBits <= DstEltSizeInBits) {
13898 unsigned Scale = DstEltSizeInBits / SrcEltSizeInBits;
13899 for (
unsigned I = 0;
I != NumDstOps; ++
I) {
13900 DstUndefElements.
set(
I);
13901 APInt &DstBits = DstBitElements[
I];
13902 for (
unsigned J = 0; J != Scale; ++J) {
13903 unsigned Idx = (
I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
13904 if (SrcUndefElements[Idx])
13906 DstUndefElements.
reset(
I);
13907 const APInt &SrcBits = SrcBitElements[Idx];
13909 "Illegal constant bitwidths");
13910 DstBits.
insertBits(SrcBits, J * SrcEltSizeInBits);
13917 unsigned Scale = SrcEltSizeInBits / DstEltSizeInBits;
13918 for (
unsigned I = 0;
I != NumSrcOps; ++
I) {
13919 if (SrcUndefElements[
I]) {
13920 DstUndefElements.
set(
I * Scale, (
I + 1) * Scale);
13923 const APInt &SrcBits = SrcBitElements[
I];
13924 for (
unsigned J = 0; J != Scale; ++J) {
13925 unsigned Idx = (
I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
13926 APInt &DstBits = DstBitElements[Idx];
13927 DstBits = SrcBits.
extractBits(DstEltSizeInBits, J * DstEltSizeInBits);
13934 unsigned Opc =
Op.getOpcode();
13941std::optional<std::pair<APInt, APInt>>
13945 return std::nullopt;
13949 return std::nullopt;
13956 return std::nullopt;
13958 for (
unsigned i = 2; i <
NumOps; ++i) {
13960 return std::nullopt;
13963 if (Val != (Start + (Stride * i)))
13964 return std::nullopt;
13967 return std::make_pair(Start, Stride);
13973 for (i = 0, e = Mask.size(); i != e && Mask[i] < 0; ++i)
13983 for (
int Idx = Mask[i]; i != e; ++i)
13984 if (Mask[i] >= 0 && Mask[i] != Idx)
13992 SDValue N,
bool AllowOpaques)
const {
13996 return AllowOpaques || !
C->isOpaque();
14005 TLI->isOffsetFoldingLegal(GA))
14033 return std::nullopt;
14035 EVT VT =
N->getValueType(0);
14037 switch (TLI->getBooleanContents(
N.getValueType())) {
14043 return std::nullopt;
14049 return std::nullopt;
14057 assert(!
Node->OperandList &&
"Node already has operands");
14059 "too many operands to fit into SDNode");
14060 SDUse *
Ops = OperandRecycler.allocate(
14063 bool IsDivergent =
false;
14064 for (
unsigned I = 0;
I != Vals.
size(); ++
I) {
14066 Ops[
I].setInitial(Vals[
I]);
14067 EVT VT =
Ops[
I].getValueType();
14070 if (VT != MVT::Other &&
14073 IsDivergent =
true;
14078 if (!TLI->isSDNodeAlwaysUniform(Node)) {
14079 IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, UA);
14080 Node->SDNodeBits.IsDivergent = IsDivergent;
14088 while (Vals.
size() > Limit) {
14089 unsigned SliceIdx = Vals.
size() - Limit;
14124 case ISD::FMAXNUM: {
14130 if (Opcode == ISD::FMAXNUM)
14135 case ISD::FMINIMUM:
14136 case ISD::FMAXIMUM: {
14141 if (Opcode == ISD::FMAXIMUM)
14165 const SDLoc &DLoc) {
14169 RTLIB::Libcall LC =
static_cast<RTLIB::Libcall
>(LibFunc);
14176 return TLI->LowerCallTo(CLI).second;
14180 assert(From && To &&
"Invalid SDNode; empty source SDValue?");
14181 auto I = SDEI.find(From);
14182 if (
I == SDEI.end())
14187 NodeExtraInfo NEI =
I->second;
14196 SDEI[To] = std::move(NEI);
14213 auto VisitFrom = [&](
auto &&Self,
const SDNode *
N,
int MaxDepth) {
14214 if (MaxDepth == 0) {
14220 if (!FromReach.
insert(
N).second)
14223 Self(Self,
Op.getNode(), MaxDepth - 1);
14228 auto DeepCopyTo = [&](
auto &&Self,
const SDNode *
N) {
14231 if (!Visited.
insert(
N).second)
14236 if (
N == To &&
Op.getNode() == EntrySDN) {
14241 if (!Self(Self,
Op.getNode()))
14255 for (
int PrevDepth = 0, MaxDepth = 16; MaxDepth <= 1024;
14256 PrevDepth = MaxDepth, MaxDepth *= 2, Visited.
clear()) {
14261 for (
const SDNode *
N : StartFrom)
14262 VisitFrom(VisitFrom,
N, MaxDepth - PrevDepth);
14266 LLVM_DEBUG(
dbgs() << __func__ <<
": MaxDepth=" << MaxDepth <<
" too low\n");
14274 errs() <<
"warning: incomplete propagation of SelectionDAG::NodeExtraInfo\n";
14275 assert(
false &&
"From subgraph too complex - increase max. MaxDepth?");
14277 SDEI[To] = std::move(NEI);
14291 if (!Visited.
insert(
N).second) {
14292 errs() <<
"Detected cycle in SelectionDAG\n";
14293 dbgs() <<
"Offending node:\n";
14294 N->dumprFull(DAG);
dbgs() <<
"\n";
14310 bool check = force;
14311#ifdef EXPENSIVE_CHECKS
14315 assert(
N &&
"Checking nonexistent SDNode");
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isConstant(const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
#define __asan_unpoison_memory_region(p, size)
#define LLVM_LIKELY(EXPR)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
This file defines a hash set that can be used to remove duplication of nodes in a graph.
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB)
static bool shouldLowerMemFuncForSize(const MachineFunction &MF)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
PowerPC Reduce CR logical Operation
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
Contains matchers for matching SelectionDAG nodes and values.
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo, BatchAAResults *BatchAA)
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo)
Lower the call to 'memset' intrinsic function into a series of store operations.
static std::optional< APInt > FoldValueWithUndef(unsigned Opcode, const APInt &C1, bool IsUndef1, const APInt &C2, bool IsUndef2)
static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step, SelectionDAG &DAG)
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC, SDVTList VTList, ArrayRef< SDValue > OpList)
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, const TargetLowering &TLI, const ConstantDataArraySlice &Slice)
getMemsetStringVal - Similar to getMemsetValue.
static cl::opt< bool > EnableMemCpyDAGOpt("enable-memcpy-dag-opt", cl::Hidden, cl::init(true), cl::desc("Gang up loads and stores generated by inlining of memcpy"))
static bool haveNoCommonBitsSetCommutative(SDValue A, SDValue B)
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList)
AddNodeIDValueTypes - Value type lists are intern'd so we can represent them solely with their pointe...
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef< int > M)
Swaps the values of N1 and N2.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice)
Returns true if memcpy source is constant data.
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo)
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)
AddNodeIDOpcode - Add the node opcode to the NodeID data.
static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike)
static bool doNotCSE(SDNode *N)
doNotCSE - Return true if CSE should not be performed for this node.
static cl::opt< int > MaxLdStGlue("ldstmemcpy-glue-max", cl::desc("Number limit for gluing ld/st of memcpy."), cl::Hidden, cl::init(0))
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef< SDValue > Ops)
AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
Try to simplify vector concatenation to an input value, undef, or build vector.
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, SelectionDAG &DAG, SDValue Ptr, int64_t Offset=0)
InferPointerInfo - If the specified ptr/offset is a frame index, infer a MachinePointerInfo record fr...
static bool isInTailCallPositionWrapper(const CallInst *CI, const SelectionDAG *SelDAG, bool AllowReturnsFirstArg)
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N)
If this is an SDNode with special info, add this info to the NodeID data.
static bool gluePropagatesDivergence(const SDNode *Node)
Return true if a glue output should propagate divergence information.
static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G)
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs)
makeVTList - Return an instance of the SDVTList struct initialized with the specified members.
static void checkForCyclesHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallPtrSetImpl< const SDNode * > &Checked, const llvm::SelectionDAG *DAG)
static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SmallVector< SDValue, 32 > &OutChains, unsigned From, unsigned To, SmallVector< SDValue, 16 > &OutLoadChains, SmallVector< SDValue, 16 > &OutStoreChains)
static int isSignedOp(ISD::CondCode Opcode)
For an integer comparison, return 1 if the comparison is a signed operation and 2 if the result is an...
static std::optional< APInt > FoldValue(unsigned Opcode, const APInt &C1, const APInt &C2)
static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, unsigned AS)
static cl::opt< unsigned > MaxSteps("has-predecessor-max-steps", cl::Hidden, cl::init(8192), cl::desc("DAG combiner limit number of steps when searching DAG " "for predecessor nodes"))
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This file describes how to lower LLVM code to machine code.
static void removeOperands(MachineInstr &MI, unsigned i)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
static const fltSemantics & IEEEsingle()
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
static constexpr roundingMode rmTowardZero
static const fltSemantics & BFloat()
static const fltSemantics & IEEEquad()
static const fltSemantics & IEEEdouble()
static constexpr roundingMode rmTowardNegative
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardPositive
static const fltSemantics & IEEEhalf()
opStatus
IEEE-754R 7: Default exception handling.
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly not equal, like -0.0 and 0.0.
opStatus add(const APFloat &RHS, roundingMode RM)
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
opStatus multiply(const APFloat &RHS, roundingMode RM)
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
opStatus mod(const APFloat &RHS)
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt usub_sat(const APInt &RHS) const
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
LLVM_ABI APInt sadd_sat(const APInt &RHS) const
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
void clearAllBits()
Set every bit to 0.
LLVM_ABI APInt rotr(unsigned rotateAmt) const
Rotate right by rotateAmt.
LLVM_ABI APInt reverseBits() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt sshl_sat(const APInt &RHS) const
LLVM_ABI APInt ushl_sat(const APInt &RHS) const
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
LLVM_ABI APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
unsigned logBase2() const
LLVM_ABI APInt uadd_sat(const APInt &RHS) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
LLVM_ABI APInt byteSwap() const
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static bool isSameValue(const APInt &I1, const APInt &I2)
Determine if two APInts have the same value, after zero-extending one of them (if needed!) to ensure that the bit-widths match.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
void clearBits(unsigned LoBit, unsigned HiBit)
Clear the bits from LoBit (inclusive) to HiBit (exclusive) to 0.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
LLVM_ABI APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
unsigned getSrcAddressSpace() const
unsigned getDestAddressSpace() const
static Capacity get(size_t N)
Get the capacity of an array that can hold at least N elements.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length. It allows various APIs to take consecutive elements easily and conveniently.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
This is an SDNode representing atomic operations.
static LLVM_ABI BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG)
Parses tree in N for base, index, offset addresses.
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
void clear()
clear - Removes all bits from the bitvector.
bool none() const
none - Returns true if none of the bits are set.
size_type size() const
size - Returns the number of bits in this bitvector.
int64_t getOffset() const
unsigned getTargetFlags() const
const BlockAddress * getBlockAddress() const
The address of a basic block.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequencies.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
LLVM_ABI bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &RawBitElements, BitVector &UndefElements) const
Extract the raw bit data from a build vector of Undef, Constant or ConstantFP node elements.
static LLVM_ABI void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &DstBitElements, ArrayRef< APInt > SrcBitElements, BitVector &DstUndefElements, const BitVector &SrcUndefElements)
Recast bit data SrcBitElements to DstEltSizeInBits wide elements.
LLVM_ABI bool getRepeatedSequence(const APInt &DemandedElts, SmallVectorImpl< SDValue > &Sequence, BitVector *UndefElements=nullptr) const
Find the shortest repeating sequence of values in the build vector.
LLVM_ABI ConstantFPSDNode * getConstantFPSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant FP or null if this is not a constant FP splat.
LLVM_ABI std::optional< std::pair< APInt, APInt > > isConstantSequence() const
If this BuildVector is constant and represents the numerical series "<a, a+n, a+2n, a+3n, ...>" where a is an integer and n is a non-zero integer, the value "<a,n>" is returned.
LLVM_ABI SDValue getSplatValue(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted value or a null value if this is not a splat.
LLVM_ABI bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
LLVM_ABI ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
LLVM_ABI int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power or 2,...
LLVM_ABI bool isConstant() const
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI bool isValueValidForType(EVT VT, const APFloat &Val)
const APFloat & getValueAPF() const
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly not equal, like -0.0 and 0.0.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValue() const
This is the shared class of boolean and integer constants.
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
bool isMachineConstantPoolEntry() const
LLVM_ABI Type * getType() const
This class represents a range of values.
LLVM_ABI ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
LLVM_ABI OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
LLVM_ABI KnownBits toKnownBits() const
Return known bits for values in this range.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
This is an important base class in LLVM.
LLVM_ABI Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if possible, or null if not.
static LLVM_ABI ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI const DIExpression * convertToVariadicExpression(const DIExpression *Expr)
If Expr is a non-variadic expression (i.e.
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
Base class for variables.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
LLVM_ABI unsigned getPointerTypeSizeInBits(Type *) const
The pointer representation size in bits for this type.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Implements a dense probed hash-table based set.
const char * getSymbol() const
unsigned getTargetFlags() const
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
Data structure describing the variable locations in a function.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
AttributeList getAttributes() const
Return the attribute list for this Function.
int64_t getOffset() const
LLVM_ABI unsigned getAddressSpace() const
unsigned getTargetFlags() const
const GlobalValue * getGlobal() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
This class is used to form a handle around another node that is persistent and is updated across invocations of replaceAllUsesWith on its operand.
const SDValue & getValue() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
This is an important class for using LLVM in a threaded context.
This SDNode is used for LIFETIME_START/LIFETIME_END values.
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created and uniqued by the MCContext class.
const MDOperand & getOperand(unsigned I) const
static MVT getIntegerVT(unsigned BitWidth)
Abstract base class for all machine specific constantpool value subclasses.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
A description of a memory reference used in the backend.
const MDNode * getRanges() const
Return the range tag for the memory reference.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value,.
This class contains meta information specific to a module.
An SDNode that represents everything that will be needed to construct a MachineInstr.
This class is used to represent an MGATHER node.
This class is used to represent an MLOAD node.
This class is used to represent an MSCATTER node.
This class is used to represent an MSTORE node.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
LLVM_ABI MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt, MachineMemOperand *MMO)
MachineMemOperand * MMO
Memory reference information.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
unsigned getRawSubclassData() const
Return the SubclassData value, without HasDebugValue.
EVT getMemoryVT() const
Return the type of the in-memory value.
Representation for a specific memory location.
A Module instance is used to store all the information related to an LLVM module.
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length. It allows various APIs to take and modify consecutive elements easily and conveniently.
Pass interface - Implemented by all 'passes'.
Class to represent pointers.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Analysis providing profile information.
void Deallocate(SubClass *E)
Deallocate - Release storage for the pointed-to object.
Wrapper class representing virtual and physical registers.
Keeps track of dbg_value information through SDISel.
LLVM_ABI void add(SDDbgValue *V, bool isParameter)
LLVM_ABI void erase(const SDNode *Node)
Invalidate all DbgValues attached to the node and remove it from the Node-to-DbgValues map.
Holds the information from a dbg_label node through SDISel.
Holds the information for a single machine location through SDISel; either an SDNode,...
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
@ SDNODE
Value is the result of an expression.
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
unsigned getIROrder() const
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
LLVM_ABI void dumprFull(const SelectionDAG *G=nullptr) const
printrFull to dbgs().
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVM_ABI bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
static constexpr size_t getMaxNumOperands()
Return the maximum number of operands that a SDNode can hold.
iterator_range< use_iterator > uses()
MemSDNodeBitfields MemSDNodeBits
LLVM_ABI void Profile(FoldingSetNodeID &ID) const
Gather unique data for the node.
bool getHasDebugValue() const
SDNodeFlags getFlags() const
void setNodeId(int Id)
Set unique node id.
LLVM_ABI void intersectFlagsWith(const SDNodeFlags Flags)
Clear any flags in this node that aren't also set in Flags.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
bool use_empty() const
Return true if there are no uses of this node.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
static LLVM_ABI bool areOnlyUsersOf(ArrayRef< const SDNode * > Nodes, const SDNode *N)
Return true if all the users of N are contained in Nodes.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
LLVM_ABI bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
const APInt & getConstantOperandAPInt(unsigned Num) const
Helper method returns the APInt of a ConstantSDNode operand.
std::optional< APInt > bitcastToAPInt() const
LLVM_ABI bool hasPredecessor(const SDNode *N) const
Return true if N is a predecessor of this node.
LLVM_ABI bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
op_iterator op_end() const
op_iterator op_begin() const
static use_iterator use_end()
LLVM_ABI void DropOperands()
Release the operands and set this node to have zero operands.
SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
Create an SDNode.
Represents a use of a SDNode.
SDNode * getUser()
This returns the SDNode that contains this Use.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
LLVM_ABI bool isOperandOf(const SDNode *N) const
Return true if the referenced return value is an operand of N.
LLVM_ABI bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing any side-effecting instructions. In practice, this looks through token factors and non-volatile loads.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
virtual void verifyTargetNode(const SelectionDAG &DAG, const SDNode *N) const
Checks that the given target-specific node is valid. Aborts if it is not.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation suitable for instruction selection.
LLVM_ABI Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
LLVM_ABI SDValue getVPZeroExtendInReg(SDValue Op, SDValue Mask, SDValue EVL, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
LLVM_ABI SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op)
Return the specified value casted to the target's desired shift amount type.
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsExpanding=false)
SDValue getExtractVectorElt(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Extract element at Idx from Vec.
LLVM_ABI SDValue getSplatSourceVector(SDValue V, int &SplatIndex)
If V is a splatted value, return the source vector and its splat index.
LLVM_ABI SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI OverflowKind computeOverflowForUnsignedSub(SDValue N0, SDValue N1) const
Determine if the result of the unsigned sub of 2 nodes can overflow.
LLVM_ABI unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI std::pair< SDValue, SDValue > getStrlen(SDValue Chain, const SDLoc &dl, SDValue Src, const CallInst *CI)
Lower a strlen operation into a target library call and return the resulting chain and call result as...
LLVM_ABI SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
bool isKnownNeverSNaN(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
LLVM_ABI std::optional< bool > isBoolConstant(SDValue N) const
Check if a value \op N is a constant using the target's BooleanContent for its type.
LLVM_ABI SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI void updateDivergence(SDNode *N)
LLVM_ABI SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source vector.
LLVM_ABI SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT, SDNodeFlags Flags)
Get the (commutative) neutral element for the given opcode, if it exists.
LLVM_ABI SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
LLVM_ABI SDValue getAtomicLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT MemVT, EVT VT, SDValue Chain, SDValue Ptr, MachineMemOperand *MMO)
LLVM_ABI SDNode * getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops, const SDNodeFlags Flags, bool AllowCommute=false)
Get the specified node if it's already available, or else return NULL.
LLVM_ABI SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
LLVM_ABI SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
LLVM_ABI SDNode * SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type,...
LLVM_ABI SelectionDAG(const TargetMachine &TM, CodeGenOptLevel)
LLVM_ABI SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBitcastedSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
LLVM_ABI SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
LLVM_ABI SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
LLVM_ABI bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
LLVM_ABI void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
LLVM_ABI SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI std::optional< unsigned > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
OverflowKind
Used to represent the possible overflow behavior of an operation.
static LLVM_ABI unsigned getHasPredecessorMaxSteps()
LLVM_ABI bool haveNoCommonBitsSet(SDValue A, SDValue B) const
Return true if A and B have no common bits set.
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI bool cannotBeOrderedNegativeFP(SDValue Op) const
Test whether the given float value is known to be positive.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI bool calculateDivergence(SDNode *N)
LLVM_ABI SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
LLVM_ABI SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
LLVM_ABI SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
LLVM_ABI SDNode * mutateStrictFPToFP(SDNode *Node)
Mutate the specified strict FP node to its non-strict equivalent, unlinking the node from its chain a...
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI bool SignBitIsZeroFP(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero, for a floating-point value.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getInsertSubvector(const SDLoc &DL, SDValue Vec, SDValue SubVec, unsigned Idx)
Insert SubVec at the Idx element of Vec.
LLVM_ABI SDValue getBitcastedZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
LLVM_ABI SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
LLVM_ABI SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI bool isEqualTo(SDValue A, SDValue B) const
Test whether two SDValues are known to compare equal.
static constexpr unsigned MaxRecursionDepth
LLVM_ABI SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
bool isGuaranteedNotToBePoison(SDValue Op, unsigned Depth=0) const
Return true if this function can prove that Op is never poison.
LLVM_ABI SDValue expandVACopy(SDNode *Node)
Expand the specified ISD::VACOPY node as the Legalize pass would.
LLVM_ABI SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI void dump(bool Sorted=false) const
Dump the textual format of this DAG.
LLVM_ABI APInt computeVectorKnownZeroElements(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
For each demanded element of a vector, see if it is known to be zero.
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
LLVM_ABI std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
LLVM_ABI void salvageDebugInfo(SDNode &N)
To be invoked on an SDNode that is slated to be erased.
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
LLVM_ABI std::pair< SDValue, SDValue > UnrollVectorOverflowOp(SDNode *N, unsigned ResNE=0)
Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
LLVM_ABI SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcastedAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI void DeleteNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
LLVM_ABI std::optional< unsigned > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
LLVM_ABI SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal)
Try to simplify a select/vselect into 1 of its operands or a constant.
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
LLVM_ABI bool isConstantFPBuildVectorOrConstantFP(SDValue N) const
Test whether the given value is a constant FP or similar node.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue expandVAArg(SDNode *Node)
Expand the specified ISD::VAARG node as the Legalize pass would.
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
LLVM_ABI bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
LLVM_ABI SDValue getMaskedHistogram(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
LLVM_ABI SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
LLVM_ABI SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI OverflowKind computeOverflowForUnsignedMul(SDValue N0, SDValue N1) const
Determine if the result of the unsigned mul of 2 nodes can overflow.
LLVM_ABI void copyExtraInfo(SDNode *From, SDNode *To)
Copy extra info associated with one node to another.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getLoadFFVP(EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachineMemOperand *MMO)
LLVM_ABI SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
LLVM_ABI void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block.
LLVM_ABI std::pair< SDValue, SDValue > getMemcmp(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, const CallInst *CI)
Lower a memcmp operation into a target library call and return the resulting chain and call result as...
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI SDValue makeStateFunctionCall(unsigned LibFunc, SDValue Ptr, SDValue InChain, const SDLoc &DLoc)
Helper used to make a call to a library function that has one argument of pointer type.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
LLVM_ABI SDValue getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
LLVM_ABI SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_ABI OverflowKind computeOverflowForSignedMul(SDValue N0, SDValue N1) const
Determine if the result of the signed mul of 2 nodes can overflow.
LLVM_ABI MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
LLVM_ABI bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if '(Op & Mask) == Mask'.
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
LLVM_ABI void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
bool isConstantValueOfAnyType(SDValue N) const
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, Register VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
LLVM_ABI bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
LLVM_ABI SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI SDValue getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
LLVM_ABI SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI std::optional< unsigned > getValidMinimumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
LLVM_ABI SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
LLVM_ABI SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending...
LLVM_ABI std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getVPLogicalNOT(const SDLoc &DL, SDValue Val, SDValue Mask, SDValue EVL, EVT VT)
Create a vector-predicated logical NOT operation as (VP_XOR Val, BooleanOne, Mask,...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
iterator_range< allnodes_iterator > allnodes()
LLVM_ABI SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
LLVM_ABI SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
LLVM_ABI bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
LLVM_ABI SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
LLVM_ABI SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the Fra...
ArrayRef< SDDbgValue * > GetDbgValues(const SDNode *SD) const
Get the debug values which reference the given SDNode.
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI OverflowKind computeOverflowForSignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the signed addition of 2 nodes can overflow.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
LLVM_ABI unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and assign a unique node id for each node in the DAG based on th
ilist< SDNode >::size_type allnodes_size() const
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
LLVM_ABI SDValue FoldConstantBuildVector(BuildVectorSDNode *BV, const SDLoc &DL, EVT DstEltVT)
Fold BUILD_VECTOR of constants/undefs to the destination type BUILD_VECTOR of constants/undefs elemen...
LLVM_ABI SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_ABI SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsCompressing=false)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI bool MaskedVectorIsZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Return true if 'Op' is known to be zero in DemandedElts.
LLVM_ABI SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
LLVM_ABI SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
LLVM_ABI SDValue getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
LLVM_ABI SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
LLVM_ABI bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
LLVM_ABI SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void getTopologicallyOrderedNodes(SmallVectorImpl< const SDNode * > &SortedNodes) const
Get all the nodes in their topological order without modifying any states.
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
LLVM_ABI bool canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts, bool PoisonOnly=false, bool ConsiderFlags=true, unsigned Depth=0) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
LLVM_ABI OverflowKind computeOverflowForUnsignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the unsigned addition of 2 nodes can overflow.
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT SVT, MachineMemOperand *MMO, bool IsCompressing=false)
LLVM_ABI void canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1, SDValue &N2) const
Swap N1 and N2 if Opcode is a commutative binary opcode and the canonical form expects the opposite o...
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVM_ABI bool isKnownToBeAPowerOfTwoFP(SDValue Val, unsigned Depth=0) const
Test if the given fp value is known to be an integer power-of-2, either positive or negative.
LLVM_ABI OverflowKind computeOverflowForSignedSub(SDValue N0, SDValue N1) const
Determine if the result of the signed sub of 2 nodes can overflow.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
LLVM_ABI SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, SDNodeFlags Flags)
Try to simplify a floating-point binary operation into 1 of its operands or a constant.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getDeactivationSymbol(const GlobalValue *GV)
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
LLVM_ABI bool isUndef(unsigned Opcode, ArrayRef< SDValue > Ops)
Return true if the result of this operation is always undefined.
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
LLVM_ABI std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
LLVM_ABI SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
Fold floating-point operations when all operands are constants and/or undefined.
LLVM_ABI void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE, Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, UniformityInfo *UA, ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin, MachineModuleInfo &MMI, FunctionVarLocs const *FnVarLocs)
Prepare this SelectionDAG to process code in the given MachineFunction.
LLVM_ABI std::optional< ConstantRange > getValidShiftAmountRange(SDValue V, const APInt &DemandedElts, unsigned Depth) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue FoldSymbolOffset(unsigned Opcode, EVT VT, const GlobalAddressSDNode *GA, const SDNode *N2)
LLVM_ABI SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
LLVM_ABI SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, ArrayRef< ISD::NodeType > CandidateBinOps, bool AllowPartials=false)
Match a binop + shuffle pyramid that represents a horizontal reduction over the elements of a vector ...
LLVM_ABI bool isADDLike(SDValue Op, bool NoWrap=false) const
Return true if the specified operand is an ISD::OR or ISD::XOR node that can be treated as an ISD::AD...
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
LLVM_ABI SDValue simplifyShift(SDValue X, SDValue Y)
Try to simplify a shift into 1 of its operands or a constant.
LLVM_ABI void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits=0, unsigned SizeInBits=0, bool InvalidateDbg=true)
Transfer debug values from one node to another, while optionally generating fragment expressions for ...
LLVM_ABI SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
LLVM_ABI SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
ilist< SDNode >::iterator allnodes_iterator
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
int getMaskElt(unsigned Idx) const
ArrayRef< int > getMask() const
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
static LLVM_ABI bool isSplatMask(ArrayRef< int > Mask)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Completely target-dependent object reference.
int64_t getOffset() const
unsigned getTargetFlags() const
Provides information about what library functions are available for the current target.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself.
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
virtual bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is kno...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual unsigned getMaxGluedStoresPerMemcpy() const
Get maximum # of store operations to be glued together.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
virtual bool isLegalStoreImmediate(int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
Primary interface to the complete machine description for the target machine.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const SelectionDAGTargetInfo * getSelectionDAGInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
A Use represents the edge between a Value definition and its users.
LLVM_ABI void set(Value *Val)
User * getUser() const
Returns the User that contains this Use.
This class is used to represent a VP_GATHER node.
This class is used to represent a VP_LOAD node.
This class is used to represent a VP_SCATTER node.
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt mulhu(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on zero-extended operands.
LLVM_ABI APInt avgCeilU(const APInt &C1, const APInt &C2)
Compute the ceil of the unsigned average of C1 and C2.
LLVM_ABI APInt avgFloorU(const APInt &C1, const APInt &C2)
Compute the floor of the unsigned average of C1 and C2.
LLVM_ABI APInt fshr(const APInt &Hi, const APInt &Lo, const APInt &Shift)
Perform a funnel shift right.
LLVM_ABI APInt mulhs(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on sign-extended operands.
APInt abds(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be signed.
LLVM_ABI APInt fshl(const APInt &Hi, const APInt &Lo, const APInt &Shift)
Perform a funnel shift left.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
APInt abdu(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be unsigned.
LLVM_ABI APInt avgFloorS(const APInt &C1, const APInt &C2)
Compute the floor of the signed average of C1 and C2.
LLVM_ABI APInt avgCeilS(const APInt &C1, const APInt &C2)
Compute the ceil of the signed average of C1 and C2.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types a...
LLVM_ABI CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical AND between different comparisons of identical values: ((X op1 Y) & (X...
LLVM_ABI bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 ...
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ POISON
POISON - A poison node.
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ TargetIndex
TargetIndex - Like a constant pool entry, but with completely target-dependent semantics.
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
@ SSUBO
Same for subtraction.
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ AssertAlign
AssertAlign - These nodes record if a register contains a value that has a known alignment and the tr...
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together a...
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
LLVM_ABI NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool matchUnaryFpPredicate(SDValue Op, std::function< bool(ConstantFPSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantFPSDNode predicate.
bool isExtOpcode(unsigned Opcode)
LLVM_ABI bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
LLVM_ABI bool isVectorShrinkable(const SDNode *N, unsigned NewEltSize, bool Signed)
Returns true if the specified node is a vector where all elements can be truncated to the specified e...
LLVM_ABI bool isVPBinaryOp(unsigned Opcode)
Whether this is a vector-predicated binary operation opcode.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI std::optional< unsigned > getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept)
Translate this VP Opcode to its corresponding non-VP Opcode.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
LLVM_ABI std::optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
LLVM_ABI std::optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is an EXTLOAD.
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
LLVM_ABI bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI std::optional< unsigned > getVPForBaseOpcode(unsigned Opcode)
Translate this non-VP Opcode to its corresponding VP Opcode.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool matchUnaryPredicateImpl(SDValue Op, std::function< bool(ConstNodeType *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant BUI...
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
LLVM_ABI NodeType getInverseMinMaxOpcode(unsigned MinMaxOpc)
Given a MinMaxOpc of ISD::(U|S)MIN or ISD::(U|S)MAX, returns ISD::(U|S)MAX and ISD::(U|S)MIN,...
LLVM_ABI bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
LLVM_ABI bool isVPReduction(unsigned Opcode)
Whether this is a vector-predicated reduction opcode.
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Hook for matching ConstantSDNode predicate.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LLVM_ABI bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
LLVM_ABI CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical OR between different comparisons of identical values: ((X op1 Y) | (X ...
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
LLVM_ABI Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given e...
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
GenericUniformityInfo< SSAContext > UniformityInfo
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
bool operator<(int64_t V1, const APSInt &V2)
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
void fill(R &&Range, T &&Value)
Provide wrappers to std::fill which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI SDValue peekThroughExtractSubvectors(SDValue V)
Return the non-extracted vector source operand of V if it exists.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
LLVM_ABI SDValue getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs)
If V is a bitwise not, returns the inverted operand.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
LLVM_ABI bool isOneOrOneSplatFP(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant floating-point value, or a splatted vector of a constant float...
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
auto cast_or_null(const Y &Val)
LLVM_ABI bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
LLVM_ABI bool isMinSignedConstant(SDValue V)
Returns true if V is a constant min signed integer value.
LLVM_ABI ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
LLVM_ABI SDValue peekThroughInsertVectorElt(SDValue V, const APInt &DemandedElts)
Recursively peek through INSERT_VECTOR_ELT nodes, returning the source vector operand of V,...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void checkForCycles(const SelectionDAG *DAG, bool force=false)
void sort(IteratorTy Start, IteratorTy End)
LLVM_READONLY APFloat minimumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimumNumber semantics.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI SDValue peekThroughTruncates(SDValue V)
Return the non-truncated source operand of V if it exists.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr std::underlying_type_t< Enum > to_underlying(Enum E)
Returns underlying integer value of an enum.
FunctionAddr VTableAddr Count
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI SDValue peekThroughOneUseBitcasts(SDValue V)
Return the non-bitcasted and one-use source operand of V if it exists.
CodeGenOptLevel
Code generation optimization level.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
LLVM_ABI bool isNullConstantOrUndef(SDValue V)
Returns true if V is a constant integer zero or an UNDEF node.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
OutputIt copy(R &&Range, OutputIt Out)
constexpr unsigned BitWidth
bool funcReturnsFirstArgOfCall(const CallInst &CI)
Returns true if the parent of CI returns CI's first argument after calling CI.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isZeroOrZeroSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
LLVM_ABI bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI bool isZeroOrZeroSplatFP(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant (+/-)0.0 floating-point value or a splatted vector thereof (wi...
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
LLVM_READONLY APFloat maximumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximumNumber semantics.
LLVM_ABI bool isOnesOrOnesSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
LLVM_ABI bool isNeutralConstant(unsigned Opc, SDNodeFlags Flags, SDValue V, unsigned OperandNo)
Returns true if V is a neutral element of Opc with Flags.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
MDNode * TBAAStruct
The tag for type-based alias analysis (tbaa struct).
MDNode * TBAA
The tag for type-based alias analysis.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represents offset+length into a ConstantDataArray.
uint64_t Length
Length of the slice.
uint64_t Offset
Slice starts at this Offset.
void move(uint64_t Delta)
Moves the Offset and adjusts Length accordingly.
const ConstantDataArray * Array
ConstantDataArray pointer.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
intptr_t getRawBits() const
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
void makeNonNegative()
Make this value non-negative.
static LLVM_ABI KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static LLVM_ABI KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
void makeNegative()
Make this value negative.
void setAllConflict()
Make all bits known to be both zero and one.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
static LLVM_ABI KnownBits abdu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for abdu(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
static LLVM_ABI KnownBits avgFloorU(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgFloorU.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static LLVM_ABI KnownBits computeForSubBorrow(const KnownBits &LHS, KnownBits RHS, const KnownBits &Borrow)
Compute known bits results from subtracting RHS from LHS with 1-bit Borrow.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits abds(KnownBits LHS, KnownBits RHS)
Compute known bits for abds(LHS, RHS).
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
static LLVM_ABI KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static LLVM_ABI KnownBits avgFloorS(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgFloorS.
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits avgCeilU(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgCeilU.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
static LLVM_ABI KnownBits avgCeilS(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgCeilS.
This class contains a discriminated union of information about pointers in memory operands,...
LLVM_ABI bool isDereferenceable(unsigned Size, LLVMContext &C, const DataLayout &DL) const
Return true if memory region [V, V+Offset+Size) is known to be dereferenceable.
LLVM_ABI unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Clients of various APIs that cause global effects on the DAG can optionally implement this interface.
DAGUpdateListener *const Next
virtual void NodeDeleted(SDNode *N, SDNode *E)
The node N that was deleted and, if E is not null, an equivalent node E that replaced it.
virtual void NodeInserted(SDNode *N)
The node N that was inserted.
virtual void NodeUpdated(SDNode *N)
The node N that was updated.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)