// Out-of-line virtual method definition serving as the vtable "anchor" for
// DAGNodeDeletedListener: keying the vtable emission to this translation unit
// avoids duplicating it in every TU that includes the header.
void SelectionDAG::DAGNodeDeletedListener::anchor() {}
// Out-of-line virtual method definition serving as the vtable "anchor" for
// DAGNodeInsertedListener (see the note on DAGNodeDeletedListener::anchor).
void SelectionDAG::DAGNodeInsertedListener::anchor() {}
104#define DEBUG_TYPE "selectiondag"
108 cl::desc(
"Gang up loads and stores generated by inlining of memcpy"));
111 cl::desc(
"Number limit for gluing ld/st of memcpy."),
116 cl::desc(
"DAG combiner limit number of steps when searching DAG "
117 "for predecessor nodes"));
134 return getValueAPF().bitwiseIsEqual(V);
156 N->getValueType(0).getVectorElementType().getSizeInBits();
157 if (
auto *Op0 = dyn_cast<ConstantSDNode>(
N->getOperand(0))) {
158 SplatVal = Op0->getAPIntValue().
trunc(EltSize);
161 if (
auto *Op0 = dyn_cast<ConstantFPSDNode>(
N->getOperand(0))) {
162 SplatVal = Op0->getValueAPF().bitcastToAPInt().
trunc(EltSize);
167 auto *BV = dyn_cast<BuildVectorSDNode>(
N);
172 unsigned SplatBitSize;
174 unsigned EltSize =
N->getValueType(0).getVectorElementType().getSizeInBits();
179 const bool IsBigEndian =
false;
180 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
181 EltSize, IsBigEndian) &&
182 EltSize == SplatBitSize;
191 N =
N->getOperand(0).getNode();
200 unsigned i = 0, e =
N->getNumOperands();
203 while (i != e &&
N->getOperand(i).isUndef())
207 if (i == e)
return false;
218 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
220 if (CN->getAPIntValue().countr_one() < EltSize)
223 if (CFPN->getValueAPF().bitcastToAPInt().countr_one() < EltSize)
231 for (++i; i != e; ++i)
232 if (
N->getOperand(i) != NotZero && !
N->getOperand(i).isUndef())
240 N =
N->getOperand(0).getNode();
249 bool IsAllUndef =
true;
262 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
264 if (CN->getAPIntValue().countr_zero() < EltSize)
267 if (CFPN->getValueAPF().bitcastToAPInt().countr_zero() < EltSize)
294 if (!isa<ConstantSDNode>(
Op))
307 if (!isa<ConstantFPSDNode>(
Op))
315 assert(
N->getValueType(0).isVector() &&
"Expected a vector!");
317 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
318 if (EltSize <= NewEltSize)
322 return (
N->getOperand(0).getValueType().getScalarSizeInBits() <=
327 return (
N->getOperand(0).getValueType().getScalarSizeInBits() <=
337 if (!isa<ConstantSDNode>(
Op))
340 APInt C =
Op->getAsAPIntVal().trunc(EltSize);
341 if (
Signed &&
C.trunc(NewEltSize).sext(EltSize) !=
C)
343 if (!
Signed &&
C.trunc(NewEltSize).zext(EltSize) !=
C)
354 if (
N->getNumOperands() == 0)
360 return N->getOpcode() ==
ISD::FREEZE &&
N->getOperand(0).isUndef();
363template <
typename ConstNodeType>
365 std::function<
bool(ConstNodeType *)>
Match,
368 if (
auto *
C = dyn_cast<ConstNodeType>(
Op))
376 EVT SVT =
Op.getValueType().getScalarType();
378 if (AllowUndefs &&
Op.getOperand(i).isUndef()) {
384 auto *Cst = dyn_cast<ConstNodeType>(
Op.getOperand(i));
385 if (!Cst || Cst->getValueType(0) != SVT || !
Match(Cst))
391template bool ISD::matchUnaryPredicateImpl<ConstantSDNode>(
393template bool ISD::matchUnaryPredicateImpl<ConstantFPSDNode>(
399 bool AllowUndefs,
bool AllowTypeMismatch) {
400 if (!AllowTypeMismatch &&
LHS.getValueType() !=
RHS.getValueType())
404 if (
auto *LHSCst = dyn_cast<ConstantSDNode>(
LHS))
405 if (
auto *RHSCst = dyn_cast<ConstantSDNode>(
RHS))
406 return Match(LHSCst, RHSCst);
409 if (
LHS.getOpcode() !=
RHS.getOpcode() ||
414 EVT SVT =
LHS.getValueType().getScalarType();
415 for (
unsigned i = 0, e =
LHS.getNumOperands(); i != e; ++i) {
418 bool LHSUndef = AllowUndefs && LHSOp.
isUndef();
419 bool RHSUndef = AllowUndefs && RHSOp.
isUndef();
420 auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
421 auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
422 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
424 if (!AllowTypeMismatch && (LHSOp.
getValueType() != SVT ||
427 if (!
Match(LHSCst, RHSCst))
449 switch (VecReduceOpcode) {
454 case ISD::VP_REDUCE_FADD:
455 case ISD::VP_REDUCE_SEQ_FADD:
459 case ISD::VP_REDUCE_FMUL:
460 case ISD::VP_REDUCE_SEQ_FMUL:
463 case ISD::VP_REDUCE_ADD:
466 case ISD::VP_REDUCE_MUL:
469 case ISD::VP_REDUCE_AND:
472 case ISD::VP_REDUCE_OR:
475 case ISD::VP_REDUCE_XOR:
478 case ISD::VP_REDUCE_SMAX:
481 case ISD::VP_REDUCE_SMIN:
484 case ISD::VP_REDUCE_UMAX:
487 case ISD::VP_REDUCE_UMIN:
490 case ISD::VP_REDUCE_FMAX:
493 case ISD::VP_REDUCE_FMIN:
496 case ISD::VP_REDUCE_FMAXIMUM:
499 case ISD::VP_REDUCE_FMINIMUM:
508#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) \
511#include "llvm/IR/VPIntrinsics.def"
519#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
520#define VP_PROPERTY_BINARYOP return true;
521#define END_REGISTER_VP_SDNODE(VPSD) break;
522#include "llvm/IR/VPIntrinsics.def"
531 case ISD::VP_REDUCE_ADD:
532 case ISD::VP_REDUCE_MUL:
533 case ISD::VP_REDUCE_AND:
534 case ISD::VP_REDUCE_OR:
535 case ISD::VP_REDUCE_XOR:
536 case ISD::VP_REDUCE_SMAX:
537 case ISD::VP_REDUCE_SMIN:
538 case ISD::VP_REDUCE_UMAX:
539 case ISD::VP_REDUCE_UMIN:
540 case ISD::VP_REDUCE_FMAX:
541 case ISD::VP_REDUCE_FMIN:
542 case ISD::VP_REDUCE_FMAXIMUM:
543 case ISD::VP_REDUCE_FMINIMUM:
544 case ISD::VP_REDUCE_FADD:
545 case ISD::VP_REDUCE_FMUL:
546 case ISD::VP_REDUCE_SEQ_FADD:
547 case ISD::VP_REDUCE_SEQ_FMUL:
557#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...) \
560#include "llvm/IR/VPIntrinsics.def"
569#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
572#include "llvm/IR/VPIntrinsics.def"
582#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) case ISD::VPOPC:
583#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) return ISD::SDOPC;
584#define END_REGISTER_VP_SDNODE(VPOPC) break;
585#include "llvm/IR/VPIntrinsics.def"
594#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) break;
595#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) case ISD::SDOPC:
596#define END_REGISTER_VP_SDNODE(VPOPC) return ISD::VPOPC;
597#include "llvm/IR/VPIntrinsics.def"
644 bool isIntegerLike) {
669 bool IsInteger =
Type.isInteger();
674 unsigned Op = Op1 | Op2;
690 bool IsInteger =
Type.isInteger();
725 ID.AddPointer(VTList.
VTs);
731 for (
const auto &
Op : Ops) {
732 ID.AddPointer(
Op.getNode());
733 ID.AddInteger(
Op.getResNo());
740 for (
const auto &
Op : Ops) {
741 ID.AddPointer(
Op.getNode());
742 ID.AddInteger(
Op.getResNo());
755 switch (
N->getOpcode()) {
764 ID.AddPointer(
C->getConstantIntValue());
765 ID.AddBoolean(
C->isOpaque());
770 ID.AddPointer(cast<ConstantFPSDNode>(
N)->getConstantFPValue());
786 ID.AddInteger(cast<RegisterSDNode>(
N)->
getReg().
id());
789 ID.AddPointer(cast<RegisterMaskSDNode>(
N)->getRegMask());
792 ID.AddPointer(cast<SrcValueSDNode>(
N)->getValue());
796 ID.AddInteger(cast<FrameIndexSDNode>(
N)->getIndex());
800 if (cast<LifetimeSDNode>(
N)->hasOffset()) {
801 ID.AddInteger(cast<LifetimeSDNode>(
N)->
getSize());
806 ID.AddInteger(cast<PseudoProbeSDNode>(
N)->getGuid());
807 ID.AddInteger(cast<PseudoProbeSDNode>(
N)->getIndex());
808 ID.AddInteger(cast<PseudoProbeSDNode>(
N)->getAttributes());
812 ID.AddInteger(cast<JumpTableSDNode>(
N)->getIndex());
813 ID.AddInteger(cast<JumpTableSDNode>(
N)->getTargetFlags());
818 ID.AddInteger(CP->getAlign().value());
819 ID.AddInteger(CP->getOffset());
820 if (CP->isMachineConstantPoolEntry())
821 CP->getMachineCPVal()->addSelectionDAGCSEId(
ID);
823 ID.AddPointer(CP->getConstVal());
824 ID.AddInteger(CP->getTargetFlags());
836 ID.AddInteger(LD->getMemoryVT().getRawBits());
837 ID.AddInteger(LD->getRawSubclassData());
838 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
839 ID.AddInteger(LD->getMemOperand()->getFlags());
844 ID.AddInteger(ST->getMemoryVT().getRawBits());
845 ID.AddInteger(ST->getRawSubclassData());
846 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
847 ID.AddInteger(ST->getMemOperand()->getFlags());
858 case ISD::VP_STORE: {
866 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: {
873 case ISD::EXPERIMENTAL_VP_STRIDED_STORE: {
880 case ISD::VP_GATHER: {
888 case ISD::VP_SCATTER: {
977 if (
auto *MN = dyn_cast<MemIntrinsicSDNode>(
N)) {
978 ID.AddInteger(MN->getRawSubclassData());
979 ID.AddInteger(MN->getPointerInfo().getAddrSpace());
980 ID.AddInteger(MN->getMemOperand()->getFlags());
981 ID.AddInteger(MN->getMemoryVT().getRawBits());
1004 if (
N->getValueType(0) == MVT::Glue)
1007 switch (
N->getOpcode()) {
1015 for (
unsigned i = 1, e =
N->getNumValues(); i != e; ++i)
1016 if (
N->getValueType(i) == MVT::Glue)
1033 if (Node.use_empty())
1048 while (!DeadNodes.
empty()) {
1057 DUL->NodeDeleted(
N,
nullptr);
1060 RemoveNodeFromCSEMaps(
N);
1091 RemoveNodeFromCSEMaps(
N);
1095 DeleteNodeNotInCSEMaps(
N);
1098void SelectionDAG::DeleteNodeNotInCSEMaps(
SDNode *
N) {
1099 assert(
N->getIterator() != AllNodes.begin() &&
1100 "Cannot delete the entry node!");
1101 assert(
N->use_empty() &&
"Cannot delete a node that is not dead!");
1110 assert(!(V->isVariadic() && isParameter));
1112 ByvalParmDbgValues.push_back(V);
1114 DbgValues.push_back(V);
1115 for (
const SDNode *Node : V->getSDNodes())
1117 DbgValMap[Node].push_back(V);
1122 if (
I == DbgValMap.end())
1124 for (
auto &Val:
I->second)
1125 Val->setIsInvalidated();
1129void SelectionDAG::DeallocateNode(
SDNode *
N) {
1153 switch (
N->getOpcode()) {
1159 EVT VT =
N->getValueType(0);
1160 assert(
N->getNumValues() == 1 &&
"Too many results!");
1162 "Wrong return type!");
1163 assert(
N->getNumOperands() == 2 &&
"Wrong number of operands!");
1164 assert(
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType() &&
1165 "Mismatched operand types!");
1167 "Wrong operand type!");
1169 "Wrong return type size");
1173 assert(
N->getNumValues() == 1 &&
"Too many results!");
1174 assert(
N->getValueType(0).isVector() &&
"Wrong return type!");
1175 assert(
N->getNumOperands() ==
N->getValueType(0).getVectorNumElements() &&
1176 "Wrong number of operands!");
1177 EVT EltVT =
N->getValueType(0).getVectorElementType();
1179 assert((
Op.getValueType() == EltVT ||
1180 (EltVT.
isInteger() &&
Op.getValueType().isInteger() &&
1181 EltVT.
bitsLE(
Op.getValueType()))) &&
1182 "Wrong operand type!");
1183 assert(
Op.getValueType() ==
N->getOperand(0).getValueType() &&
1184 "Operands must all have the same type");
1196void SelectionDAG::InsertNode(
SDNode *
N) {
1197 AllNodes.push_back(
N);
1199 N->PersistentId = NextPersistentId++;
1202 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1203 DUL->NodeInserted(
N);
1210bool SelectionDAG::RemoveNodeFromCSEMaps(
SDNode *
N) {
1211 bool Erased =
false;
1212 switch (
N->getOpcode()) {
1215 assert(CondCodeNodes[cast<CondCodeSDNode>(
N)->
get()] &&
1216 "Cond code doesn't exist!");
1217 Erased = CondCodeNodes[cast<CondCodeSDNode>(
N)->get()] !=
nullptr;
1218 CondCodeNodes[cast<CondCodeSDNode>(
N)->get()] =
nullptr;
1221 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(
N)->getSymbol());
1225 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
1230 auto *MCSN = cast<MCSymbolSDNode>(
N);
1231 Erased = MCSymbols.erase(MCSN->getMCSymbol());
1235 EVT VT = cast<VTSDNode>(
N)->getVT();
1237 Erased = ExtendedValueTypeNodes.erase(VT);
1248 Erased = CSEMap.RemoveNode(
N);
1255 if (!Erased &&
N->getValueType(
N->getNumValues()-1) != MVT::Glue &&
1270SelectionDAG::AddModifiedNodeToCSEMaps(
SDNode *
N) {
1274 SDNode *Existing = CSEMap.GetOrInsertNode(
N);
1275 if (Existing !=
N) {
1283 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1284 DUL->NodeDeleted(
N, Existing);
1285 DeleteNodeNotInCSEMaps(
N);
1291 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1292 DUL->NodeUpdated(
N);
1310 Node->intersectFlagsWith(
N->getFlags());
1330 Node->intersectFlagsWith(
N->getFlags());
1348 Node->intersectFlagsWith(
N->getFlags());
1361 : TM(tm), OptLevel(OL), EntryNode(ISD::EntryToken, 0,
DebugLoc(),
1364 InsertNode(&EntryNode);
1375 SDAGISelPass = PassPtr;
1379 LibInfo = LibraryInfo;
1385 FnVarLocs = VarLocs;
1389 assert(!UpdateListeners &&
"Dangling registered DAGUpdateListeners");
1391 OperandRecycler.clear(OperandAllocator);
1399void SelectionDAG::allnodes_clear() {
1400 assert(&*AllNodes.begin() == &EntryNode);
1401 AllNodes.remove(AllNodes.begin());
1402 while (!AllNodes.empty())
1403 DeallocateNode(&AllNodes.front());
1405 NextPersistentId = 0;
1411 SDNode *
N = CSEMap.FindNodeOrInsertPos(
ID, InsertPos);
1413 switch (
N->getOpcode()) {
1418 "debug location. Use another overload.");
1425 const SDLoc &
DL,
void *&InsertPos) {
1426 SDNode *
N = CSEMap.FindNodeOrInsertPos(
ID, InsertPos);
1428 switch (
N->getOpcode()) {
1434 if (
N->getDebugLoc() !=
DL.getDebugLoc())
1441 if (
DL.getIROrder() &&
DL.getIROrder() <
N->getIROrder())
1442 N->setDebugLoc(
DL.getDebugLoc());
1451 OperandRecycler.clear(OperandAllocator);
1452 OperandAllocator.
Reset();
1455 ExtendedValueTypeNodes.clear();
1456 ExternalSymbols.clear();
1457 TargetExternalSymbols.clear();
1460 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
nullptr);
1461 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
nullptr);
1463 EntryNode.UseList =
nullptr;
1464 InsertNode(&EntryNode);
1470 return VT.
bitsGT(
Op.getValueType())
1476std::pair<SDValue, SDValue>
1480 "Strict no-op FP extend/round not allowed.");
1487 return std::pair<SDValue, SDValue>(Res,
SDValue(Res.
getNode(), 1));
1491 return VT.
bitsGT(
Op.getValueType()) ?
1497 return VT.
bitsGT(
Op.getValueType()) ?
1503 return VT.
bitsGT(
Op.getValueType()) ?
1511 auto Type =
Op.getValueType();
1515 auto Size =
Op.getValueSizeInBits();
1526 auto Type =
Op.getValueType();
1530 auto Size =
Op.getValueSizeInBits();
1541 auto Type =
Op.getValueType();
1545 auto Size =
Op.getValueSizeInBits();
1563 EVT OpVT =
Op.getValueType();
1565 "Cannot getZeroExtendInReg FP types");
1567 "getZeroExtendInReg type should be vector iff the operand "
1571 "Vector element counts must match in getZeroExtendInReg");
1583 EVT OpVT =
Op.getValueType();
1585 "Cannot getVPZeroExtendInReg FP types");
1587 "getVPZeroExtendInReg type and operand type should be vector!");
1589 "Vector element counts must match in getZeroExtendInReg");
1628 return getNode(ISD::VP_XOR,
DL, VT, Val, TrueValue, Mask, EVL);
1639 return getNode(ISD::VP_ZERO_EXTEND,
DL, VT,
Op, Mask, EVL);
1641 return getNode(ISD::VP_TRUNCATE,
DL, VT,
Op, Mask, EVL);
1661 bool isT,
bool isO) {
1667 bool isT,
bool isO) {
1668 return getConstant(*ConstantInt::get(*Context, Val),
DL, VT, isT, isO);
1672 EVT VT,
bool isT,
bool isO) {
1680 if (isa<VectorType>(Elt->
getType()))
1695 Elt = ConstantInt::get(*
getContext(), NewVal);
1714 "Can only handle an even split!");
1718 for (
unsigned i = 0; i != Parts; ++i)
1720 NewVal.
extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits),
DL,
1721 ViaEltVT, isT, isO));
1726 unsigned ViaVecNumElts = VT.
getSizeInBits() / ViaEltSizeInBits;
1737 NewVal.
extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits),
DL,
1738 ViaEltVT, isT, isO));
1743 std::reverse(EltParts.
begin(), EltParts.
end());
1762 "APInt size does not match type size!");
1771 if ((
N = FindNodeOrInsertPos(
ID,
DL, IP)))
1776 N = newSDNode<ConstantSDNode>(isT, isO, Elt, VTs);
1777 CSEMap.InsertNode(
N, IP);
1789 bool isT,
bool isO) {
1797 IsTarget, IsOpaque);
1829 EVT VT,
bool isTarget) {
1837 if (isa<VectorType>(Elt->
getType()))
1850 if ((
N = FindNodeOrInsertPos(
ID,
DL, IP)))
1855 N = newSDNode<ConstantFPSDNode>(isTarget, Elt, VTs);
1856 CSEMap.InsertNode(
N, IP);
1870 if (EltVT == MVT::f32)
1872 if (EltVT == MVT::f64)
1874 if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1875 EltVT == MVT::f16 || EltVT == MVT::bf16) {
1886 EVT VT, int64_t
Offset,
bool isTargetGA,
1887 unsigned TargetFlags) {
1888 assert((TargetFlags == 0 || isTargetGA) &&
1889 "Cannot set target flags on target-independent globals");
1907 ID.AddInteger(TargetFlags);
1909 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
1912 auto *
N = newSDNode<GlobalAddressSDNode>(
1913 Opc,
DL.getIROrder(),
DL.getDebugLoc(), GV, VTs,
Offset, TargetFlags);
1914 CSEMap.InsertNode(
N, IP);
1926 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1929 auto *
N = newSDNode<FrameIndexSDNode>(FI, VTs, isTarget);
1930 CSEMap.InsertNode(
N, IP);
1936 unsigned TargetFlags) {
1937 assert((TargetFlags == 0 || isTarget) &&
1938 "Cannot set target flags on target-independent jump tables");
1944 ID.AddInteger(TargetFlags);
1946 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1949 auto *
N = newSDNode<JumpTableSDNode>(JTI, VTs, isTarget, TargetFlags);
1950 CSEMap.InsertNode(
N, IP);
1964 bool isTarget,
unsigned TargetFlags) {
1965 assert((TargetFlags == 0 || isTarget) &&
1966 "Cannot set target flags on target-independent globals");
1975 ID.AddInteger(Alignment->value());
1978 ID.AddInteger(TargetFlags);
1980 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1983 auto *
N = newSDNode<ConstantPoolSDNode>(isTarget,
C, VTs,
Offset, *Alignment,
1985 CSEMap.InsertNode(
N, IP);
1994 bool isTarget,
unsigned TargetFlags) {
1995 assert((TargetFlags == 0 || isTarget) &&
1996 "Cannot set target flags on target-independent globals");
2003 ID.AddInteger(Alignment->value());
2005 C->addSelectionDAGCSEId(
ID);
2006 ID.AddInteger(TargetFlags);
2008 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2011 auto *
N = newSDNode<ConstantPoolSDNode>(isTarget,
C, VTs,
Offset, *Alignment,
2013 CSEMap.InsertNode(
N, IP);
2023 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2026 auto *
N = newSDNode<BasicBlockSDNode>(
MBB);
2027 CSEMap.InsertNode(
N, IP);
2034 ValueTypeNodes.size())
2041 N = newSDNode<VTSDNode>(VT);
2049 N = newSDNode<ExternalSymbolSDNode>(
false,
Sym, 0,
getVTList(VT));
2064 unsigned TargetFlags) {
2066 TargetExternalSymbols[std::pair<std::string, unsigned>(
Sym, TargetFlags)];
2068 N = newSDNode<ExternalSymbolSDNode>(
true,
Sym, TargetFlags,
getVTList(VT));
2074 if ((
unsigned)
Cond >= CondCodeNodes.size())
2075 CondCodeNodes.resize(
Cond+1);
2077 if (!CondCodeNodes[
Cond]) {
2078 auto *
N = newSDNode<CondCodeSDNode>(
Cond);
2079 CondCodeNodes[
Cond] =
N;
2087 bool ConstantFold) {
2089 "APInt size does not match type size!");
2106 bool ConstantFold) {
2107 if (EC.isScalable())
2120 const APInt &StepVal) {
2144 "Must have the same number of vector elements as mask elements!");
2146 "Invalid VECTOR_SHUFFLE");
2154 int NElts = Mask.size();
2156 [&](
int M) {
return M < (NElts * 2) && M >= -1; }) &&
2157 "Index out of range");
2165 for (
int i = 0; i != NElts; ++i)
2166 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
2182 for (
int i = 0; i < NElts; ++i) {
2183 if (MaskVec[i] <
Offset || MaskVec[i] >= (
Offset + NElts))
2187 if (UndefElements[MaskVec[i] -
Offset]) {
2193 if (!UndefElements[i])
2197 if (
auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
2198 BlendSplat(N1BV, 0);
2199 if (
auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
2200 BlendSplat(N2BV, NElts);
2205 bool AllLHS =
true, AllRHS =
true;
2207 for (
int i = 0; i != NElts; ++i) {
2208 if (MaskVec[i] >= NElts) {
2213 }
else if (MaskVec[i] >= 0) {
2217 if (AllLHS && AllRHS)
2219 if (AllLHS && !N2Undef)
2232 bool Identity =
true, AllSame =
true;
2233 for (
int i = 0; i != NElts; ++i) {
2234 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity =
false;
2235 if (MaskVec[i] != MaskVec[0]) AllSame =
false;
2237 if (Identity && NElts)
2247 V = V->getOperand(0);
2250 if (
auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
2270 if (AllSame && SameNumElts) {
2271 EVT BuildVT = BV->getValueType(0);
2288 for (
int i = 0; i != NElts; ++i)
2289 ID.AddInteger(MaskVec[i]);
2292 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
2298 int *MaskAlloc = OperandAllocator.
Allocate<
int>(NElts);
2301 auto *
N = newSDNode<ShuffleVectorSDNode>(VTs, dl.
getIROrder(),
2303 createOperands(
N, Ops);
2305 CSEMap.InsertNode(
N, IP);
2326 ID.AddInteger(Reg.id());
2328 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2331 auto *
N = newSDNode<RegisterSDNode>(Reg, VTs);
2333 CSEMap.InsertNode(
N, IP);
2341 ID.AddPointer(RegMask);
2343 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2346 auto *
N = newSDNode<RegisterMaskSDNode>(RegMask);
2347 CSEMap.InsertNode(
N, IP);
2362 ID.AddPointer(Label);
2364 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2369 createOperands(
N, Ops);
2371 CSEMap.InsertNode(
N, IP);
2377 int64_t
Offset,
bool isTarget,
2378 unsigned TargetFlags) {
2386 ID.AddInteger(TargetFlags);
2388 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2391 auto *
N = newSDNode<BlockAddressSDNode>(Opc, VTs, BA,
Offset, TargetFlags);
2392 CSEMap.InsertNode(
N, IP);
2403 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2406 auto *
N = newSDNode<SrcValueSDNode>(V);
2407 CSEMap.InsertNode(
N, IP);
2418 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2421 auto *
N = newSDNode<MDNodeSDNode>(MD);
2422 CSEMap.InsertNode(
N, IP);
2428 if (VT == V.getValueType())
2435 unsigned SrcAS,
unsigned DestAS) {
2440 ID.AddInteger(SrcAS);
2441 ID.AddInteger(DestAS);
2444 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
2448 VTs, SrcAS, DestAS);
2449 createOperands(
N, Ops);
2451 CSEMap.InsertNode(
N, IP);
2463 EVT OpTy =
Op.getValueType();
2465 if (OpTy == ShTy || OpTy.
isVector())
return Op;
2478 std::deque<SDValue> Subvectors = {Op1};
2479 for (
unsigned I = 0;
I < ScaleFactor;
I++) {
2481 Subvectors.push_back(
2486 while (Subvectors.size() > 1) {
2487 Subvectors.push_back(
2489 Subvectors.pop_front();
2490 Subvectors.pop_front();
2493 assert(Subvectors.size() == 1 &&
2494 "There should only be one subvector after tree flattening");
2496 return Subvectors[0];
2509 if (
Op.getNode() != FPNode)
2513 while (!Worklist.
empty()) {
2546 std::optional<unsigned> CallRetResNo) {
2548 EVT VT = Node->getValueType(0);
2549 unsigned NumResults = Node->getNumValues();
2555 auto getVecDesc = [&]() ->
VecDesc const * {
2556 for (
bool Masked : {
false,
true}) {
2567 if (VT.
isVector() && !(VD = getVecDesc()))
2577 auto *ST = cast<StoreSDNode>(
User);
2578 SDValue StoreValue = ST->getValue();
2579 unsigned ResNo = StoreValue.
getResNo();
2581 if (CallRetResNo == ResNo)
2584 if (!ST->isSimple() || ST->getAddressSpace() != 0)
2587 if (StoresInChain && ST->getChain() != StoresInChain)
2591 if (ST->getAlign() <
2599 ResultStores[ResNo] = ST;
2600 StoresInChain = ST->getChain();
2604 auto AddArgListEntry = [&](
SDValue Node,
Type *Ty) {
2608 Args.push_back(Entry);
2612 for (
const SDValue &
Op : Node->op_values()) {
2613 EVT ArgVT =
Op.getValueType();
2615 AddArgListEntry(
Op, ArgTy);
2622 if (ResNo == CallRetResNo)
2624 EVT ResVT = Node->getValueType(ResNo);
2626 ResultPtrs[ResNo] = ResultPtr;
2639 Type *RetType = CallRetResNo.has_value()
2640 ? Node->getValueType(*CallRetResNo).getTypeForEVT(Ctx)
2652 if (ResNo == CallRetResNo) {
2660 PtrInfo = ST->getPointerInfo();
2666 getLoad(Node->getValueType(ResNo),
DL, CallChain, ResultPtr, PtrInfo);
2667 Results.push_back(LoadResult);
2670 if (CallRetResNo && !Node->hasAnyUseOfValue(*CallRetResNo)) {
2692 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2693 EVT VT = Node->getValueType(0);
2694 SDValue Tmp1 = Node->getOperand(0);
2695 SDValue Tmp2 = Node->getOperand(1);
2696 const MaybeAlign MA(Node->getConstantOperandVal(3));
2728 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2729 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2740 Align RedAlign = UseABI ?
DL.getABITypeAlign(Ty) :
DL.getPrefTypeAlign(Ty);
2750 if (RedAlign > StackAlign) {
2753 unsigned NumIntermediates;
2755 NumIntermediates, RegisterVT);
2757 Align RedAlign2 = UseABI ?
DL.getABITypeAlign(Ty) :
DL.getPrefTypeAlign(Ty);
2758 if (RedAlign2 < RedAlign)
2759 RedAlign = RedAlign2;
2764 RedAlign = std::min(RedAlign, StackAlign);
2779 false,
nullptr, StackID);
2794 "Don't know how to choose the maximum size when creating a stack "
2803 Align Align = std::max(
DL.getPrefTypeAlign(Ty1),
DL.getPrefTypeAlign(Ty2));
2811 auto GetUndefBooleanConstant = [&]() {
2850 return GetUndefBooleanConstant();
2855 return GetUndefBooleanConstant();
2864 const APInt &C2 = N2C->getAPIntValue();
2866 const APInt &C1 = N1C->getAPIntValue();
2873 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2874 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2876 if (N1CFP && N2CFP) {
2881 return GetUndefBooleanConstant();
2886 return GetUndefBooleanConstant();
2892 return GetUndefBooleanConstant();
2897 return GetUndefBooleanConstant();
2902 return GetUndefBooleanConstant();
2908 return GetUndefBooleanConstant();
2937 return getSetCC(dl, VT, N2, N1, SwappedCond);
2938 }
else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2953 return GetUndefBooleanConstant();
2964 unsigned BitWidth =
Op.getScalarValueSizeInBits();
2972 unsigned Depth)
const {
2980 const APInt &DemandedElts,
2981 unsigned Depth)
const {
2988 unsigned Depth )
const {
2994 unsigned Depth)
const {
2999 const APInt &DemandedElts,
3000 unsigned Depth)
const {
3001 EVT VT =
Op.getValueType();
3008 for (
unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
3009 if (!DemandedElts[EltIdx])
3013 KnownZeroElements.
setBit(EltIdx);
3015 return KnownZeroElements;
3025 unsigned Opcode = V.getOpcode();
3026 EVT VT = V.getValueType();
3029 "scalable demanded bits are ignored");
3041 UndefElts = V.getOperand(0).isUndef()
3050 APInt UndefLHS, UndefRHS;
3055 UndefElts = UndefLHS | UndefRHS;
3085 for (
unsigned i = 0; i != NumElts; ++i) {
3091 if (!DemandedElts[i])
3093 if (Scl && Scl !=
Op)
3103 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
3104 for (
int i = 0; i != (int)NumElts; ++i) {
3110 if (!DemandedElts[i])
3112 if (M < (
int)NumElts)
3115 DemandedRHS.
setBit(M - NumElts);
3127 auto CheckSplatSrc = [&](
SDValue Src,
const APInt &SrcElts) {
3129 return (SrcElts.popcount() == 1) ||
3131 (SrcElts & SrcUndefs).
isZero());
3133 if (!DemandedLHS.
isZero())
3134 return CheckSplatSrc(V.getOperand(0), DemandedLHS);
3135 return CheckSplatSrc(V.getOperand(1), DemandedRHS);
3139 SDValue Src = V.getOperand(0);
3141 if (Src.getValueType().isScalableVector())
3144 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3157 SDValue Src = V.getOperand(0);
3159 if (Src.getValueType().isScalableVector())
3161 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3163 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
3165 UndefElts = UndefSrcElts.
trunc(NumElts);
3171 SDValue Src = V.getOperand(0);
3172 EVT SrcVT = Src.getValueType();
3182 if ((
BitWidth % SrcBitWidth) == 0) {
3184 unsigned Scale =
BitWidth / SrcBitWidth;
3186 APInt ScaledDemandedElts =
3188 for (
unsigned I = 0;
I != Scale; ++
I) {
3192 SubDemandedElts &= ScaledDemandedElts;
3196 if (!SubUndefElts.
isZero())
3210 EVT VT = V.getValueType();
3220 (AllowUndefs || !UndefElts);
3226 EVT VT = V.getValueType();
3227 unsigned Opcode = V.getOpcode();
3248 SplatIdx = (UndefElts & DemandedElts).
countr_one();
3262 auto *SVN = cast<ShuffleVectorSDNode>(V);
3263 if (!SVN->isSplat())
3265 int Idx = SVN->getSplatIndex();
3266 int NumElts = V.getValueType().getVectorNumElements();
3267 SplatIdx =
Idx % NumElts;
3268 return V.getOperand(
Idx / NumElts);
3284 if (LegalSVT.
bitsLT(SVT))
3293std::optional<ConstantRange>
3295 unsigned Depth)
const {
3298 "Unknown shift node");
3300 unsigned BitWidth = V.getScalarValueSizeInBits();
3302 if (
auto *Cst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
3303 const APInt &ShAmt = Cst->getAPIntValue();
3305 return std::nullopt;
3309 if (
auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1))) {
3310 const APInt *MinAmt =
nullptr, *MaxAmt =
nullptr;
3311 for (
unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3312 if (!DemandedElts[i])
3314 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
3316 MinAmt = MaxAmt =
nullptr;
3319 const APInt &ShAmt = SA->getAPIntValue();
3321 return std::nullopt;
3322 if (!MinAmt || MinAmt->
ugt(ShAmt))
3324 if (!MaxAmt || MaxAmt->ult(ShAmt))
3327 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
3328 "Failed to find matching min/max shift amounts");
3329 if (MinAmt && MaxAmt)
3339 return std::nullopt;
3342std::optional<uint64_t>
3344 unsigned Depth)
const {
3347 "Unknown shift node");
3348 if (std::optional<ConstantRange> AmtRange =
3350 if (
const APInt *ShAmt = AmtRange->getSingleElement())
3351 return ShAmt->getZExtValue();
3352 return std::nullopt;
3355std::optional<uint64_t>
3357 EVT VT = V.getValueType();
3364std::optional<uint64_t>
3366 unsigned Depth)
const {
3369 "Unknown shift node");
3370 if (std::optional<ConstantRange> AmtRange =
3372 return AmtRange->getUnsignedMin().getZExtValue();
3373 return std::nullopt;
3376std::optional<uint64_t>
3378 EVT VT = V.getValueType();
3385std::optional<uint64_t>
3387 unsigned Depth)
const {
3390 "Unknown shift node");
3391 if (std::optional<ConstantRange> AmtRange =
3393 return AmtRange->getUnsignedMax().getZExtValue();
3394 return std::nullopt;
3397std::optional<uint64_t>
3399 EVT VT = V.getValueType();
3410 EVT VT =
Op.getValueType();
3425 unsigned Depth)
const {
3426 unsigned BitWidth =
Op.getScalarValueSizeInBits();
3430 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op)) {
3434 if (
auto *
C = dyn_cast<ConstantFPSDNode>(
Op)) {
3444 assert((!
Op.getValueType().isFixedLengthVector() ||
3445 NumElts ==
Op.getValueType().getVectorNumElements()) &&
3446 "Unexpected vector size");
3451 unsigned Opcode =
Op.getOpcode();
3459 "Expected SPLAT_VECTOR implicit truncation");
3466 unsigned ScalarSize =
Op.getOperand(0).getScalarValueSizeInBits();
3468 "Expected SPLAT_VECTOR_PARTS scalars to cover element width");
3475 const APInt &Step =
Op.getConstantOperandAPInt(0);
3484 const APInt MinNumElts =
3490 .
umul_ov(MinNumElts, Overflow);
3494 const APInt MaxValue = (MaxNumElts - 1).
umul_ov(Step, Overflow);
3502 assert(!
Op.getValueType().isScalableVector());
3506 if (!DemandedElts[i])
3515 "Expected BUILD_VECTOR implicit truncation");
3528 assert(!
Op.getValueType().isScalableVector());
3531 APInt DemandedLHS, DemandedRHS;
3535 DemandedLHS, DemandedRHS))
3540 if (!!DemandedLHS) {
3548 if (!!DemandedRHS) {
3557 const APInt &Multiplier =
Op.getConstantOperandAPInt(0);
3562 if (
Op.getValueType().isScalableVector())
3566 EVT SubVectorVT =
Op.getOperand(0).getValueType();
3569 for (
unsigned i = 0; i != NumSubVectors; ++i) {
3571 DemandedElts.
extractBits(NumSubVectorElts, i * NumSubVectorElts);
3572 if (!!DemandedSub) {
3584 if (
Op.getValueType().isScalableVector())
3593 APInt DemandedSrcElts = DemandedElts;
3598 if (!!DemandedSubElts) {
3603 if (!!DemandedSrcElts) {
3613 if (
Op.getValueType().isScalableVector() || Src.getValueType().isScalableVector())
3616 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3622 if (
Op.getValueType().isScalableVector())
3626 if (DemandedElts != 1)
3637 if (
Op.getValueType().isScalableVector())
3657 if ((
BitWidth % SubBitWidth) == 0) {
3664 unsigned SubScale =
BitWidth / SubBitWidth;
3665 APInt SubDemandedElts(NumElts * SubScale, 0);
3666 for (
unsigned i = 0; i != NumElts; ++i)
3667 if (DemandedElts[i])
3668 SubDemandedElts.
setBit(i * SubScale);
3670 for (
unsigned i = 0; i != SubScale; ++i) {
3673 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
3674 Known.
insertBits(Known2, SubBitWidth * Shifts);
3679 if ((SubBitWidth %
BitWidth) == 0) {
3680 assert(
Op.getValueType().isVector() &&
"Expected bitcast to vector");
3685 unsigned SubScale = SubBitWidth /
BitWidth;
3686 APInt SubDemandedElts =
3691 for (
unsigned i = 0; i != NumElts; ++i)
3692 if (DemandedElts[i]) {
3693 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
3724 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3728 Op.getOperand(0), DemandedElts,
false,
Depth + 1);
3734 if (
Op->getFlags().hasNoSignedWrap() &&
3735 Op.getOperand(0) ==
Op.getOperand(1) &&
3762 unsigned SignBits1 =
3766 unsigned SignBits0 =
3772 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3775 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3776 if (
Op.getResNo() == 0)
3783 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3786 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3787 if (
Op.getResNo() == 0)
3840 if (
Op.getResNo() != 1)
3855 unsigned OpNo =
Op->isStrictFPOpcode() ? 1 : 0;
3867 bool NUW =
Op->getFlags().hasNoUnsignedWrap();
3868 bool NSW =
Op->getFlags().hasNoSignedWrap();
3875 if (std::optional<uint64_t> ShMinAmt =
3884 Op->getFlags().hasExact());
3887 if (std::optional<uint64_t> ShMinAmt =
3895 Op->getFlags().hasExact());
3900 unsigned Amt =
C->getAPIntValue().urem(
BitWidth);
3906 DemandedElts,
Depth + 1);
3931 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3934 unsigned LoBits =
Op.getOperand(0).getScalarValueSizeInBits();
3935 unsigned HiBits =
Op.getOperand(1).getScalarValueSizeInBits();
3938 Known = Known2.
concat(Known);
3952 if (
Op.getResNo() == 0)
3960 EVT EVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
3998 ? cast<MaskedGatherSDNode>(
Op)->getExtensionType()
3999 : cast<MaskedLoadSDNode>(
Op)->getExtensionType();
4001 EVT MemVT = cast<MemSDNode>(
Op)->getMemoryVT();
4014 !
Op.getValueType().isScalableVector()) {
4028 for (
unsigned i = 0; i != NumElts; ++i) {
4029 if (!DemandedElts[i])
4032 if (
auto *CInt = dyn_cast<ConstantInt>(Elt)) {
4038 if (
auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4039 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4050 if (
auto *CInt = dyn_cast<ConstantInt>(Cst)) {
4052 }
else if (
auto *CFP = dyn_cast<ConstantFP>(Cst)) {
4058 }
else if (
Op.getResNo() == 0) {
4059 KnownBits Known0(!LD->getMemoryVT().isScalableVT()
4060 ? LD->getMemoryVT().getFixedSizeInBits()
4062 EVT VT =
Op.getValueType();
4069 if (
const MDNode *MD = LD->getRanges()) {
4080 if (LD->getMemoryVT().isVector())
4081 Known0 = Known0.
trunc(LD->getMemoryVT().getScalarSizeInBits());
4098 if (
Op.getValueType().isScalableVector())
4100 EVT InVT =
Op.getOperand(0).getValueType();
4112 if (
Op.getValueType().isScalableVector())
4114 EVT InVT =
Op.getOperand(0).getValueType();
4130 if (
Op.getValueType().isScalableVector())
4132 EVT InVT =
Op.getOperand(0).getValueType();
4149 EVT VT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
4152 Known.
Zero |= (~InMask);
4153 Known.
One &= (~Known.Zero);
4157 unsigned LogOfAlign =
Log2(cast<AssertAlignSDNode>(
Op)->
getAlign());
4177 Op.getOpcode() ==
ISD::ADD, Flags.hasNoSignedWrap(),
4178 Flags.hasNoUnsignedWrap(), Known, Known2);
4185 if (
Op.getResNo() == 1) {
4196 "We only compute knownbits for the difference here.");
4203 Borrow = Borrow.
trunc(1);
4217 if (
Op.getResNo() == 1) {
4228 assert(
Op.getResNo() == 0 &&
"We only compute knownbits for the sum here.");
4238 Carry = Carry.
trunc(1);
4274 const unsigned Index =
Op.getConstantOperandVal(1);
4275 const unsigned EltBitWidth =
Op.getValueSizeInBits();
4282 Known = Known.
trunc(EltBitWidth);
4298 Known = Known.
trunc(EltBitWidth);
4303 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
4304 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
4314 if (
Op.getValueType().isScalableVector())
4323 bool DemandedVal =
true;
4324 APInt DemandedVecElts = DemandedElts;
4325 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
4326 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
4327 unsigned EltIdx = CEltNo->getZExtValue();
4328 DemandedVal = !!DemandedElts[EltIdx];
4337 if (!!DemandedVecElts) {
4355 Known = Known2.
abs();
4388 if (CstLow && CstHigh) {
4393 const APInt &ValueHigh = CstHigh->getAPIntValue();
4394 if (ValueLow.
sle(ValueHigh)) {
4397 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
4420 if (IsMax && CstLow) {
4444 EVT VT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
4449 if (
Op.getResNo() == 1) {
4476 cast<AtomicSDNode>(
Op)->getMemoryVT().getScalarSizeInBits();
4478 if (
Op.getResNo() == 0) {
4502 if (
Op.getValueType().isScalableVector())
4648 return C->getAPIntValue().zextOrTrunc(
BitWidth).isPowerOf2();
4656 if (
C &&
C->getAPIntValue() == 1)
4666 if (
C &&
C->getAPIntValue().isSignMask())
4678 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
4679 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
4687 if (
C->getAPIntValue().zextOrTrunc(
BitWidth).isPowerOf2())
4725 return C1->getValueAPF().getExactLog2Abs() >= 0;
4734 EVT VT =
Op.getValueType();
4746 unsigned Depth)
const {
4747 EVT VT =
Op.getValueType();
4752 unsigned FirstAnswer = 1;
4754 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op)) {
4755 const APInt &Val =
C->getAPIntValue();
4765 unsigned Opcode =
Op.getOpcode();
4769 Tmp = cast<VTSDNode>(
Op.getOperand(1))->getVT().getSizeInBits();
4770 return VTBits-Tmp+1;
4772 Tmp = cast<VTSDNode>(
Op.getOperand(1))->getVT().getSizeInBits();
4779 unsigned NumSrcBits =
Op.getOperand(0).getValueSizeInBits();
4781 if (NumSrcSignBits > (NumSrcBits - VTBits))
4782 return NumSrcSignBits - (NumSrcBits - VTBits);
4789 if (!DemandedElts[i])
4796 APInt T =
C->getAPIntValue().trunc(VTBits);
4797 Tmp2 =
T.getNumSignBits();
4801 if (
SrcOp.getValueSizeInBits() != VTBits) {
4803 "Expected BUILD_VECTOR implicit truncation");
4804 unsigned ExtraBits =
SrcOp.getValueSizeInBits() - VTBits;
4805 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
4808 Tmp = std::min(Tmp, Tmp2);
4815 APInt DemandedLHS, DemandedRHS;
4819 DemandedLHS, DemandedRHS))
4822 Tmp = std::numeric_limits<unsigned>::max();
4825 if (!!DemandedRHS) {
4827 Tmp = std::min(Tmp, Tmp2);
4832 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
4848 if (VTBits == SrcBits)
4854 if ((SrcBits % VTBits) == 0) {
4857 unsigned Scale = SrcBits / VTBits;
4858 APInt SrcDemandedElts =
4868 for (
unsigned i = 0; i != NumElts; ++i)
4869 if (DemandedElts[i]) {
4870 unsigned SubOffset = i % Scale;
4871 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
4872 SubOffset = SubOffset * VTBits;
4873 if (Tmp <= SubOffset)
4875 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
4884 Tmp = cast<VTSDNode>(
Op.getOperand(1))->getVT().getScalarSizeInBits();
4885 return VTBits - Tmp + 1;
4887 Tmp = VTBits -
Op.getOperand(0).getScalarValueSizeInBits();
4891 Tmp = cast<VTSDNode>(
Op.getOperand(1))->getVT().getScalarSizeInBits();
4894 return std::max(Tmp, Tmp2);
4899 EVT SrcVT = Src.getValueType();
4907 if (std::optional<uint64_t> ShAmt =
4909 Tmp = std::min<uint64_t>(Tmp + *ShAmt, VTBits);
4912 if (std::optional<ConstantRange> ShAmtRange =
4914 uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
4915 uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
4923 EVT ExtVT = Ext.getValueType();
4924 SDValue Extendee = Ext.getOperand(0);
4928 if (SizeDifference <= MinShAmt) {
4929 Tmp = SizeDifference +
4932 return Tmp - MaxShAmt;
4938 return Tmp - MaxShAmt;
4948 FirstAnswer = std::min(Tmp, Tmp2);
4958 if (Tmp == 1)
return 1;
4960 return std::min(Tmp, Tmp2);
4963 if (Tmp == 1)
return 1;
4965 return std::min(Tmp, Tmp2);
4977 if (CstLow && CstHigh) {
4982 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
4983 return std::min(Tmp, Tmp2);
4992 return std::min(Tmp, Tmp2);
5000 return std::min(Tmp, Tmp2);
5004 if (
Op.getResNo() == 0 &&
Op.getOperand(0) ==
Op.getOperand(1))
5015 if (
Op.getResNo() != 1)
5029 unsigned OpNo =
Op->isStrictFPOpcode() ? 1 : 0;
5046 unsigned RotAmt =
C->getAPIntValue().urem(VTBits);
5050 RotAmt = (VTBits - RotAmt) % VTBits;
5054 if (Tmp > (RotAmt + 1))
return (Tmp - RotAmt);
5062 if (Tmp == 1)
return 1;
5067 if (CRHS->isAllOnes()) {
5073 if ((Known.
Zero | 1).isAllOnes())
5083 if (Tmp2 == 1)
return 1;
5084 return std::min(Tmp, Tmp2) - 1;
5087 if (Tmp2 == 1)
return 1;
5092 if (CLHS->isZero()) {
5097 if ((Known.
Zero | 1).isAllOnes())
5111 if (Tmp == 1)
return 1;
5112 return std::min(Tmp, Tmp2) - 1;
5116 if (SignBitsOp0 == 1)
5119 if (SignBitsOp1 == 1)
5121 unsigned OutValidBits =
5122 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
5123 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
5131 return std::min(Tmp, Tmp2);
5140 unsigned NumSrcBits =
Op.getOperand(0).getScalarValueSizeInBits();
5142 if (NumSrcSignBits > (NumSrcBits - VTBits))
5143 return NumSrcSignBits - (NumSrcBits - VTBits);
5150 const int BitWidth =
Op.getValueSizeInBits();
5151 const int Items =
Op.getOperand(0).getValueSizeInBits() /
BitWidth;
5155 const int rIndex = Items - 1 -
Op.getConstantOperandVal(1);
5170 bool DemandedVal =
true;
5171 APInt DemandedVecElts = DemandedElts;
5172 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
5173 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
5174 unsigned EltIdx = CEltNo->getZExtValue();
5175 DemandedVal = !!DemandedElts[EltIdx];
5178 Tmp = std::numeric_limits<unsigned>::max();
5184 Tmp = std::min(Tmp, Tmp2);
5186 if (!!DemandedVecElts) {
5188 Tmp = std::min(Tmp, Tmp2);
5190 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5201 const unsigned BitWidth =
Op.getValueSizeInBits();
5202 const unsigned EltBitWidth =
Op.getOperand(0).getScalarValueSizeInBits();
5214 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
5215 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
5225 if (Src.getValueType().isScalableVector())
5228 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
5237 Tmp = std::numeric_limits<unsigned>::max();
5238 EVT SubVectorVT =
Op.getOperand(0).getValueType();
5241 for (
unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
5243 DemandedElts.
extractBits(NumSubVectorElts, i * NumSubVectorElts);
5247 Tmp = std::min(Tmp, Tmp2);
5249 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5262 APInt DemandedSrcElts = DemandedElts;
5265 Tmp = std::numeric_limits<unsigned>::max();
5266 if (!!DemandedSubElts) {
5271 if (!!DemandedSrcElts) {
5273 Tmp = std::min(Tmp, Tmp2);
5275 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5280 if (
const MDNode *Ranges = LD->getRanges()) {
5281 if (DemandedElts != 1)
5286 switch (LD->getExtensionType()) {
5321 Tmp = cast<AtomicSDNode>(
Op)->getMemoryVT().getScalarSizeInBits();
5323 if (
Op.getResNo() == 0) {
5327 return VTBits - Tmp + 1;
5329 return VTBits - Tmp;
5333 return VTBits - Tmp + 1;
5335 return VTBits - Tmp;
5343 if (
Op.getResNo() == 0) {
5346 unsigned ExtType = LD->getExtensionType();
5350 Tmp = LD->getMemoryVT().getScalarSizeInBits();
5351 return VTBits - Tmp + 1;
5353 Tmp = LD->getMemoryVT().getScalarSizeInBits();
5354 return VTBits - Tmp;
5359 Type *CstTy = Cst->getType();
5364 for (
unsigned i = 0; i != NumElts; ++i) {
5365 if (!DemandedElts[i])
5368 if (
auto *CInt = dyn_cast<ConstantInt>(Elt)) {
5370 Tmp = std::min(Tmp,
Value.getNumSignBits());
5373 if (
auto *CFP = dyn_cast<ConstantFP>(Elt)) {
5374 APInt Value = CFP->getValueAPF().bitcastToAPInt();
5375 Tmp = std::min(Tmp,
Value.getNumSignBits());
5401 FirstAnswer = std::max(FirstAnswer, NumBits);
5412 unsigned Depth)
const {
5414 return Op.getScalarValueSizeInBits() - SignBits + 1;
5418 const APInt &DemandedElts,
5419 unsigned Depth)
const {
5421 return Op.getScalarValueSizeInBits() - SignBits + 1;
5425 unsigned Depth)
const {
5430 EVT VT =
Op.getValueType();
5438 const APInt &DemandedElts,
5440 unsigned Depth)
const {
5441 unsigned Opcode =
Op.getOpcode();
5468 if (!DemandedElts[i])
5481 APInt DemandedLHS, DemandedRHS;
5482 auto *SVN = cast<ShuffleVectorSDNode>(
Op);
5484 DemandedElts, DemandedLHS, DemandedRHS,
5487 if (!DemandedLHS.
isZero() &&
5491 if (!DemandedRHS.
isZero() &&
5519 return isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly, Depth + 1);
5525 unsigned Depth)
const {
5526 EVT VT =
Op.getValueType();
5536 unsigned Depth)
const {
5537 if (ConsiderFlags &&
Op->hasPoisonGeneratingFlags())
5540 unsigned Opcode =
Op.getOpcode();
5579 if (
Op.getOperand(0).getValueType().isInteger())
5586 unsigned CCOp = Opcode ==
ISD::SETCC ? 2 : 4;
5587 ISD::CondCode CCCode = cast<CondCodeSDNode>(
Op.getOperand(CCOp))->get();
5588 if (((
unsigned)CCCode & 0x10U))
5619 EVT VecVT =
Op.getOperand(0).getValueType();
5631 auto *SVN = cast<ShuffleVectorSDNode>(
Op);
5633 if (Elt < 0 && DemandedElts[
Idx])
5652 unsigned Opcode =
Op.getOpcode();
5654 return Op->getFlags().hasDisjoint() ||
5676 return !
C->getValueAPF().isNaN() ||
5677 (SNaN && !
C->getValueAPF().isSignaling());
5680 unsigned Opcode =
Op.getOpcode();
5800 assert(
Op.getValueType().isFloatingPoint() &&
5801 "Floating point type expected");
5812 assert(!
Op.getValueType().isFloatingPoint() &&
5813 "Floating point types unsupported - use isKnownNeverZeroFloat");
5822 switch (
Op.getOpcode()) {
5836 if (
Op->getFlags().hasNoSignedWrap() ||
Op->getFlags().hasNoUnsignedWrap())
5840 if (ValKnown.
One[0])
5900 if (
Op->getFlags().hasExact())
5916 if (
Op->getFlags().hasExact())
5921 if (
Op->getFlags().hasNoUnsignedWrap())
5932 std::optional<bool> ne =
5939 if (
Op->getFlags().hasNoSignedWrap() ||
Op->getFlags().hasNoUnsignedWrap())
5950 const APInt &Multiplier =
Op.getConstantOperandAPInt(0);
5964 return !C1->isNegative();
5971 if (
A ==
B)
return true;
5976 if (CA->isZero() && CB->isZero())
return true;
5985 return V.getOperand(0);
5992 SDValue ExtArg = V.getOperand(0);
6011 NotOperand = NotOperand->getOperand(0);
6013 if (
Other == NotOperand)
6016 return NotOperand ==
Other->getOperand(0) ||
6017 NotOperand ==
Other->getOperand(1);
6023 A =
A->getOperand(0);
6026 B =
B->getOperand(0);
6029 return MatchNoCommonBitsPattern(
A->getOperand(0),
A->getOperand(1),
B) ||
6030 MatchNoCommonBitsPattern(
A->getOperand(1),
A->getOperand(0),
B);
6036 assert(
A.getValueType() ==
B.getValueType() &&
6037 "Values must have the same type");
6047 if (cast<ConstantSDNode>(Step)->
isZero())
6056 int NumOps = Ops.
size();
6057 assert(NumOps != 0 &&
"Can't build an empty vector!");
6059 "BUILD_VECTOR cannot be used with scalable types");
6061 "Incorrect element count in BUILD_VECTOR!");
6069 bool IsIdentity =
true;
6070 for (
int i = 0; i != NumOps; ++i) {
6073 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
6074 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
6075 Ops[i].getConstantOperandAPInt(1) != i) {
6079 IdentitySrc = Ops[i].getOperand(0);
6092 assert(!Ops.
empty() &&
"Can't concatenate an empty list of vectors!");
6095 return Ops[0].getValueType() ==
Op.getValueType();
6097 "Concatenation of vectors with inconsistent value types!");
6100 "Incorrect element count in vector concatenation!");
6102 if (Ops.
size() == 1)
6113 bool IsIdentity =
true;
6114 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i) {
6116 unsigned IdentityIndex = i *
Op.getValueType().getVectorMinNumElements();
6118 Op.getOperand(0).getValueType() != VT ||
6119 (IdentitySrc &&
Op.getOperand(0) != IdentitySrc) ||
6120 Op.getConstantOperandVal(1) != IdentityIndex) {
6124 assert((!IdentitySrc || IdentitySrc ==
Op.getOperand(0)) &&
6125 "Unexpected identity source vector for concat of extracts");
6126 IdentitySrc =
Op.getOperand(0);
6129 assert(IdentitySrc &&
"Failed to set source vector of extracts");
6144 EVT OpVT =
Op.getValueType();
6156 SVT = (SVT.
bitsLT(
Op.getValueType()) ?
Op.getValueType() : SVT);
6180 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
6183 auto *
N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
6184 CSEMap.InsertNode(
N, IP);
6197 return getNode(Opcode,
DL, VT, N1, Flags);
6248 "STEP_VECTOR can only be used with scalable types");
6251 "Unexpected step operand");
6273 "Invalid FP cast!");
6277 "Vector element count mismatch!");
6295 "Invalid SIGN_EXTEND!");
6297 "SIGN_EXTEND result type type should be vector iff the operand "
6302 "Vector element count mismatch!");
6316 "Invalid ZERO_EXTEND!");
6318 "ZERO_EXTEND result type type should be vector iff the operand "
6323 "Vector element count mismatch!");
6354 "Invalid ANY_EXTEND!");
6356 "ANY_EXTEND result type type should be vector iff the operand "
6361 "Vector element count mismatch!");
6386 "Invalid TRUNCATE!");
6388 "TRUNCATE result type type should be vector iff the operand "
6393 "Vector element count mismatch!");
6416 assert(VT.
isVector() &&
"This DAG node is restricted to vector types.");
6418 "The input must be the same size or smaller than the result.");
6421 "The destination vector type must have fewer lanes than the input.");
6431 "BSWAP types must be a multiple of 16 bits!");
6445 "Cannot BITCAST between types of different sizes!");
6458 "Illegal SCALAR_TO_VECTOR node!");
6515 "Wrong operand type!");
6522 if (VT != MVT::Glue) {
6526 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
6527 E->intersectFlagsWith(Flags);
6531 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
6533 createOperands(
N, Ops);
6534 CSEMap.InsertNode(
N, IP);
6536 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
6537 createOperands(
N, Ops);
6571 if (!C2.getBoolValue())
6575 if (!C2.getBoolValue())
6579 if (!C2.getBoolValue())
6583 if (!C2.getBoolValue())
6603 return std::nullopt;
6608 bool IsUndef1,
const APInt &C2,
6610 if (!(IsUndef1 || IsUndef2))
6618 return std::nullopt;
6628 auto *C2 = dyn_cast<ConstantSDNode>(N2);
6631 int64_t
Offset = C2->getSExtValue();
6649 assert(Ops.
size() == 2 &&
"Div/rem should have 2 operands");
6656 [](
SDValue V) { return V.isUndef() ||
6657 isNullConstant(V); });
6678 unsigned NumOps = Ops.
size();
6694 if (
auto *
C = dyn_cast<ConstantSDNode>(N1)) {
6695 const APInt &Val =
C->getAPIntValue();
6699 C->isTargetOpcode(),
C->isOpaque());
6706 C->isTargetOpcode(),
C->isOpaque());
6711 C->isTargetOpcode(),
C->isOpaque());
6713 C->isTargetOpcode(),
C->isOpaque());
6759 if (VT == MVT::f16 &&
C->getValueType(0) == MVT::i16)
6761 if (VT == MVT::f32 &&
C->getValueType(0) == MVT::i32)
6763 if (VT == MVT::f64 &&
C->getValueType(0) == MVT::i64)
6765 if (VT == MVT::f128 &&
C->getValueType(0) == MVT::i128)
6772 if (
auto *
C = dyn_cast<ConstantFPSDNode>(N1)) {
6826 return getConstant(V.bitcastToAPInt().getZExtValue(),
DL, VT);
6829 if (VT == MVT::i16 &&
C->getValueType(0) == MVT::f16)
6832 if (VT == MVT::i16 &&
C->getValueType(0) == MVT::bf16)
6835 if (VT == MVT::i32 &&
C->getValueType(0) == MVT::f32)
6838 if (VT == MVT::i64 &&
C->getValueType(0) == MVT::f64)
6839 return getConstant(V.bitcastToAPInt().getZExtValue(),
DL, VT);
6854 if (
auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
6855 if (
auto *C2 = dyn_cast<ConstantSDNode>(Ops[1])) {
6856 if (C1->isOpaque() || C2->isOpaque())
6859 std::optional<APInt> FoldAttempt =
6860 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
6866 "Can't fold vectors ops with scalar operands");
6880 EVT EVT = cast<VTSDNode>(Ops[1])->getVT();
6889 if (
auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
6890 const APInt &Val = C1->getAPIntValue();
6891 return SignExtendInReg(Val, VT);
6896 llvm::EVT OpVT = Ops[0].getOperand(0).getValueType();
6903 const APInt &Val = cast<ConstantSDNode>(
Op)->getAPIntValue();
6904 ScalarOps.
push_back(SignExtendInReg(Val, OpVT));
6910 isa<ConstantSDNode>(Ops[0].getOperand(0)))
6912 SignExtendInReg(Ops[0].getConstantOperandAPInt(0),
6925 Ops[0].getValueType() == VT && Ops[1].getValueType() == VT &&
6930 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
6931 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
6938 if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) &&
6939 BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) {
6943 Opcode, RawBits1[
I], UndefElts1[
I], RawBits2[
I], UndefElts2[
I]);
6954 BVEltVT = BV1->getOperand(0).getValueType();
6957 BVEltVT = BV2->getOperand(0).getValueType();
6963 DstBits, RawBits, DstUndefs,
6966 for (
unsigned I = 0, E = DstBits.
size();
I != E; ++
I) {
6984 ? Ops[0].getConstantOperandAPInt(0) * RHSVal
6985 : Ops[0].getConstantOperandAPInt(0) << RHSVal;
6990 auto IsScalarOrSameVectorSize = [NumElts](
const SDValue &
Op) {
6991 return !
Op.getValueType().isVector() ||
6992 Op.getValueType().getVectorElementCount() == NumElts;
6995 auto IsBuildVectorSplatVectorOrUndef = [](
const SDValue &
Op) {
7004 if (!
llvm::all_of(Ops, IsBuildVectorSplatVectorOrUndef) ||
7033 for (
unsigned I = 0;
I != NumVectorElts;
I++) {
7036 EVT InSVT =
Op.getValueType().getScalarType();
7058 !isa<ConstantSDNode>(ScalarOp) &&
7079 if (LegalSVT != SVT)
7080 ScalarResult =
getNode(ExtendCode,
DL, LegalSVT, ScalarResult);
7094 if (Ops.
size() != 2)
7105 if (N1CFP && N2CFP) {
7156 if (N1C && N1C->getValueAPF().isNegZero() && N2.
isUndef())
7185 ID.AddInteger(
A.value());
7188 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
7192 newSDNode<AssertAlignSDNode>(
DL.getIROrder(),
DL.getDebugLoc(), VTs,
A);
7193 createOperands(
N, {Val});
7195 CSEMap.InsertNode(
N, IP);
7208 return getNode(Opcode,
DL, VT, N1, N2, Flags);
7222 if ((N1C && !N2C) || (N1CFP && !N2CFP))
7236 "Operand is DELETED_NODE!");
7240 auto *N1C = dyn_cast<ConstantSDNode>(N1);
7241 auto *N2C = dyn_cast<ConstantSDNode>(N2);
7252 N2.
getValueType() == MVT::Other &&
"Invalid token factor!");
7256 if (N1 == N2)
return N1;
7272 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7274 N1.
getValueType() == VT &&
"Binary operator types must match!");
7277 if (N2CV && N2CV->
isZero())
7286 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7288 N1.
getValueType() == VT &&
"Binary operator types must match!");
7291 if (N2CV && N2CV->
isZero())
7298 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7300 N1.
getValueType() == VT &&
"Binary operator types must match!");
7305 const APInt &N2CImm = N2C->getAPIntValue();
7319 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7321 N1.
getValueType() == VT &&
"Binary operator types must match!");
7334 "Types of operands of UCMP/SCMP must match");
7336 "Operands and return type of must both be scalars or vectors");
7340 "Result and operands must have the same number of elements");
7346 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7348 N1.
getValueType() == VT &&
"Binary operator types must match!");
7352 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7354 N1.
getValueType() == VT &&
"Binary operator types must match!");
7360 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7362 N1.
getValueType() == VT &&
"Binary operator types must match!");
7368 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7370 N1.
getValueType() == VT &&
"Binary operator types must match!");
7381 N1.
getValueType() == VT &&
"Binary operator types must match!");
7389 "Invalid FCOPYSIGN!");
7394 const APInt &ShiftImm = N2C->getAPIntValue();
7406 "Shift operators return type must be the same as their first arg");
7408 "Shifts only work on integers");
7410 "Vector shift amounts must be in the same as their first arg");
7417 "Invalid use of small shift amount with oversized value!");
7424 if (N2CV && N2CV->
isZero())
7430 (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
7436 EVT EVT = cast<VTSDNode>(N2)->getVT();
7439 "Cannot *_EXTEND_INREG FP types");
7441 "AssertSExt/AssertZExt type should be the vector element type "
7442 "rather than the vector type!");
7448 EVT EVT = cast<VTSDNode>(N2)->getVT();
7451 "Cannot *_EXTEND_INREG FP types");
7453 "SIGN_EXTEND_INREG type should be vector iff the operand "
7457 "Vector element counts must match in SIGN_EXTEND_INREG");
7459 if (
EVT == VT)
return N1;
7467 "FP_TO_*INT_SAT type should be vector iff the operand type is "
7471 "Vector element counts must match in FP_TO_*INT_SAT");
7472 assert(!cast<VTSDNode>(N2)->getVT().isVector() &&
7473 "Type to saturate to must be a scalar.");
7480 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
7481 element type of the vector.");
7513 "BUILD_VECTOR used for scalable vectors");
7536 if (N1Op2C && N2C) {
7566 assert(N2C && (
unsigned)N2C->getZExtValue() < 2 &&
"Bad EXTRACT_ELEMENT!");
7570 "Wrong types for EXTRACT_ELEMENT!");
7581 unsigned Shift = ElementSize * N2C->getZExtValue();
7582 const APInt &Val = N1C->getAPIntValue();
7589 "Extract subvector VTs must be vectors!");
7591 "Extract subvector VTs must have the same element type!");
7593 "Cannot extract a scalable vector from a fixed length vector!");
7596 "Extract subvector must be from larger vector to smaller vector!");
7597 assert(N2C &&
"Extract subvector index must be a constant");
7601 "Extract subvector overflow!");
7602 assert(N2C->getAPIntValue().getBitWidth() ==
7604 "Constant index for EXTRACT_SUBVECTOR has an invalid size");
7619 return N1.
getOperand(N2C->getZExtValue() / Factor);
7687 if (VT != MVT::Glue) {
7691 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
7692 E->intersectFlagsWith(Flags);
7696 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
7698 createOperands(
N, Ops);
7699 CSEMap.InsertNode(
N, IP);
7701 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
7702 createOperands(
N, Ops);
7716 return getNode(Opcode,
DL, VT, N1, N2, N3, Flags);
7725 "Operand is DELETED_NODE!");
7736 if (N1CFP && N2CFP && N3CFP) {
7765 "SETCC operands must have the same type!");
7767 "SETCC type should be vector iff the operand type is vector!");
7770 "SETCC vector element counts must match!");
7790 if (cast<ConstantSDNode>(N3)->
isZero())
7820 "Dest and insert subvector source types must match!");
7822 "Insert subvector VTs must be vectors!");
7824 "Insert subvector VTs must have the same element type!");
7826 "Cannot insert a scalable vector into a fixed length vector!");
7829 "Insert subvector must be from smaller vector to larger vector!");
7830 assert(isa<ConstantSDNode>(N3) &&
7831 "Insert subvector index must be constant");
7835 "Insert subvector overflow!");
7838 "Constant index for INSERT_SUBVECTOR has an invalid size");
7856 case ISD::VP_TRUNCATE:
7857 case ISD::VP_SIGN_EXTEND:
7858 case ISD::VP_ZERO_EXTEND:
7867 assert(VT == VecVT &&
"Vector and result type don't match.");
7869 "All inputs must be vectors.");
7870 assert(VecVT == PassthruVT &&
"Vector and passthru types don't match.");
7872 "Vector and mask must have same number of elements.");
7885 if (VT != MVT::Glue) {
7889 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
7890 E->intersectFlagsWith(Flags);
7894 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
7896 createOperands(
N, Ops);
7897 CSEMap.InsertNode(
N, IP);
7899 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
7900 createOperands(
N, Ops);
7912 SDValue Ops[] = { N1, N2, N3, N4 };
7913 return getNode(Opcode,
DL, VT, Ops, Flags);
7921 return getNode(Opcode,
DL, VT, N1, N2, N3, N4, Flags);
7927 SDValue Ops[] = { N1, N2, N3, N4, N5 };
7928 return getNode(Opcode,
DL, VT, Ops, Flags);
7937 return getNode(Opcode,
DL, VT, N1, N2, N3, N4, N5, Flags);
7954 if (FI->getIndex() < 0)
7969 assert(
C->getAPIntValue().getBitWidth() == 8);
7974 return DAG.
getConstant(Val, dl, VT,
false, IsOpaque);
7979 assert(
Value.getValueType() == MVT::i8 &&
"memset with non-byte fill value?");
7995 if (VT !=
Value.getValueType())
8008 if (Slice.
Array ==
nullptr) {
8011 if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
8026 unsigned NumVTBytes = NumVTBits / 8;
8027 unsigned NumBytes = std::min(NumVTBytes,
unsigned(Slice.
Length));
8029 APInt Val(NumVTBits, 0);
8031 for (
unsigned i = 0; i != NumBytes; ++i)
8034 for (
unsigned i = 0; i != NumBytes; ++i)
8035 Val |= (
uint64_t)(
unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
8054 APInt(
Base.getValueSizeInBits().getFixedValue(),
8055 Offset.getKnownMinValue()));
8066 EVT BasePtrVT =
Ptr.getValueType();
8075 G = cast<GlobalAddressSDNode>(Src);
8076 else if (Src.getOpcode() ==
ISD::ADD &&
8079 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
8080 SrcDelta = Src.getConstantOperandVal(1);
8086 SrcDelta +
G->getOffset());
8102 assert(OutLoadChains.
size() &&
"Missing loads in memcpy inlining");
8103 assert(OutStoreChains.
size() &&
"Missing stores in memcpy inlining");
8105 for (
unsigned i =
From; i < To; ++i) {
8107 GluedLoadChains.
push_back(OutLoadChains[i]);
8114 for (
unsigned i =
From; i < To; ++i) {
8115 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
8117 ST->getBasePtr(), ST->getMemoryVT(),
8118 ST->getMemOperand());
8126 bool isVol,
bool AlwaysInline,
8142 std::vector<EVT> MemOps;
8143 bool DstAlignCanChange =
false;
8149 DstAlignCanChange =
true;
8151 if (!SrcAlign || Alignment > *SrcAlign)
8152 SrcAlign = Alignment;
8153 assert(SrcAlign &&
"SrcAlign must be set");
8157 bool isZeroConstant = CopyFromConstant && Slice.
Array ==
nullptr;
8159 const MemOp Op = isZeroConstant
8163 *SrcAlign, isVol, CopyFromConstant);
8169 if (DstAlignCanChange) {
8170 Type *Ty = MemOps[0].getTypeForEVT(
C);
8171 Align NewAlign =
DL.getABITypeAlign(Ty);
8177 if (!
TRI->hasStackRealignment(MF))
8179 NewAlign = std::min(NewAlign, *StackAlign);
8181 if (NewAlign > Alignment) {
8185 Alignment = NewAlign;
8193 const Value *SrcVal = dyn_cast_if_present<const Value *>(SrcPtrInfo.
V);
8203 unsigned NumMemOps = MemOps.
size();
8205 for (
unsigned i = 0; i != NumMemOps; ++i) {
8210 if (VTSize >
Size) {
8213 assert(i == NumMemOps-1 && i != 0);
8214 SrcOff -= VTSize -
Size;
8215 DstOff -= VTSize -
Size;
8218 if (CopyFromConstant &&
8226 if (SrcOff < Slice.
Length) {
8228 SubSlice.
move(SrcOff);
8231 SubSlice.
Array =
nullptr;
8233 SubSlice.
Length = VTSize;
8236 if (
Value.getNode()) {
8240 DstPtrInfo.
getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
8245 if (!Store.getNode()) {
8254 bool isDereferenceable =
8257 if (isDereferenceable)
8272 DstPtrInfo.
getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
8282 unsigned NumLdStInMemcpy = OutStoreChains.
size();
8284 if (NumLdStInMemcpy) {
8290 for (
unsigned i = 0; i < NumLdStInMemcpy; ++i) {
8296 if (NumLdStInMemcpy <= GluedLdStLimit) {
8298 NumLdStInMemcpy, OutLoadChains,
8301 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
8302 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
8303 unsigned GlueIter = 0;
8305 for (
unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
8306 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
8307 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
8310 OutLoadChains, OutStoreChains);
8311 GlueIter += GluedLdStLimit;
8315 if (RemainingLdStInMemcpy) {
8317 RemainingLdStInMemcpy, OutLoadChains,
8329 bool isVol,
bool AlwaysInline,
8343 std::vector<EVT> MemOps;
8344 bool DstAlignCanChange =
false;
8350 DstAlignCanChange =
true;
8352 if (!SrcAlign || Alignment > *SrcAlign)
8353 SrcAlign = Alignment;
8354 assert(SrcAlign &&
"SrcAlign must be set");
8364 if (DstAlignCanChange) {
8365 Type *Ty = MemOps[0].getTypeForEVT(
C);
8366 Align NewAlign =
DL.getABITypeAlign(Ty);
8372 if (!
TRI->hasStackRealignment(MF))
8374 NewAlign = std::min(NewAlign, *StackAlign);
8376 if (NewAlign > Alignment) {
8380 Alignment = NewAlign;
8394 unsigned NumMemOps = MemOps.
size();
8395 for (
unsigned i = 0; i < NumMemOps; i++) {
8400 bool isDereferenceable =
8403 if (isDereferenceable)
8409 SrcPtrInfo.
getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags, NewAAInfo);
8416 for (
unsigned i = 0; i < NumMemOps; i++) {
8422 Chain, dl, LoadValues[i],
8424 DstPtrInfo.
getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
8464 std::vector<EVT> MemOps;
8465 bool DstAlignCanChange =
false;
8471 DstAlignCanChange =
true;
8477 MemOp::Set(
Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
8481 if (DstAlignCanChange) {
8484 Align NewAlign =
DL.getABITypeAlign(Ty);
8490 if (!
TRI->hasStackRealignment(MF))
8492 NewAlign = std::min(NewAlign, *StackAlign);
8494 if (NewAlign > Alignment) {
8498 Alignment = NewAlign;
8504 unsigned NumMemOps = MemOps.size();
8507 EVT LargestVT = MemOps[0];
8508 for (
unsigned i = 1; i < NumMemOps; i++)
8509 if (MemOps[i].bitsGT(LargestVT))
8510 LargestVT = MemOps[i];
8517 for (
unsigned i = 0; i < NumMemOps; i++) {
8520 if (VTSize >
Size) {
8523 assert(i == NumMemOps-1 && i != 0);
8524 DstOff -= VTSize -
Size;
8531 if (VT.
bitsLT(LargestVT)) {
8552 assert(
Value.getValueType() == VT &&
"Value with wrong type.");
8579 Align Alignment,
bool isVol,
bool AlwaysInline,
const CallInst *CI,
8587 if (ConstantSize->
isZero())
8591 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
8592 isVol,
false, DstPtrInfo, SrcPtrInfo, AAInfo, AA);
8593 if (Result.getNode())
8601 *
this, dl, Chain, Dst, Src,
Size, Alignment, isVol, AlwaysInline,
8602 DstPtrInfo, SrcPtrInfo);
8603 if (Result.getNode())
8610 assert(ConstantSize &&
"AlwaysInline requires a constant size!");
8612 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
8613 isVol,
true, DstPtrInfo, SrcPtrInfo, AAInfo, AA);
8629 Entry.Node = Dst; Args.push_back(Entry);
8630 Entry.Node = Src; Args.push_back(Entry);
8633 Entry.Node =
Size; Args.push_back(Entry);
8636 bool IsTailCall =
false;
8637 if (OverrideTailCall.has_value()) {
8638 IsTailCall = *OverrideTailCall;
8640 bool LowersToMemcpy =
8645 ReturnsFirstArg && LowersToMemcpy);
8651 Dst.getValueType().getTypeForEVT(*
getContext()),
8658 std::pair<SDValue,SDValue> CallResult = TLI->
LowerCallTo(CLI);
8659 return CallResult.second;
8664 Type *SizeTy,
unsigned ElemSz,
8673 Args.push_back(Entry);
8676 Args.push_back(Entry);
8680 Args.push_back(Entry);
8684 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
8698 std::pair<SDValue, SDValue> CallResult = TLI->
LowerCallTo(CLI);
8699 return CallResult.second;
8705 std::optional<bool> OverrideTailCall,
8714 if (ConstantSize->
isZero())
8718 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
8719 isVol,
false, DstPtrInfo, SrcPtrInfo, AAInfo);
8720 if (Result.getNode())
8729 Alignment, isVol, DstPtrInfo, SrcPtrInfo);
8730 if (Result.getNode())
8744 Entry.Node = Dst; Args.push_back(Entry);
8745 Entry.Node = Src; Args.push_back(Entry);
8748 Entry.Node =
Size; Args.push_back(Entry);
8752 bool IsTailCall =
false;
8753 if (OverrideTailCall.has_value()) {
8754 IsTailCall = *OverrideTailCall;
8756 bool LowersToMemmove =
8761 ReturnsFirstArg && LowersToMemmove);
8767 Dst.getValueType().getTypeForEVT(*
getContext()),
8774 std::pair<SDValue,SDValue> CallResult = TLI->
LowerCallTo(CLI);
8775 return CallResult.second;
8780 Type *SizeTy,
unsigned ElemSz,
8789 Args.push_back(Entry);
8792 Args.push_back(Entry);
8796 Args.push_back(Entry);
8800 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
8814 std::pair<SDValue, SDValue> CallResult = TLI->
LowerCallTo(CLI);
8815 return CallResult.second;
8820 bool isVol,
bool AlwaysInline,
8829 if (ConstantSize->
isZero())
8834 isVol,
false, DstPtrInfo, AAInfo);
8836 if (Result.getNode())
8844 *
this, dl, Chain, Dst, Src,
Size, Alignment, isVol, AlwaysInline, DstPtrInfo);
8845 if (Result.getNode())
8852 assert(ConstantSize &&
"AlwaysInline requires a constant size!");
8855 isVol,
true, DstPtrInfo, AAInfo);
8857 "getMemsetStores must return a valid sequence when AlwaysInline");
8874 const auto CreateEntry = [](
SDValue Node,
Type *Ty) {
8886 Args.push_back(CreateEntry(
Size,
DL.getIntPtrType(Ctx)));
8893 Args.push_back(CreateEntry(Src, Src.getValueType().getTypeForEVT(Ctx)));
8894 Args.push_back(CreateEntry(
Size,
DL.getIntPtrType(Ctx)));
8896 Dst.getValueType().getTypeForEVT(Ctx),
8901 bool LowersToMemset =
8912 std::pair<SDValue, SDValue> CallResult = TLI->
LowerCallTo(CLI);
8913 return CallResult.second;
8918 Type *SizeTy,
unsigned ElemSz,
8926 Args.push_back(Entry);
8930 Args.push_back(Entry);
8934 Args.push_back(Entry);
8938 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
8952 std::pair<SDValue, SDValue> CallResult = TLI->
LowerCallTo(CLI);
8953 return CallResult.second;
8965 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
8966 cast<AtomicSDNode>(E)->refineAlignment(MMO);
8971 VTList, MemVT, MMO);
8972 createOperands(
N, Ops);
8974 CSEMap.InsertNode(
N, IP);
8988 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
9007 "Invalid Atomic Op");
9014 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
9024 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
9029 if (Ops.
size() == 1)
9044 if (
Size.hasValue() && !
Size.getValue())
9061 (Opcode <= (
unsigned)std::numeric_limits<int>::max() &&
9063 "Opcode is not a memory-accessing opcode!");
9067 if (VTList.
VTs[VTList.
NumVTs-1] != MVT::Glue) {
9070 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
9071 Opcode, dl.
getIROrder(), VTList, MemVT, MMO));
9076 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9077 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
9082 VTList, MemVT, MMO);
9083 createOperands(
N, Ops);
9085 CSEMap.InsertNode(
N, IP);
9088 VTList, MemVT, MMO);
9089 createOperands(
N, Ops);
9098 SDValue Chain,
int FrameIndex,
9110 ID.AddInteger(FrameIndex);
9114 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
9119 createOperands(
N, Ops);
9120 CSEMap.InsertNode(
N, IP);
9136 ID.AddInteger(Index);
9138 if (
SDNode *E = FindNodeOrInsertPos(
ID, Dl, IP))
9141 auto *
N = newSDNode<PseudoProbeSDNode>(
9143 createOperands(
N, Ops);
9144 CSEMap.InsertNode(
N, IP);
9165 !isa<ConstantSDNode>(
Ptr.getOperand(1)) ||
9166 !isa<FrameIndexSDNode>(
Ptr.getOperand(0)))
9169 int FI = cast<FrameIndexSDNode>(
Ptr.getOperand(0))->getIndex();
9172 Offset + cast<ConstantSDNode>(
Ptr.getOperand(1))->getSExtValue());
9183 if (
ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
9198 "Invalid chain type");
9210 Alignment, AAInfo, Ranges);
9221 assert(VT == MemVT &&
"Non-extending load from different memory type!");
9225 "Should only be an extending load, not truncating!");
9227 "Cannot convert from FP to Int or Int -> FP!");
9229 "Cannot use an ext load to convert to or from a vector!");
9232 "Cannot use an ext load to change the number of vector elements!");
9244 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
9245 dl.
getIROrder(), VTs, AM, ExtType, MemVT, MMO));
9249 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9250 cast<LoadSDNode>(E)->refineAlignment(MMO);
9254 ExtType, MemVT, MMO);
9255 createOperands(
N, Ops);
9257 CSEMap.InsertNode(
N, IP);
9271 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
9289 MemVT, Alignment, MMOFlags, AAInfo);
9304 assert(LD->getOffset().isUndef() &&
"Load is already a indexed load!");
9307 LD->getMemOperand()->getFlags() &
9310 LD->getChain(),
Base,
Offset, LD->getPointerInfo(),
9311 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
9337 "Invalid chain type");
9345 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
9350 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9351 cast<StoreSDNode>(E)->refineAlignment(MMO);
9356 createOperands(
N, Ops);
9358 CSEMap.InsertNode(
N, IP);
9371 "Invalid chain type");
9392 "Invalid chain type");
9397 "Should only be a truncating store, not extending!");
9399 "Can't do FP-INT conversion!");
9401 "Cannot use trunc store to convert to or from a vector!");
9404 "Cannot use trunc store to change the number of vector elements!");
9412 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
9417 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9418 cast<StoreSDNode>(E)->refineAlignment(MMO);
9423 createOperands(
N, Ops);
9425 CSEMap.InsertNode(
N, IP);
9436 assert(ST->getOffset().isUndef() &&
"Store is already a indexed store!");
9441 ID.AddInteger(ST->getMemoryVT().getRawBits());
9442 ID.AddInteger(ST->getRawSubclassData());
9443 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
9444 ID.AddInteger(ST->getMemOperand()->getFlags());
9446 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
9450 ST->isTruncatingStore(), ST->getMemoryVT(),
9451 ST->getMemOperand());
9452 createOperands(
N, Ops);
9454 CSEMap.InsertNode(
N, IP);
9466 const MDNode *Ranges,
bool IsExpanding) {
9479 Alignment, AAInfo, Ranges);
9480 return getLoadVP(AM, ExtType, VT, dl, Chain,
Ptr,
Offset, Mask, EVL, MemVT,
9499 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>(
9500 dl.
getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
9504 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9505 cast<VPLoadSDNode>(E)->refineAlignment(MMO);
9509 ExtType, IsExpanding, MemVT, MMO);
9510 createOperands(
N, Ops);
9512 CSEMap.InsertNode(
N, IP);
9528 Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges,
9537 Mask, EVL, VT, MMO, IsExpanding);
9546 const AAMDNodes &AAInfo,
bool IsExpanding) {
9549 EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo,
nullptr,
9559 EVL, MemVT, MMO, IsExpanding);
9565 auto *LD = cast<VPLoadSDNode>(OrigLoad);
9566 assert(LD->getOffset().isUndef() &&
"Load is already a indexed load!");
9569 LD->getMemOperand()->getFlags() &
9573 LD->getVectorLength(), LD->getPointerInfo(),
9574 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(),
9575 nullptr, LD->isExpandingLoad());
9582 bool IsCompressing) {
9592 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
9593 dl.
getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
9597 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9598 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
9602 IsTruncating, IsCompressing, MemVT, MMO);
9603 createOperands(
N, Ops);
9605 CSEMap.InsertNode(
N, IP);
9618 bool IsCompressing) {
9639 bool IsCompressing) {
9646 false, IsCompressing);
9649 "Should only be a truncating store, not extending!");
9652 "Cannot use trunc store to convert to or from a vector!");
9655 "Cannot use trunc store to change the number of vector elements!");
9659 SDValue Ops[] = {Chain, Val,
Ptr, Undef, Mask, EVL};
9663 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
9668 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9669 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
9675 createOperands(
N, Ops);
9677 CSEMap.InsertNode(
N, IP);
9687 auto *ST = cast<VPStoreSDNode>(OrigStore);
9688 assert(ST->getOffset().isUndef() &&
"Store is already an indexed store!");
9690 SDValue Ops[] = {ST->getChain(), ST->getValue(),
Base,
9691 Offset, ST->getMask(), ST->getVectorLength()};
9694 ID.AddInteger(ST->getMemoryVT().getRawBits());
9695 ID.AddInteger(ST->getRawSubclassData());
9696 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
9697 ID.AddInteger(ST->getMemOperand()->getFlags());
9699 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
9702 auto *
N = newSDNode<VPStoreSDNode>(
9704 ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand());
9705 createOperands(
N, Ops);
9707 CSEMap.InsertNode(
N, IP);
9727 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedLoadSDNode>(
9728 DL.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
9732 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
9733 cast<VPStridedLoadSDNode>(E)->refineAlignment(MMO);
9738 newSDNode<VPStridedLoadSDNode>(
DL.getIROrder(),
DL.getDebugLoc(), VTs, AM,
9739 ExtType, IsExpanding, MemVT, MMO);
9740 createOperands(
N, Ops);
9741 CSEMap.InsertNode(
N, IP);
9755 Undef, Stride, Mask, EVL, VT, MMO, IsExpanding);
9764 Stride, Mask, EVL, MemVT, MMO, IsExpanding);
9773 bool IsTruncating,
bool IsCompressing) {
9783 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
9784 DL.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
9787 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
9788 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
9791 auto *
N = newSDNode<VPStridedStoreSDNode>(
DL.getIROrder(),
DL.getDebugLoc(),
9792 VTs, AM, IsTruncating,
9793 IsCompressing, MemVT, MMO);
9794 createOperands(
N, Ops);
9796 CSEMap.InsertNode(
N, IP);
9808 bool IsCompressing) {
9815 false, IsCompressing);
9818 "Should only be a truncating store, not extending!");
9821 "Cannot use trunc store to convert to or from a vector!");
9824 "Cannot use trunc store to change the number of vector elements!");
9828 SDValue Ops[] = {Chain, Val,
Ptr, Undef, Stride, Mask, EVL};
9832 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
9836 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
9837 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
9840 auto *
N = newSDNode<VPStridedStoreSDNode>(
DL.getIROrder(),
DL.getDebugLoc(),
9842 IsCompressing, SVT, MMO);
9843 createOperands(
N, Ops);
9845 CSEMap.InsertNode(
N, IP);
9855 assert(Ops.
size() == 6 &&
"Incompatible number of operands");
9860 ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>(
9865 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9866 cast<VPGatherSDNode>(E)->refineAlignment(MMO);
9871 VT, MMO, IndexType);
9872 createOperands(
N, Ops);
9874 assert(
N->getMask().getValueType().getVectorElementCount() ==
9875 N->getValueType(0).getVectorElementCount() &&
9876 "Vector width mismatch between mask and data");
9877 assert(
N->getIndex().getValueType().getVectorElementCount().isScalable() ==
9878 N->getValueType(0).getVectorElementCount().isScalable() &&
9879 "Scalable flags of index and data do not match");
9881 N->getIndex().getValueType().getVectorElementCount(),
9882 N->getValueType(0).getVectorElementCount()) &&
9883 "Vector width mismatch between index and data");
9884 assert(isa<ConstantSDNode>(
N->getScale()) &&
9885 N->getScale()->getAsAPIntVal().isPowerOf2() &&
9886 "Scale should be a constant power of 2");
9888 CSEMap.InsertNode(
N, IP);
9899 assert(Ops.
size() == 7 &&
"Incompatible number of operands");
9904 ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>(
9909 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9910 cast<VPScatterSDNode>(E)->refineAlignment(MMO);
9914 VT, MMO, IndexType);
9915 createOperands(
N, Ops);
9917 assert(
N->getMask().getValueType().getVectorElementCount() ==
9918 N->getValue().getValueType().getVectorElementCount() &&
9919 "Vector width mismatch between mask and data");
9921 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
9922 N->getValue().getValueType().getVectorElementCount().isScalable() &&
9923 "Scalable flags of index and data do not match");
9925 N->getIndex().getValueType().getVectorElementCount(),
9926 N->getValue().getValueType().getVectorElementCount()) &&
9927 "Vector width mismatch between index and data");
9928 assert(isa<ConstantSDNode>(
N->getScale()) &&
9929 N->getScale()->getAsAPIntVal().isPowerOf2() &&
9930 "Scale should be a constant power of 2");
9932 CSEMap.InsertNode(
N, IP);
9947 "Unindexed masked load with an offset!");
9954 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
9955 dl.
getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
9959 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9960 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
9964 AM, ExtTy, isExpanding, MemVT, MMO);
9965 createOperands(
N, Ops);
9967 CSEMap.InsertNode(
N, IP);
9978 assert(LD->getOffset().isUndef() &&
"Masked load is already a indexed load!");
9980 Offset, LD->getMask(), LD->getPassThru(),
9981 LD->getMemoryVT(), LD->getMemOperand(), AM,
9982 LD->getExtensionType(), LD->isExpandingLoad());
9990 bool IsCompressing) {
9992 "Invalid chain type");
9995 "Unindexed masked store with an offset!");
10002 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
10003 dl.
getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
10006 void *IP =
nullptr;
10007 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10008 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
10013 IsTruncating, IsCompressing, MemVT, MMO);
10014 createOperands(
N, Ops);
10016 CSEMap.InsertNode(
N, IP);
10027 assert(ST->getOffset().isUndef() &&
10028 "Masked store is already a indexed store!");
10030 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
10031 AM, ST->isTruncatingStore(), ST->isCompressingStore());
10039 assert(Ops.
size() == 6 &&
"Incompatible number of operands");
10044 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
10045 dl.
getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy));
10048 void *IP =
nullptr;
10049 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10050 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
10055 VTs, MemVT, MMO, IndexType, ExtTy);
10056 createOperands(
N, Ops);
10058 assert(
N->getPassThru().getValueType() ==
N->getValueType(0) &&
10059 "Incompatible type of the PassThru value in MaskedGatherSDNode");
10060 assert(
N->getMask().getValueType().getVectorElementCount() ==
10061 N->getValueType(0).getVectorElementCount() &&
10062 "Vector width mismatch between mask and data");
10063 assert(
N->getIndex().getValueType().getVectorElementCount().isScalable() ==
10064 N->getValueType(0).getVectorElementCount().isScalable() &&
10065 "Scalable flags of index and data do not match");
10067 N->getIndex().getValueType().getVectorElementCount(),
10068 N->getValueType(0).getVectorElementCount()) &&
10069 "Vector width mismatch between index and data");
10070 assert(isa<ConstantSDNode>(
N->getScale()) &&
10071 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10072 "Scale should be a constant power of 2");
10074 CSEMap.InsertNode(
N, IP);
10086 assert(Ops.
size() == 6 &&
"Incompatible number of operands");
10091 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
10092 dl.
getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc));
10095 void *IP =
nullptr;
10096 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10097 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
10102 VTs, MemVT, MMO, IndexType, IsTrunc);
10103 createOperands(
N, Ops);
10105 assert(
N->getMask().getValueType().getVectorElementCount() ==
10106 N->getValue().getValueType().getVectorElementCount() &&
10107 "Vector width mismatch between mask and data");
10109 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
10110 N->getValue().getValueType().getVectorElementCount().isScalable() &&
10111 "Scalable flags of index and data do not match");
10113 N->getIndex().getValueType().getVectorElementCount(),
10114 N->getValue().getValueType().getVectorElementCount()) &&
10115 "Vector width mismatch between index and data");
10116 assert(isa<ConstantSDNode>(
N->getScale()) &&
10117 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10118 "Scale should be a constant power of 2");
10120 CSEMap.InsertNode(
N, IP);
10131 assert(Ops.
size() == 7 &&
"Incompatible number of operands");
10136 ID.AddInteger(getSyntheticNodeSubclassData<MaskedHistogramSDNode>(
10137 dl.
getIROrder(), VTs, MemVT, MMO, IndexType));
10140 void *IP =
nullptr;
10141 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10142 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
10147 VTs, MemVT, MMO, IndexType);
10148 createOperands(
N, Ops);
10150 assert(
N->getMask().getValueType().getVectorElementCount() ==
10151 N->getIndex().getValueType().getVectorElementCount() &&
10152 "Vector width mismatch between mask and data");
10153 assert(isa<ConstantSDNode>(
N->getScale()) &&
10154 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10155 "Scale should be a constant power of 2");
10156 assert(
N->getInc().getValueType().isInteger() &&
"Non integer update value");
10158 CSEMap.InsertNode(
N, IP);
10173 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
10177 void *IP =
nullptr;
10178 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
10183 createOperands(
N, Ops);
10185 CSEMap.InsertNode(
N, IP);
10200 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
10204 void *IP =
nullptr;
10205 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
10210 createOperands(
N, Ops);
10212 CSEMap.InsertNode(
N, IP);
10223 if (
Cond.isUndef())
10258 return !Val || Val->getAPIntValue().uge(
X.getScalarValueSizeInBits());
10264 if (
X.getValueType().getScalarType() == MVT::i1)
10277 bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
10279 bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
10282 if (Flags.hasNoNaNs() && (HasNan ||
X.isUndef() ||
Y.isUndef()))
10285 if (Flags.hasNoInfs() && (HasInf ||
X.isUndef() ||
Y.isUndef()))
10308 if (Opcode ==
ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
10323 switch (Ops.
size()) {
10324 case 0:
return getNode(Opcode,
DL, VT);
10325 case 1:
return getNode(Opcode,
DL, VT,
static_cast<const SDValue>(Ops[0]));
10326 case 2:
return getNode(Opcode,
DL, VT, Ops[0], Ops[1]);
10327 case 3:
return getNode(Opcode,
DL, VT, Ops[0], Ops[1], Ops[2]);
10334 return getNode(Opcode,
DL, VT, NewOps);
10342 return getNode(Opcode,
DL, VT, Ops, Flags);
10347 unsigned NumOps = Ops.
size();
10349 case 0:
return getNode(Opcode,
DL, VT);
10350 case 1:
return getNode(Opcode,
DL, VT, Ops[0], Flags);
10351 case 2:
return getNode(Opcode,
DL, VT, Ops[0], Ops[1], Flags);
10352 case 3:
return getNode(Opcode,
DL, VT, Ops[0], Ops[1], Ops[2], Flags);
10357 for (
const auto &
Op : Ops)
10359 "Operand is DELETED_NODE!");
10374 assert(NumOps == 5 &&
"SELECT_CC takes 5 operands!");
10376 "LHS and RHS of condition must have same type!");
10378 "True and False arms of SelectCC must have same type!");
10380 "select_cc node must be of same type as true and false value!");
10384 "Expected select_cc with vector result to have the same sized "
10385 "comparison type!");
10388 assert(NumOps == 5 &&
"BR_CC takes 5 operands!");
10390 "LHS/RHS of comparison should match types!");
10396 Opcode = ISD::VP_XOR;
10401 Opcode = ISD::VP_AND;
10403 case ISD::VP_REDUCE_MUL:
10406 Opcode = ISD::VP_REDUCE_AND;
10408 case ISD::VP_REDUCE_ADD:
10411 Opcode = ISD::VP_REDUCE_XOR;
10413 case ISD::VP_REDUCE_SMAX:
10414 case ISD::VP_REDUCE_UMIN:
10418 Opcode = ISD::VP_REDUCE_AND;
10420 case ISD::VP_REDUCE_SMIN:
10421 case ISD::VP_REDUCE_UMAX:
10425 Opcode = ISD::VP_REDUCE_OR;
10433 if (VT != MVT::Glue) {
10436 void *IP =
nullptr;
10438 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10439 E->intersectFlagsWith(Flags);
10443 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
10444 createOperands(
N, Ops);
10446 CSEMap.InsertNode(
N, IP);
10448 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
10449 createOperands(
N, Ops);
10452 N->setFlags(Flags);
10469 return getNode(Opcode,
DL, VTList, Ops, Flags);
10475 return getNode(Opcode,
DL, VTList.
VTs[0], Ops, Flags);
10478 for (
const auto &
Op : Ops)
10480 "Operand is DELETED_NODE!");
10489 "Invalid add/sub overflow op!");
10491 Ops[0].getValueType() == Ops[1].getValueType() &&
10492 Ops[0].getValueType() == VTList.
VTs[0] &&
10493 "Binary operator types must match!");
10494 SDValue N1 = Ops[0], N2 = Ops[1];
10500 if (N2CV && N2CV->
isZero()) {
10532 "Invalid add/sub overflow op!");
10534 Ops[0].getValueType() == Ops[1].getValueType() &&
10535 Ops[0].getValueType() == VTList.
VTs[0] &&
10536 Ops[2].getValueType() == VTList.
VTs[1] &&
10537 "Binary operator types must match!");
10543 VTList.
VTs[0] == Ops[0].getValueType() &&
10544 VTList.
VTs[0] == Ops[1].getValueType() &&
10545 "Binary operator types must match!");
10551 unsigned OutWidth = Width * 2;
10555 Val = Val.
sext(OutWidth);
10556 Mul =
Mul.sext(OutWidth);
10558 Val = Val.
zext(OutWidth);
10559 Mul =
Mul.zext(OutWidth);
10573 VTList.
VTs[0] == Ops[0].getValueType() &&
"frexp type mismatch");
10589 "Invalid STRICT_FP_EXTEND!");
10591 Ops[1].getValueType().isFloatingPoint() &&
"Invalid FP cast!");
10593 "STRICT_FP_EXTEND result type should be vector iff the operand "
10594 "type is vector!");
10597 Ops[1].getValueType().getVectorElementCount()) &&
10598 "Vector element count mismatch!");
10600 "Invalid fpext node, dst <= src!");
10603 assert(VTList.
NumVTs == 2 && Ops.
size() == 3 &&
"Invalid STRICT_FP_ROUND!");
10605 "STRICT_FP_ROUND result type should be vector iff the operand "
10606 "type is vector!");
10609 Ops[1].getValueType().getVectorElementCount()) &&
10610 "Vector element count mismatch!");
10612 Ops[1].getValueType().isFloatingPoint() &&
10613 VTList.
VTs[0].
bitsLT(Ops[1].getValueType()) &&
10615 (Ops[2]->getAsZExtVal() == 0 || Ops[2]->getAsZExtVal() == 1) &&
10616 "Invalid STRICT_FP_ROUND!");
10626 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
10627 return getNode(Opcode,
DL, VT, N1, N2, N3.getOperand(0));
10628 else if (N3.getOpcode() ==
ISD::AND)
10629 if (
ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
10633 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
10634 return getNode(Opcode,
DL, VT, N1, N2, N3.getOperand(0));
10642 if (VTList.
VTs[VTList.
NumVTs-1] != MVT::Glue) {
10645 void *IP =
nullptr;
10646 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10647 E->intersectFlagsWith(Flags);
10651 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTList);
10652 createOperands(
N, Ops);
10653 CSEMap.InsertNode(
N, IP);
10655 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTList);
10656 createOperands(
N, Ops);
10659 N->setFlags(Flags);
10674 return getNode(Opcode,
DL, VTList, Ops);
10680 return getNode(Opcode,
DL, VTList, Ops);
10685 SDValue Ops[] = { N1, N2, N3 };
10686 return getNode(Opcode,
DL, VTList, Ops);
10691 SDValue Ops[] = { N1, N2, N3, N4 };
10692 return getNode(Opcode,
DL, VTList, Ops);
10698 SDValue Ops[] = { N1, N2, N3, N4, N5 };
10699 return getNode(Opcode,
DL, VTList, Ops);
10706 return makeVTList(&(*EVTs.insert(VT).first), 1);
10715 void *IP =
nullptr;
10721 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 2);
10722 VTListMap.InsertNode(Result, IP);
10724 return Result->getSDVTList();
10734 void *IP =
nullptr;
10741 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 3);
10742 VTListMap.InsertNode(Result, IP);
10744 return Result->getSDVTList();
10755 void *IP =
nullptr;
10763 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 4);
10764 VTListMap.InsertNode(Result, IP);
10766 return Result->getSDVTList();
10770 unsigned NumVTs = VTs.
size();
10772 ID.AddInteger(NumVTs);
10773 for (
unsigned index = 0; index < NumVTs; index++) {
10774 ID.AddInteger(VTs[index].getRawBits());
10777 void *IP =
nullptr;
10782 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, NumVTs);
10783 VTListMap.InsertNode(Result, IP);
10785 return Result->getSDVTList();
10796 assert(
N->getNumOperands() == 1 &&
"Update with wrong number of operands");
10799 if (
Op ==
N->getOperand(0))
return N;
10802 void *InsertPos =
nullptr;
10803 if (
SDNode *Existing = FindModifiedNodeSlot(
N,
Op, InsertPos))
10808 if (!RemoveNodeFromCSEMaps(
N))
10809 InsertPos =
nullptr;
10812 N->OperandList[0].set(
Op);
10816 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
10821 assert(
N->getNumOperands() == 2 &&
"Update with wrong number of operands");
10824 if (Op1 ==
N->getOperand(0) && Op2 ==
N->getOperand(1))
10828 void *InsertPos =
nullptr;
10829 if (
SDNode *Existing = FindModifiedNodeSlot(
N, Op1, Op2, InsertPos))
10834 if (!RemoveNodeFromCSEMaps(
N))
10835 InsertPos =
nullptr;
10838 if (
N->OperandList[0] != Op1)
10839 N->OperandList[0].set(Op1);
10840 if (
N->OperandList[1] != Op2)
10841 N->OperandList[1].set(Op2);
10845 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
10851 SDValue Ops[] = { Op1, Op2, Op3 };
10858 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
10865 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
10871 unsigned NumOps = Ops.
size();
10872 assert(
N->getNumOperands() == NumOps &&
10873 "Update with wrong number of operands");
10876 if (std::equal(Ops.
begin(), Ops.
end(),
N->op_begin()))
10880 void *InsertPos =
nullptr;
10881 if (
SDNode *Existing = FindModifiedNodeSlot(
N, Ops, InsertPos))
10886 if (!RemoveNodeFromCSEMaps(
N))
10887 InsertPos =
nullptr;
10890 for (
unsigned i = 0; i != NumOps; ++i)
10891 if (
N->OperandList[i] != Ops[i])
10892 N->OperandList[i].set(Ops[i]);
10896 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
10913 if (NewMemRefs.
empty()) {
10919 if (NewMemRefs.
size() == 1) {
10920 N->MemRefs = NewMemRefs[0];
10926 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.
size());
10928 N->MemRefs = MemRefsBuffer;
10929 N->NumMemRefs =
static_cast<int>(NewMemRefs.
size());
10952 SDValue Ops[] = { Op1, Op2 };
10960 SDValue Ops[] = { Op1, Op2, Op3 };
10993 SDValue Ops[] = { Op1, Op2 };
11001 New->setNodeId(-1);
11021 unsigned Order = std::min(
N->getIROrder(), OLoc.
getIROrder());
11022 N->setIROrder(Order);
11045 void *IP =
nullptr;
11046 if (VTs.
VTs[VTs.
NumVTs-1] != MVT::Glue) {
11050 return UpdateSDLocOnMergeSDNode(ON,
SDLoc(
N));
11053 if (!RemoveNodeFromCSEMaps(
N))
11058 N->ValueList = VTs.
VTs;
11068 if (Used->use_empty())
11069 DeadNodeSet.
insert(Used);
11074 MN->clearMemRefs();
11078 createOperands(
N, Ops);
11082 if (!DeadNodeSet.
empty()) {
11084 for (
SDNode *
N : DeadNodeSet)
11085 if (
N->use_empty())
11091 CSEMap.InsertNode(
N, IP);
11096 unsigned OrigOpc = Node->getOpcode();
11101#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
11102 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
11103#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
11104 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
11105#include "llvm/IR/ConstrainedOps.def"
11108 assert(Node->getNumValues() == 2 &&
"Unexpected number of results!");
11111 SDValue InputChain = Node->getOperand(0);
11116 for (
unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
11159 SDValue Ops[] = { Op1, Op2 };
11167 SDValue Ops[] = { Op1, Op2, Op3 };
11181 SDValue Ops[] = { Op1, Op2 };
11189 SDValue Ops[] = { Op1, Op2, Op3 };
11204 SDValue Ops[] = { Op1, Op2 };
11213 SDValue Ops[] = { Op1, Op2, Op3 };
11234 bool DoCSE = VTs.
VTs[VTs.
NumVTs-1] != MVT::Glue;
11236 void *IP =
nullptr;
11242 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
11243 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E,
DL));
11248 N = newSDNode<MachineSDNode>(~Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
11249 createOperands(
N, Ops);
11252 CSEMap.InsertNode(
N, IP);
11265 VT, Operand, SRIdxVal);
11275 VT, Operand, Subreg, SRIdxVal);
11292 if (VTList.
VTs[VTList.
NumVTs - 1] != MVT::Glue) {
11295 void *IP =
nullptr;
11297 E->intersectFlagsWith(Flags);
11307 if (VTList.
VTs[VTList.
NumVTs - 1] != MVT::Glue) {
11310 void *IP =
nullptr;
11311 if (FindNodeOrInsertPos(
ID,
SDLoc(), IP))
11321 SDNode *
N,
unsigned R,
bool IsIndirect,
11323 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
11324 "Expected inlined-at fields to agree");
11327 {}, IsIndirect,
DL, O,
11336 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
11337 "Expected inlined-at fields to agree");
11350 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
11351 "Expected inlined-at fields to agree");
11362 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
11363 "Expected inlined-at fields to agree");
11366 Dependencies, IsIndirect,
DL, O,
11372 unsigned VReg,
bool IsIndirect,
11374 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
11375 "Expected inlined-at fields to agree");
11378 {}, IsIndirect,
DL, O,
11386 unsigned O,
bool IsVariadic) {
11387 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
11388 "Expected inlined-at fields to agree");
11391 DL, O, IsVariadic);
11395 unsigned OffsetInBits,
unsigned SizeInBits,
11396 bool InvalidateDbg) {
11399 assert(FromNode && ToNode &&
"Can't modify dbg values");
11404 if (
From == To || FromNode == ToNode)
11416 if (Dbg->isInvalidated())
11423 bool Changed =
false;
11424 auto NewLocOps = Dbg->copyLocationOps();
11426 NewLocOps.begin(), NewLocOps.end(),
11428 bool Match = Op == FromLocOp;
11438 auto *Expr = Dbg->getExpression();
11444 if (
auto FI = Expr->getFragmentInfo())
11445 if (OffsetInBits + SizeInBits > FI->SizeInBits)
11454 auto AdditionalDependencies = Dbg->getAdditionalDependencies();
11457 Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
11458 Dbg->getDebugLoc(), std::max(ToNode->
getIROrder(), Dbg->getOrder()),
11459 Dbg->isVariadic());
11462 if (InvalidateDbg) {
11464 Dbg->setIsInvalidated();
11465 Dbg->setIsEmitted();
11471 "Transferred DbgValues should depend on the new SDNode");
11477 if (!
N.getHasDebugValue())
11480 auto GetLocationOperand = [](
SDNode *Node,
unsigned ResNo) {
11481 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(Node))
11488 if (DV->isInvalidated())
11490 switch (
N.getOpcode()) {
11496 if (!isa<ConstantSDNode>(N0)) {
11497 bool RHSConstant = isa<ConstantSDNode>(N1);
11500 Offset =
N.getConstantOperandVal(1);
11503 if (!RHSConstant && DV->isIndirect())
11510 auto *DIExpr = DV->getExpression();
11511 auto NewLocOps = DV->copyLocationOps();
11512 bool Changed =
false;
11513 size_t OrigLocOpsSize = NewLocOps.size();
11514 for (
size_t i = 0; i < OrigLocOpsSize; ++i) {
11519 NewLocOps[i].getSDNode() != &
N)
11530 const auto *TmpDIExpr =
11538 NewLocOps.push_back(
RHS);
11544 assert(Changed &&
"Salvage target doesn't use N");
11547 DV->isVariadic() || OrigLocOpsSize != NewLocOps.size();
11549 auto AdditionalDependencies = DV->getAdditionalDependencies();
11551 DV->getVariable(), DIExpr, NewLocOps, AdditionalDependencies,
11552 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder(), IsVariadic);
11554 DV->setIsInvalidated();
11555 DV->setIsEmitted();
11557 N0.
getNode()->dumprFull(
this);
11558 dbgs() <<
" into " << *DIExpr <<
'\n');
11565 TypeSize ToSize =
N.getValueSizeInBits(0);
11569 auto NewLocOps = DV->copyLocationOps();
11570 bool Changed =
false;
11571 for (
size_t i = 0; i < NewLocOps.size(); ++i) {
11573 NewLocOps[i].getSDNode() != &
N)
11580 assert(Changed &&
"Salvage target doesn't use N");
11585 DV->getAdditionalDependencies(), DV->isIndirect(),
11586 DV->getDebugLoc(), DV->getOrder(), DV->isVariadic());
11589 DV->setIsInvalidated();
11590 DV->setIsEmitted();
11592 dbgs() <<
" into " << *DbgExpression <<
'\n');
11599 assert((!Dbg->getSDNodes().empty() ||
11602 return Op.getKind() == SDDbgOperand::FRAMEIX;
11604 "Salvaged DbgValue should depend on a new SDNode");
11612 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(
DL) &&
11613 "Expected inlined-at fields to agree");
11629 while (UI != UE &&
N == UI->
getUser())
11637 :
SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
11650 "Cannot replace with this method!");
11666 RAUWUpdateListener Listener(*
this, UI, UE);
11671 RemoveNodeFromCSEMaps(
User);
11686 AddModifiedNodeToCSEMaps(
User);
11702 for (
unsigned i = 0, e =
From->getNumValues(); i != e; ++i)
11705 "Cannot use this version of ReplaceAllUsesWith!");
11713 for (
unsigned i = 0, e =
From->getNumValues(); i != e; ++i)
11714 if (
From->hasAnyUseOfValue(i)) {
11715 assert((i < To->getNumValues()) &&
"Invalid To location");
11724 RAUWUpdateListener Listener(*
this, UI, UE);
11729 RemoveNodeFromCSEMaps(
User);
11745 AddModifiedNodeToCSEMaps(
User);
11759 if (
From->getNumValues() == 1)
11762 for (
unsigned i = 0, e =
From->getNumValues(); i != e; ++i) {
11772 RAUWUpdateListener Listener(*
this, UI, UE);
11777 RemoveNodeFromCSEMaps(
User);
11783 bool To_IsDivergent =
false;
11792 if (To_IsDivergent !=
From->isDivergent())
11797 AddModifiedNodeToCSEMaps(
User);
11810 if (
From == To)
return;
11813 if (
From.getNode()->getNumValues() == 1) {
11825 UE =
From.getNode()->use_end();
11826 RAUWUpdateListener Listener(*
this, UI, UE);
11829 bool UserRemovedFromCSEMaps =
false;
11839 if (
Use.getResNo() !=
From.getResNo()) {
11846 if (!UserRemovedFromCSEMaps) {
11847 RemoveNodeFromCSEMaps(
User);
11848 UserRemovedFromCSEMaps =
true;
11858 if (!UserRemovedFromCSEMaps)
11863 AddModifiedNodeToCSEMaps(
User);
11882bool operator<(
const UseMemo &L,
const UseMemo &R) {
11883 return (intptr_t)L.User < (intptr_t)R.User;
11893 for (UseMemo &Memo :
Uses)
11894 if (Memo.User ==
N)
11895 Memo.User =
nullptr;
11907 switch (
Node->getOpcode()) {
11921 "Conflicting divergence information!");
11926 for (
const auto &
Op :
N->ops()) {
11927 EVT VT =
Op.getValueType();
11930 if (VT != MVT::Other &&
Op.getNode()->isDivergent() &&
11942 if (
N->SDNodeBits.IsDivergent != IsDivergent) {
11943 N->SDNodeBits.IsDivergent = IsDivergent;
11946 }
while (!Worklist.
empty());
11949void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
11951 Order.
reserve(AllNodes.size());
11953 unsigned NOps =
N.getNumOperands();
11956 Order.push_back(&
N);
11958 for (
size_t I = 0;
I != Order.size(); ++
I) {
11960 for (
auto *U :
N->users()) {
11961 unsigned &UnsortedOps = Degree[U];
11962 if (0 == --UnsortedOps)
11963 Order.push_back(U);
11968#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
11969void SelectionDAG::VerifyDAGDivergence() {
11970 std::vector<SDNode *> TopoOrder;
11971 CreateTopologicalOrder(TopoOrder);
11972 for (
auto *
N : TopoOrder) {
11974 "Divergence bit inconsistency detected");
11997 for (
unsigned i = 0; i != Num; ++i) {
11998 unsigned FromResNo =
From[i].getResNo();
12001 if (
Use.getResNo() == FromResNo) {
12003 Uses.push_back(Memo);
12010 RAUOVWUpdateListener Listener(*
this,
Uses);
12012 for (
unsigned UseIndex = 0, UseIndexEnd =
Uses.size();
12013 UseIndex != UseIndexEnd; ) {
12019 if (
User ==
nullptr) {
12025 RemoveNodeFromCSEMaps(
User);
12032 unsigned i =
Uses[UseIndex].Index;
12037 }
while (UseIndex != UseIndexEnd &&
Uses[UseIndex].
User ==
User);
12041 AddModifiedNodeToCSEMaps(
User);
12049 unsigned DAGSize = 0;
12065 unsigned Degree =
N.getNumOperands();
12068 N.setNodeId(DAGSize++);
12070 if (Q != SortedPos)
12071 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
12072 assert(SortedPos != AllNodes.end() &&
"Overran node list");
12076 N.setNodeId(Degree);
12088 unsigned Degree =
P->getNodeId();
12089 assert(Degree != 0 &&
"Invalid node degree");
12093 P->setNodeId(DAGSize++);
12094 if (
P->getIterator() != SortedPos)
12095 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(
P));
12096 assert(SortedPos != AllNodes.end() &&
"Overran node list");
12100 P->setNodeId(Degree);
12103 if (Node.getIterator() == SortedPos) {
12107 dbgs() <<
"Overran sorted position:\n";
12109 dbgs() <<
"Checking if this is due to cycles\n";
12116 assert(SortedPos == AllNodes.end() &&
12117 "Topological sort incomplete!");
12119 "First node in topological sort is not the entry token!");
12120 assert(AllNodes.front().getNodeId() == 0 &&
12121 "First node in topological sort has non-zero id!");
12122 assert(AllNodes.front().getNumOperands() == 0 &&
12123 "First node in topological sort has operands!");
12124 assert(AllNodes.back().getNodeId() == (
int)DAGSize-1 &&
12125 "Last node in topologic sort has unexpected id!");
12126 assert(AllNodes.back().use_empty() &&
12127 "Last node in topologic sort has users!");
12135 for (
SDNode *SD : DB->getSDNodes()) {
12139 SD->setHasDebugValue(
true);
12141 DbgInfo->
add(DB, isParameter);
12148 assert(isa<MemSDNode>(NewMemOpChain) &&
"Expected a memop node");
12154 if (OldChain == NewMemOpChain || OldChain.
use_empty())
12155 return NewMemOpChain;
12158 OldChain, NewMemOpChain);
12161 return TokenFactor;
12166 assert(isa<MemSDNode>(NewMemOp.
getNode()) &&
"Expected a memop node");
12174 assert(isa<ExternalSymbolSDNode>(
Op) &&
"Node should be an ExternalSymbol");
12176 auto *Symbol = cast<ExternalSymbolSDNode>(
Op)->getSymbol();
12180 if (OutFunction !=
nullptr)
12188 std::string ErrorStr;
12190 ErrorFormatter <<
"Undefined external symbol ";
12191 ErrorFormatter <<
'"' << Symbol <<
'"';
12201 return Const !=
nullptr && Const->isZero();
12210 return Const !=
nullptr && Const->isZero() && !Const->isNegative();
12215 return Const !=
nullptr && Const->isAllOnes();
12220 return Const !=
nullptr && Const->isOne();
12225 return Const !=
nullptr && Const->isMinSignedValue();
12229 unsigned OperandNo) {
12234 APInt Const = ConstV->getAPIntValue().trunc(V.getScalarValueSizeInBits());
12240 return Const.isZero();
12242 return Const.isOne();
12245 return Const.isAllOnes();
12247 return Const.isMinSignedValue();
12249 return Const.isMaxSignedValue();
12254 return OperandNo == 1 && Const.isZero();
12257 return OperandNo == 1 && Const.isOne();
12262 return ConstFP->isZero() &&
12263 (Flags.hasNoSignedZeros() || ConstFP->isNegative());
12265 return OperandNo == 1 && ConstFP->isZero() &&
12266 (Flags.hasNoSignedZeros() || !ConstFP->isNegative());
12268 return ConstFP->isExactlyValue(1.0);
12270 return OperandNo == 1 && ConstFP->isExactlyValue(1.0);
12274 EVT VT = V.getValueType();
12276 APFloat NeutralAF = !Flags.hasNoNaNs()
12278 : !Flags.hasNoInfs()
12284 return ConstFP->isExactlyValue(NeutralAF);
12293 V = V.getOperand(0);
12298 while (V.getOpcode() ==
ISD::BITCAST && V.getOperand(0).hasOneUse())
12299 V = V.getOperand(0);
12305 V = V.getOperand(0);
12311 V = V.getOperand(0);
12319 unsigned NumBits = V.getScalarValueSizeInBits();
12322 return C && (
C->getAPIntValue().countr_one() >= NumBits);
12326 bool AllowTruncation) {
12327 EVT VT =
N.getValueType();
12336 bool AllowTruncation) {
12343 EVT VecEltVT =
N->getValueType(0).getVectorElementType();
12344 if (
auto *CN = dyn_cast<ConstantSDNode>(
N->getOperand(0))) {
12345 EVT CVT = CN->getValueType(0);
12346 assert(CVT.
bitsGE(VecEltVT) &&
"Illegal splat_vector element extension");
12347 if (AllowTruncation || CVT == VecEltVT)
12354 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
12359 if (CN && (UndefElements.
none() || AllowUndefs)) {
12361 EVT NSVT =
N.getValueType().getScalarType();
12362 assert(CVT.
bitsGE(NSVT) &&
"Illegal build vector element extension");
12363 if (AllowTruncation || (CVT == NSVT))
12372 EVT VT =
N.getValueType();
12380 const APInt &DemandedElts,
12381 bool AllowUndefs) {
12388 BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
12390 if (CN && (UndefElements.
none() || AllowUndefs))
12405 return C &&
C->isZero();
12411 return C &&
C->isOne();
12416 unsigned BitWidth =
N.getScalarValueSizeInBits();
12418 return C &&
C->isAllOnes() &&
C->getValueSizeInBits(0) ==
BitWidth;
12427 :
SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
12451 std::vector<EVT> VTs;
12464const EVT *SDNode::getValueTypeList(
MVT VT) {
12465 static EVTArray SimpleVTArray;
12468 return &SimpleVTArray.VTs[VT.
SimpleTy];
12479 if (U.getResNo() ==
Value) {
12496 if (U.getResNo() ==
Value)
12534 return any_of(
N->op_values(),
12535 [
this](
SDValue Op) { return this == Op.getNode(); });
12549 unsigned Depth)
const {
12550 if (*
this == Dest)
return true;
12554 if (
Depth == 0)
return false;
12574 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
12579 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(*
this)) {
12580 if (Ld->isUnordered())
12581 return Ld->getChain().reachesChainWithoutSideEffects(Dest,
Depth-1);
12594 this->Flags &= Flags;
12600 bool AllowPartials) {
12609 return Op.getOpcode() ==
unsigned(BinOp);
12615 unsigned CandidateBinOp =
Op.getOpcode();
12616 if (
Op.getValueType().isFloatingPoint()) {
12618 switch (CandidateBinOp) {
12620 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
12630 auto PartialReduction = [&](
SDValue Op,
unsigned NumSubElts) {
12631 if (!AllowPartials || !
Op)
12633 EVT OpVT =
Op.getValueType();
12656 unsigned Stages =
Log2_32(
Op.getValueType().getVectorNumElements());
12658 for (
unsigned i = 0; i < Stages; ++i) {
12659 unsigned MaskEnd = (1 << i);
12661 if (
Op.getOpcode() != CandidateBinOp)
12662 return PartialReduction(PrevOp, MaskEnd);
12671 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
12678 return PartialReduction(PrevOp, MaskEnd);
12681 for (
int Index = 0; Index < (int)MaskEnd; ++Index)
12682 if (Shuffle->
getMaskElt(Index) != (int)(MaskEnd + Index))
12683 return PartialReduction(PrevOp, MaskEnd);
12690 while (
Op.getOpcode() == CandidateBinOp) {
12691 unsigned NumElts =
Op.getValueType().getVectorNumElements();
12699 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
12700 if (NumSrcElts != (2 * NumElts))
12715 EVT VT =
N->getValueType(0);
12724 else if (NE > ResNE)
12727 if (
N->getNumValues() == 2) {
12730 EVT VT1 =
N->getValueType(1);
12734 for (i = 0; i != NE; ++i) {
12735 for (
unsigned j = 0, e =
N->getNumOperands(); j != e; ++j) {
12736 SDValue Operand =
N->getOperand(j);
12750 for (; i < ResNE; ++i) {
12762 assert(
N->getNumValues() == 1 &&
12763 "Can't unroll a vector with multiple results!");
12769 for (i= 0; i != NE; ++i) {
12770 for (
unsigned j = 0, e =
N->getNumOperands(); j != e; ++j) {
12771 SDValue Operand =
N->getOperand(j);
12784 switch (
N->getOpcode()) {
12810 const auto *ASC = cast<AddrSpaceCastSDNode>(
N);
12812 ASC->getSrcAddressSpace(),
12813 ASC->getDestAddressSpace()));
12819 for (; i < ResNE; ++i)
12828 unsigned Opcode =
N->getOpcode();
12832 "Expected an overflow opcode");
12834 EVT ResVT =
N->getValueType(0);
12835 EVT OvVT =
N->getValueType(1);
12844 else if (NE > ResNE)
12856 for (
unsigned i = 0; i < NE; ++i) {
12857 SDValue Res =
getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
12880 if (LD->isVolatile() ||
Base->isVolatile())
12883 if (!LD->isSimple())
12885 if (LD->isIndexed() ||
Base->isIndexed())
12887 if (LD->getChain() !=
Base->getChain())
12889 EVT VT = LD->getMemoryVT();
12897 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *
this,
Offset))
12898 return (Dist * (int64_t)Bytes ==
Offset);
12907 int64_t GVOffset = 0;
12919 int FrameIdx = INT_MIN;
12920 int64_t FrameOffset = 0;
12922 FrameIdx = FI->getIndex();
12924 isa<FrameIndexSDNode>(
Ptr.getOperand(0))) {
12926 FrameIdx = cast<FrameIndexSDNode>(
Ptr.getOperand(0))->getIndex();
12927 FrameOffset =
Ptr.getConstantOperandVal(1);
12930 if (FrameIdx != INT_MIN) {
12935 return std::nullopt;
12945 "Split node must be a scalar type");
12950 return std::make_pair(
Lo,
Hi);
12963 return std::make_pair(LoVT, HiVT);
12971 bool *HiIsEmpty)
const {
12981 "Mixing fixed width and scalable vectors when enveloping a type");
12986 *HiIsEmpty =
false;
12994 return std::make_pair(LoVT, HiVT);
12999std::pair<SDValue, SDValue>
13004 "Splitting vector with an invalid mixture of fixed and scalable "
13007 N.getValueType().getVectorMinNumElements() &&
13008 "More vector elements requested than available!");
13018 return std::make_pair(
Lo,
Hi);
13025 EVT VT =
N.getValueType();
13027 "Expecting the mask to be an evenly-sized vector");
13035 return std::make_pair(
Lo,
Hi);
13040 EVT VT =
N.getValueType();
13049 unsigned Start,
unsigned Count,
13051 EVT VT =
Op.getValueType();
13054 if (EltVT ==
EVT())
13057 for (
unsigned i = Start, e = Start + Count; i != e; ++i) {
13070 return Val.MachineCPVal->getType();
13071 return Val.ConstVal->getType();
13075 unsigned &SplatBitSize,
13076 bool &HasAnyUndefs,
13077 unsigned MinSplatBits,
13078 bool IsBigEndian)
const {
13082 if (MinSplatBits > VecWidth)
13087 SplatValue =
APInt(VecWidth, 0);
13088 SplatUndef =
APInt(VecWidth, 0);
13095 assert(NumOps > 0 &&
"isConstantSplat has 0-size build vector");
13098 for (
unsigned j = 0; j < NumOps; ++j) {
13099 unsigned i = IsBigEndian ? NumOps - 1 - j : j;
13101 unsigned BitPos = j * EltWidth;
13104 SplatUndef.
setBits(BitPos, BitPos + EltWidth);
13105 else if (
auto *CN = dyn_cast<ConstantSDNode>(OpVal))
13106 SplatValue.
insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
13107 else if (
auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
13108 SplatValue.
insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
13115 HasAnyUndefs = (SplatUndef != 0);
13118 while (VecWidth > 8) {
13123 unsigned HalfSize = VecWidth / 2;
13130 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
13131 MinSplatBits > HalfSize)
13134 SplatValue = HighValue | LowValue;
13135 SplatUndef = HighUndef & LowUndef;
13137 VecWidth = HalfSize;
13146 SplatBitSize = VecWidth;
13153 if (UndefElements) {
13154 UndefElements->
clear();
13155 UndefElements->
resize(NumOps);
13161 for (
unsigned i = 0; i != NumOps; ++i) {
13162 if (!DemandedElts[i])
13165 if (
Op.isUndef()) {
13167 (*UndefElements)[i] =
true;
13168 }
else if (!Splatted) {
13170 }
else if (Splatted !=
Op) {
13176 unsigned FirstDemandedIdx = DemandedElts.
countr_zero();
13178 "Can only have a splat without a constant for all undefs.");
13195 if (UndefElements) {
13196 UndefElements->
clear();
13197 UndefElements->
resize(NumOps);
13205 for (
unsigned I = 0;
I != NumOps; ++
I)
13207 (*UndefElements)[
I] =
true;
13210 for (
unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
13211 Sequence.append(SeqLen,
SDValue());
13212 for (
unsigned I = 0;
I != NumOps; ++
I) {
13213 if (!DemandedElts[
I])
13215 SDValue &SeqOp = Sequence[
I % SeqLen];
13217 if (
Op.isUndef()) {
13222 if (SeqOp && !SeqOp.
isUndef() && SeqOp !=
Op) {
13228 if (!Sequence.empty())
13232 assert(Sequence.empty() &&
"Failed to empty non-repeating sequence pattern");
13245 return dyn_cast_or_null<ConstantSDNode>(
13251 return dyn_cast_or_null<ConstantSDNode>(
getSplatValue(UndefElements));
13257 return dyn_cast_or_null<ConstantFPSDNode>(
13263 return dyn_cast_or_null<ConstantFPSDNode>(
getSplatValue(UndefElements));
13270 dyn_cast_or_null<ConstantFPSDNode>(
getSplatValue(UndefElements))) {
13273 const APFloat &APF = CN->getValueAPF();
13279 return IntVal.exactLogBase2();
13285 bool IsLittleEndian,
unsigned DstEltSizeInBits,
13293 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
13294 "Invalid bitcast scale");
13299 BitVector SrcUndeElements(NumSrcOps,
false);
13301 for (
unsigned I = 0;
I != NumSrcOps; ++
I) {
13303 if (
Op.isUndef()) {
13304 SrcUndeElements.
set(
I);
13307 auto *CInt = dyn_cast<ConstantSDNode>(
Op);
13308 auto *CFP = dyn_cast<ConstantFPSDNode>(
Op);
13309 assert((CInt || CFP) &&
"Unknown constant");
13310 SrcBitElements[
I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits)
13311 : CFP->getValueAPF().bitcastToAPInt();
13315 recastRawBits(IsLittleEndian, DstEltSizeInBits, RawBitElements,
13316 SrcBitElements, UndefElements, SrcUndeElements);
13321 unsigned DstEltSizeInBits,
13326 unsigned NumSrcOps = SrcBitElements.
size();
13327 unsigned SrcEltSizeInBits = SrcBitElements[0].getBitWidth();
13328 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
13329 "Invalid bitcast scale");
13330 assert(NumSrcOps == SrcUndefElements.
size() &&
13331 "Vector size mismatch");
13333 unsigned NumDstOps = (NumSrcOps * SrcEltSizeInBits) / DstEltSizeInBits;
13334 DstUndefElements.
clear();
13335 DstUndefElements.
resize(NumDstOps,
false);
13339 if (SrcEltSizeInBits <= DstEltSizeInBits) {
13340 unsigned Scale = DstEltSizeInBits / SrcEltSizeInBits;
13341 for (
unsigned I = 0;
I != NumDstOps; ++
I) {
13342 DstUndefElements.
set(
I);
13343 APInt &DstBits = DstBitElements[
I];
13344 for (
unsigned J = 0; J != Scale; ++J) {
13345 unsigned Idx = (
I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
13346 if (SrcUndefElements[
Idx])
13348 DstUndefElements.
reset(
I);
13349 const APInt &SrcBits = SrcBitElements[
Idx];
13351 "Illegal constant bitwidths");
13352 DstBits.
insertBits(SrcBits, J * SrcEltSizeInBits);
13359 unsigned Scale = SrcEltSizeInBits / DstEltSizeInBits;
13360 for (
unsigned I = 0;
I != NumSrcOps; ++
I) {
13361 if (SrcUndefElements[
I]) {
13362 DstUndefElements.
set(
I * Scale, (
I + 1) * Scale);
13365 const APInt &SrcBits = SrcBitElements[
I];
13366 for (
unsigned J = 0; J != Scale; ++J) {
13367 unsigned Idx = (
I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
13368 APInt &DstBits = DstBitElements[
Idx];
13369 DstBits = SrcBits.
extractBits(DstEltSizeInBits, J * DstEltSizeInBits);
13376 unsigned Opc =
Op.getOpcode();
13383std::optional<std::pair<APInt, APInt>>
13387 return std::nullopt;
13391 return std::nullopt;
13398 return std::nullopt;
13400 for (
unsigned i = 2; i < NumOps; ++i) {
13402 return std::nullopt;
13405 if (Val != (Start + (Stride * i)))
13406 return std::nullopt;
13409 return std::make_pair(Start, Stride);
13425 for (
int Idx = Mask[i]; i != e; ++i)
13426 if (Mask[i] >= 0 && Mask[i] !=
Idx)
13434 SDValue N,
bool AllowOpaques)
const {
13437 if (
auto *
C = dyn_cast<ConstantSDNode>(
N))
13438 return AllowOpaques || !
C->isOpaque();
13445 if (
auto *GA = dyn_cast<GlobalAddressSDNode>(
N))
13451 isa<ConstantSDNode>(
N.getOperand(0)))
13458 if (isa<ConstantFPSDNode>(
N))
13465 isa<ConstantFPSDNode>(
N.getOperand(0)))
13472 bool AllowTruncation)
const {
13475 return std::nullopt;
13477 const APInt &CVal = Const->getAPIntValue();
13484 return std::nullopt;
13490 return std::nullopt;
13498 assert(!Node->OperandList &&
"Node already has operands");
13500 "too many operands to fit into SDNode");
13501 SDUse *Ops = OperandRecycler.allocate(
13504 bool IsDivergent =
false;
13505 for (
unsigned I = 0;
I != Vals.
size(); ++
I) {
13506 Ops[
I].setUser(Node);
13507 Ops[
I].setInitial(Vals[
I]);
13511 if (VT != MVT::Other &&
13513 Ops[
I].
getNode()->isDivergent()) {
13514 IsDivergent =
true;
13518 Node->OperandList = Ops;
13521 Node->SDNodeBits.IsDivergent = IsDivergent;
13529 while (Vals.
size() > Limit) {
13530 unsigned SliceIdx = Vals.
size() - Limit;
13606 const SDLoc &DLoc) {
13611 Entry.Ty =
Ptr.getValueType().getTypeForEVT(*
getContext());
13612 Args.push_back(Entry);
13624 assert(
From && To &&
"Invalid SDNode; empty source SDValue?");
13625 auto I = SDEI.find(
From);
13626 if (
I == SDEI.end())
13631 NodeExtraInfo NEI =
I->second;
13640 SDEI[To] = std::move(NEI);
13659 Leafs.emplace_back(
N);
13662 if (!FromReach.
insert(
N).second)
13670 auto DeepCopyTo = [&](
auto &&Self,
const SDNode *
N) {
13673 if (!Visited.
insert(
N).second)
13678 if (!Self(Self,
Op.getNode()))
13698 for (
const SDNode *
N : StartFrom)
13699 VisitFrom(VisitFrom,
N,
MaxDepth - PrevDepth);
13711 errs() <<
"warning: incomplete propagation of SelectionDAG::NodeExtraInfo\n";
13712 assert(
false &&
"From subgraph too complex - increase max. MaxDepth?");
13714 SDEI[To] = std::move(NEI);
13728 if (!Visited.
insert(
N).second) {
13729 errs() <<
"Detected cycle in SelectionDAG\n";
13730 dbgs() <<
"Offending node:\n";
13731 N->dumprFull(DAG);
dbgs() <<
"\n";
13747 bool check = force;
13748#ifdef EXPENSIVE_CHECKS
13752 assert(
N &&
"Checking nonexistent SDNode");
static bool isConstant(const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
#define __asan_unpoison_memory_region(p, size)
#define LLVM_LIKELY(EXPR)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Looks at all the uses of the given value Returns the Liveness deduced from the uses of this value Adds all uses that cause the result to be MaybeLive to MaybeLiveRetUses If the result is MaybeLiveUses might be modified but its content should be ignored(since it might not be complete). DeadArgumentEliminationPass
Given that RA is a live propagate it s liveness to any other values it uses(according to Uses). void DeadArgumentEliminationPass
Given that RA is a live value
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines a hash set that can be used to remove duplication of nodes in a graph.
static const unsigned MaxDepth
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB)
static bool shouldLowerMemFuncForSize(const MachineFunction &MF)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
mir Rename Register Operands
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
PowerPC Reduce CR logical Operation
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
Contains matchers for matching SelectionDAG nodes and values.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo)
Lower the call to 'memset' intrinsic function into a series of store operations.
static std::optional< APInt > FoldValueWithUndef(unsigned Opcode, const APInt &C1, bool IsUndef1, const APInt &C2, bool IsUndef2)
static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step, SelectionDAG &DAG)
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC, SDVTList VTList, ArrayRef< SDValue > OpList)
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, const TargetLowering &TLI, const ConstantDataArraySlice &Slice)
getMemsetStringVal - Similar to getMemsetValue.
static cl::opt< bool > EnableMemCpyDAGOpt("enable-memcpy-dag-opt", cl::Hidden, cl::init(true), cl::desc("Gang up loads and stores generated by inlining of memcpy"))
static bool haveNoCommonBitsSetCommutative(SDValue A, SDValue B)
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList)
AddNodeIDValueTypes - Value type lists are intern'd so we can represent them solely with their pointe...
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef< int > M)
Swaps the values of N1 and N2.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice)
Returns true if memcpy source is constant data.
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo)
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo, AAResults *AA)
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)
AddNodeIDOpcode - Add the node opcode to the NodeID data.
static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike)
static bool doNotCSE(SDNode *N)
doNotCSE - Return true if CSE should not be performed for this node.
static cl::opt< int > MaxLdStGlue("ldstmemcpy-glue-max", cl::desc("Number limit for gluing ld/st of memcpy."), cl::Hidden, cl::init(0))
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef< SDValue > Ops)
AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static bool canFoldStoreIntoLibCallOutputPointers(StoreSDNode *StoreNode, SDNode *FPNode)
Given a store node StoreNode, return true if it is safe to fold that node into FPNode,...
static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
Try to simplify vector concatenation to an input value, undef, or build vector.
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, SelectionDAG &DAG, SDValue Ptr, int64_t Offset=0)
InferPointerInfo - If the specified ptr/offset is a frame index, infer a MachinePointerInfo record fr...
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N)
If this is an SDNode with special info, add this info to the NodeID data.
static bool gluePropagatesDivergence(const SDNode *Node)
Return true if a glue output should propagate divergence information.
static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G)
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs)
makeVTList - Return an instance of the SDVTList struct initialized with the specified members.
static void VerifySDNode(SDNode *N, const TargetLowering *TLI)
VerifySDNode - Check the given SDNode. Aborts if it is invalid.
static void checkForCyclesHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallPtrSetImpl< const SDNode * > &Checked, const llvm::SelectionDAG *DAG)
static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SmallVector< SDValue, 32 > &OutChains, unsigned From, unsigned To, SmallVector< SDValue, 16 > &OutLoadChains, SmallVector< SDValue, 16 > &OutStoreChains)
static int isSignedOp(ISD::CondCode Opcode)
For an integer comparison, return 1 if the comparison is a signed operation and 2 if the result is an...
static std::optional< APInt > FoldValue(unsigned Opcode, const APInt &C1, const APInt &C2)
static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, unsigned AS)
static cl::opt< unsigned > MaxSteps("has-predecessor-max-steps", cl::Hidden, cl::init(8192), cl::desc("DAG combiner limit number of steps when searching DAG " "for predecessor nodes"))
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static unsigned getSize(unsigned Kind)
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
opStatus add(const APFloat &RHS, roundingMode RM)
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
opStatus multiply(const APFloat &RHS, roundingMode RM)
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
opStatus mod(const APFloat &RHS)
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
APInt usub_sat(const APInt &RHS) const
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
APInt sadd_sat(const APInt &RHS) const
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
void clearAllBits()
Set every bit to 0.
APInt rotr(unsigned rotateAmt) const
Rotate right by rotateAmt.
APInt reverseBits() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
APInt sshl_sat(const APInt &RHS) const
APInt ushl_sat(const APInt &RHS) const
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
unsigned logBase2() const
APInt uadd_sat(const APInt &RHS) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
void setAllBits()
Set every bit to 1.
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
Recycle small arrays allocated from a BumpPtrAllocator.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
This is an SDNode representing atomic operations.
static BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG)
Parses tree in N for base, index, offset addresses.
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
void clear()
clear - Removes all bits from the bitvector.
bool none() const
none - Returns true if none of the bits are set.
size_type size() const
size - Returns the number of bits in this bitvector.
int64_t getOffset() const
unsigned getTargetFlags() const
const BlockAddress * getBlockAddress() const
The address of a basic block.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &RawBitElements, BitVector &UndefElements) const
Extract the raw bit data from a build vector of Undef, Constant or ConstantFP node elements.
static void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &DstBitElements, ArrayRef< APInt > SrcBitElements, BitVector &DstUndefElements, const BitVector &SrcUndefElements)
Recast bit data SrcBitElements to DstEltSizeInBits wide elements.
bool getRepeatedSequence(const APInt &DemandedElts, SmallVectorImpl< SDValue > &Sequence, BitVector *UndefElements=nullptr) const
Find the shortest repeating sequence of values in the build vector.
ConstantFPSDNode * getConstantFPSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant FP or null if this is not a constant FP splat.
std::optional< std::pair< APInt, APInt > > isConstantSequence() const
If this BuildVector is constant and represents the numerical series "<a, a+n, a+2n,...
SDValue getSplatValue(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted value or a null value if this is not a splat.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power or 2,...
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, Align Alignment)
Allocate space at the specified alignment.
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it,...
This class represents a function call, abstracting a target machine's calling convention.
static bool isValueValidForType(EVT VT, const APFloat &Val)
const APFloat & getValueAPF() const
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
ConstantFP - Floating Point Values [float, double].
const APFloat & getValue() const
This is the shared class of boolean and integer constants.
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
bool isMachineConstantPoolEntry() const
This class represents a range of values.
ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
KnownBits toKnownBits() const
Return known bits for values in this range.
ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
This is an important base class in LLVM.
Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
static ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static const DIExpression * convertToVariadicExpression(const DIExpression *Expr)
If Expr is a non-variadic expression (i.e.
static std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
Base class for variables.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
unsigned getPointerTypeSizeInBits(Type *) const
Layout pointer size, in bits, based on the type.
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Implements a dense probed hash-table based set.
const char * getSymbol() const
unsigned getTargetFlags() const
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
MachineBasicBlock * MBB
MBB - The current block.
Data structure describing the variable locations in a function.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
AttributeList getAttributes() const
Return the attribute list for this Function.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
int64_t getOffset() const
unsigned getAddressSpace() const
unsigned getTargetFlags() const
const GlobalValue * getGlobal() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
This class is used to form a handle around another node that is persistent and is updated across invo...
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
constexpr bool isValid() const
This is an important class for using LLVM in a threaded context.
This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate the offet and size that ar...
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static MVT getIntegerVT(unsigned BitWidth)
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
Abstract base class for all machine specific constantpool value subclasses.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
bool isNonTemporal() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value,.
bool isDereferenceable() const
This class contains meta information specific to a module.
An SDNode that represents everything that will be needed to construct a MachineInstr.
This class is used to represent an MGATHER node.
This class is used to represent an MLOAD node.
This class is used to represent an MSCATTER node.
This class is used to represent an MSTORE node.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt, MachineMemOperand *MMO)
MachineMemOperand * MMO
Memory reference information.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
unsigned getRawSubclassData() const
Return the SubclassData value, without HasDebugValue.
EVT getMemoryVT() const
Return the type of the in-memory value.
Representation for a specific memory location.
A Module instance is used to store all the information related to an LLVM module.
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Pass interface - Implemented by all 'passes'.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Analysis providing profile information.
void Deallocate(SubClass *E)
Deallocate - Release storage for the pointed-to object.
Wrapper class representing virtual and physical registers.
Keeps track of dbg_value information through SDISel.
BumpPtrAllocator & getAlloc()
void add(SDDbgValue *V, bool isParameter)
void erase(const SDNode *Node)
Invalidate all DbgValues attached to the node and remove it from the Node-to-DbgValues map.
ArrayRef< SDDbgValue * > getSDDbgValues(const SDNode *Node) const
Holds the information from a dbg_label node through SDISel.
Holds the information for a single machine location through SDISel; either an SDNode,...
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(unsigned VReg)
static SDDbgOperand fromConst(const Value *Const)
@ SDNODE
Value is the result of an expression.
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
unsigned getIROrder() const
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
void dumprFull(const SelectionDAG *G=nullptr) const
printrFull to dbgs().
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
static constexpr size_t getMaxNumOperands()
Return the maximum number of operands that a SDNode can hold.
iterator_range< use_iterator > uses()
MemSDNodeBitfields MemSDNodeBits
void Profile(FoldingSetNodeID &ID) const
Gather unique data for the node.
bool getHasDebugValue() const
SDNodeFlags getFlags() const
void setNodeId(int Id)
Set unique node id.
void intersectFlagsWith(const SDNodeFlags Flags)
Clear any flags in this node that aren't also set in Flags.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
bool use_empty() const
Return true if there are no uses of this node.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
static bool areOnlyUsersOf(ArrayRef< const SDNode * > Nodes, const SDNode *N)
Return true if all the users of N are contained in Nodes.
bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
const APInt & getConstantOperandAPInt(unsigned Num) const
Helper method returns the APInt of a ConstantSDNode operand.
bool hasPredecessor(const SDNode *N) const
Return true if N is a predecessor of this node.
bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Return true if the type of the node type undefined.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
op_iterator op_end() const
op_iterator op_begin() const
void DropOperands()
Release the operands and set this node to have zero operands.
Represents a use of a SDNode.
EVT getValueType() const
Convenience function for get().getValueType().
SDNode * getUser()
This returns the SDNode that contains this Use.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing an...
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
virtual bool isTargetMemoryOpcode(unsigned Opcode) const
Returns true if a node with the given target-specific opcode has a memory operand.
virtual SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo) const
Emit target-specific code that performs a memset.
virtual SDValue EmitTargetCodeForMemmove(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memmove.
virtual SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memcpy.
SDNodeFlags getFlags() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getVPZeroExtendInReg(SDValue Op, SDValue Mask, SDValue EVL, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op)
Return the specified value casted to the target's desired shift amount type.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsExpanding=false)
SDValue getSplatSourceVector(SDValue V, int &SplatIndex)
If V is a splatted value, return the source vector and its splat index.
SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
OverflowKind computeOverflowForUnsignedSub(SDValue N0, SDValue N1) const
Determine if the result of the unsigned sub of 2 nodes can overflow.
unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
bool isKnownNeverSNaN(SDValue Op, unsigned Depth=0) const
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
void updateDivergence(SDNode *N)
SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT, SDNodeFlags Flags)
Get the (commutative) neutral element for the given opcode, if it exists.
SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDNode * SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type,...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SelectionDAG(const TargetMachine &TM, CodeGenOptLevel)
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getBitcastedSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, unsigned VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
OverflowKind
Used to represent the possible overflow behavior of an operation.
static unsigned getHasPredecessorMaxSteps()
bool haveNoCommonBitsSet(SDValue A, SDValue B) const
Return true if A and B have no common bits set.
bool cannotBeOrderedNegativeFP(SDValue Op) const
Test whether the given float value is known to be positive.
SDValue getRegister(Register Reg, EVT VT)
bool calculateDivergence(SDNode *N)
SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
SDNode * mutateStrictFPToFP(SDNode *Node)
Mutate the specified strict FP node to its non-strict equivalent, unlinking the node from its chain a...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getBitcastedZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
std::optional< uint64_t > getValidMinimumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
bool shouldOptForSize() const
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
bool isEqualTo(SDValue A, SDValue B) const
Test whether two SDValues are known to compare equal.
static constexpr unsigned MaxRecursionDepth
SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue expandVACopy(SDNode *Node)
Expand the specified ISD::VACOPY node as the Legalize pass would.
SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
APInt computeVectorKnownZeroElements(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
For each demanded element of a vector, see if it is known to be zero.
void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
void salvageDebugInfo(SDNode &N)
To be invoked on an SDNode that is slated to be erased.
SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
std::pair< SDValue, SDValue > UnrollVectorOverflowOp(SDNode *N, unsigned ResNE=0)
Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getBitcastedAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
void DeleteNode(SDNode *N)
Remove the specified node from the system.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal)
Try to simplify a select/vselect into 1 of its operands or a constant.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
bool isConstantFPBuildVectorOrConstantFP(SDValue N) const
Test whether the given value is a constant FP or similar node.
const DataLayout & getDataLayout() const
SDValue expandVAArg(SDNode *Node)
Expand the specified ISD::VAARG node as the Legalize pass would.
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
SDValue getMaskedHistogram(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
OverflowKind computeOverflowForUnsignedMul(SDValue N0, SDValue N1) const
Determine if the result of the unsigned mul of 2 nodes can overflow.
void copyExtraInfo(SDNode *From, SDNode *To)
Copy extra info associated with one node to another.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue makeStateFunctionCall(unsigned LibFunc, SDValue Ptr, SDValue InChain, const SDLoc &DLoc)
Helper used to make a call to a library function that has one argument of pointer type.
bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
OverflowKind computeOverflowForSignedMul(SDValue N0, SDValue N1) const
Determine if the result of the signed mul of 2 nodes can overflow.
MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if '(Op & Mask) == Mask'.
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
bool isConstantValueOfAnyType(SDValue N) const
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
SDValue getPartialReduceAdd(SDLoc DL, EVT ReducedTy, SDValue Op1, SDValue Op2)
Create the DAG equivalent of vector_partial_reduce where Op1 and Op2 are its operands and ReducedTY i...
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
SDValue getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
SDValue getVPLogicalNOT(const SDLoc &DL, SDValue Val, SDValue Mask, SDValue EVL, EVT VT)
Create a vector-predicated logical NOT operation as (VP_XOR Val, BooleanOne, Mask,...
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
iterator_range< allnodes_iterator > allnodes()
SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getValueType(EVT)
ArrayRef< SDDbgValue * > GetDbgValues(const SDNode *SD) const
Get the debug values which reference the given SDNode.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
OverflowKind computeOverflowForSignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the signed addition of 2 nodes can overflow.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and a assign a unique node id for each node in the DAG based on th...
ilist< SDNode >::size_type allnodes_size() const
SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN.
SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsCompressing=false)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
const TargetLibraryInfo & getLibInfo() const
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
bool MaskedVectorIsZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Return true if 'Op' is known to be zero in DemandedElts.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
SDValue getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
bool canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts, bool PoisonOnly=false, bool ConsiderFlags=true, unsigned Depth=0) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
OverflowKind computeOverflowForUnsignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the unsigned addition of 2 nodes can overflow.
std::optional< uint64_t > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT SVT, MachineMemOperand *MMO, bool IsCompressing=false)
void canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1, SDValue &N2) const
Swap N1 and N2 if Opcode is a commutative binary opcode and the canonical form expects the opposite o...
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex, int64_t Size, int64_t Offset=-1)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the por...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
bool isKnownToBeAPowerOfTwoFP(SDValue Val, unsigned Depth=0) const
Test if the given fp value is known to be an integer power-of-2, either positive or negative.
OverflowKind computeOverflowForSignedSub(SDValue N0, SDValue N1) const
Determine if the result of the signed sub of 2 nodes can overflow.
bool expandMultipleResultFPLibCall(RTLIB::Libcall LC, SDNode *Node, SmallVectorImpl< SDValue > &Results, std::optional< unsigned > CallRetResNo={})
Expands a node with multiple results to an FP or vector libcall.
std::optional< uint64_t > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
LLVMContext * getContext() const
SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, SDNodeFlags Flags)
Try to simplify a floating-point binary operation into 1 of its operands or a constant.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
bool isUndef(unsigned Opcode, ArrayRef< SDValue > Ops)
Return true if the result of this operation is always undefined.
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
Fold floating-point operations when all operands are constants and/or undefined.
SDNode * getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops, const SDNodeFlags Flags)
Get the specified node if it's already available, or else return NULL.
void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE, Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, UniformityInfo *UA, ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin, MachineModuleInfo &MMI, FunctionVarLocs const *FnVarLocs)
Prepare this SelectionDAG to process code in the given MachineFunction.
std::optional< ConstantRange > getValidShiftAmountRange(SDValue V, const APInt &DemandedElts, unsigned Depth) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
SDValue FoldSymbolOffset(unsigned Opcode, EVT VT, const GlobalAddressSDNode *GA, const SDNode *N2)
std::optional< bool > isBoolConstant(SDValue N, bool AllowTruncation=false) const
Check if a value \op N is a constant using the target's BooleanContent for its type.
SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, ArrayRef< ISD::NodeType > CandidateBinOps, bool AllowPartials=false)
Match a binop + shuffle pyramid that represents a horizontal reduction over the elements of a vector ...
bool isADDLike(SDValue Op, bool NoWrap=false) const
Return true if the specified operand is an ISD::OR or ISD::XOR node that can be treated as an ISD::AD...
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue simplifyShift(SDValue X, SDValue Y)
Try to simplify a shift into 1 of its operands or a constant.
void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits=0, unsigned SizeInBits=0, bool InvalidateDbg=true)
Transfer debug values from one node to another, while optionally generating fragment expressions for ...
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
ilist< SDNode >::iterator allnodes_iterator
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
static bool isSplatMask(const int *Mask, EVT VT)
int getMaskElt(unsigned Idx) const
ArrayRef< int > getMask() const
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Completely target-dependent object reference.
int64_t getOffset() const
unsigned getTargetFlags() const
Provides information about what library functions are available for the current target.
const VecDesc * getVectorMappingInfo(StringRef F, const ElementCount &VF, bool Masked) const
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself.
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual ISD::NodeType getExtendForAtomicOps() const
Returns how the platform's atomic operations are extended (ZERO_EXTEND, SIGN_EXTEND,...
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
virtual bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is kno...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal for a comparison of the specified types on this ...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual unsigned getMaxGluedStoresPerMemcpy() const
Get maximum # of store operations to be glued together.
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual bool hasVectorBlend() const
Return true if the target has a vector blend instruction.
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual bool isLegalStoreImmediate(int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
virtual void verifyTargetSDNode(const SDNode *N) const
Check the given SDNode. Aborts if it is invalid.
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false,.
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
virtual bool isSDNodeSourceOfDivergence(const SDNode *N, FunctionLoweringInfo *FLI, UniformityInfo *UA) const
virtual bool isSDNodeAlwaysUniform(const SDNode *N) const
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
Primary interface to the complete machine description for the target machine.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const SelectionDAGTargetInfo * getSelectionDAGInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, XROS, or DriverKit).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
This class is used to represent an VP_GATHER node.
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Provides info so a possible vectorization of a function can be computed.
StringRef getVectorFnName() const
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
APInt mulhu(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on zero-extended operands.
const APInt abdu(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be unsigned.
APInt avgCeilU(const APInt &C1, const APInt &C2)
Compute the ceil of the unsigned average of C1 and C2.
APInt avgFloorU(const APInt &C1, const APInt &C2)
Compute the floor of the unsigned average of C1 and C2.
const APInt abds(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be signed.
APInt mulhs(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on sign-extended operands.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
APInt avgFloorS(const APInt &C1, const APInt &C2)
Compute the floor of the signed average of C1 and C2.
APInt avgCeilS(const APInt &C1, const APInt &C2)
Compute the ceil of the signed average of C1 and C2.
@ C
The default llvm calling convention, compatible with C.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical AND between different comparisons of identical values: ((X op1 Y) & (X...
bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 ...
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ MDNODE_SDNODE
MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to reference metadata in the IR.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ JUMP_TABLE_DEBUG_INFO
JUMP_TABLE_DEBUG_INFO - Jumptable debug info.
@ BSWAP
Byte Swap and Counting operators.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SRCVALUE
SRCVALUE - This is a node type that holds a Value* that is used to make reference to a value in the L...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ TargetIndex
TargetIndex - Like a constant pool entry, but with completely target-dependent semantics.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ AssertAlign
AssertAlign - These nodes record if a register contains a value that has a known alignment and the tr...
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ LIFETIME_START
This corresponds to the llvm.lifetime.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ MGATHER
Masked gather and scatter - load and store operations for a vector of random addresses with additiona...
@ HANDLENODE
HANDLENODE node - Used as a handle for various purposes.
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ GET_FPENV_MEM
Gets the current floating-point environment.
@ PSEUDO_PROBE
Pseudo probe for AutoFDO, as a place holder in a basic block to improve the sample counts quality.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together a...
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EXPERIMENTAL_VECTOR_HISTOGRAM
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ CALLSEQ_START
CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of a call sequence,...
@ SET_FPENV_MEM
Sets the current floating point environment.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantSDNode predicate.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool matchUnaryFpPredicate(SDValue Op, std::function< bool(ConstantFPSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantFPSDNode predicate.
bool isExtOpcode(unsigned Opcode)
bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
bool isVectorShrinkable(const SDNode *N, unsigned NewEltSize, bool Signed)
Returns true if the specified node is a vector where all elements can be truncated to the specified e...
bool matchUnaryPredicateImpl(SDValue Op, std::function< bool(ConstNodeType *)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant BUI...
bool isVPBinaryOp(unsigned Opcode)
Whether this is a vector-predicated binary operation opcode.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
std::optional< unsigned > getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept)
Translate this VP Opcode to its corresponding non-VP Opcode.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
std::optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
std::optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
std::optional< unsigned > getVPForBaseOpcode(unsigned Opcode)
Translate this non-VP Opcode to its corresponding VP Opcode.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
NodeType getInverseMinMaxOpcode(unsigned MinMaxOpc)
Given a MinMaxOpc of ISD::(U|S)MIN or ISD::(U|S)MAX, returns ISD::(U|S)MAX and ISD::(U|S)MIN,...
bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
bool isVPReduction(unsigned Opcode)
Whether this is a vector-predicated reduction opcode.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical OR between different comparisons of identical values: ((X op1 Y) | (X ...
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given e...
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
bool operator<(int64_t V1, const APSInt &V2)
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
SDValue peekThroughExtractSubvectors(SDValue V)
Return the non-extracted vector source operand of V if it exists.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
SDValue getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs)
If V is a bitwise not, returns the inverted operand.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
MaybeAlign getAlign(const Function &F, unsigned Index)
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
bool isMinSignedConstant(SDValue V)
Returns true if V is a constant min signed integer value.
ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void checkForCycles(const SelectionDAG *DAG, bool force=false)
void sort(IteratorTy Start, IteratorTy End)
LLVM_READONLY APFloat minimumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimumNumber semantics.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
SDValue peekThroughTruncates(SDValue V)
Return the non-truncated source operand of V if it exists.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
SDValue peekThroughOneUseBitcasts(SDValue V)
Return the non-bitcasted and one-use source operand of V if it exists.
CodeGenOptLevel
Code generation optimization level.
bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
@ Mul
Product of integers.
bool isNullConstantOrUndef(SDValue V)
Returns true if V is a constant integer zero or an UNDEF node.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
OutputIt copy(R &&Range, OutputIt Out)
constexpr unsigned BitWidth
bool funcReturnsFirstArgOfCall(const CallInst &CI)
Returns true if the parent of CI returns CI's first argument after calling CI.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
unsigned Log2(Align A)
Returns the log2 of the alignment.
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
LLVM_READONLY APFloat maximumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximumNumber semantics.
bool isNeutralConstant(unsigned Opc, SDNodeFlags Flags, SDValue V, unsigned OperandNo)
Returns true if V is a neutral element of Opc with Flags.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
MDNode * TBAAStruct
The tag for type-based alias analysis (tbaa struct).
MDNode * TBAA
The tag for type-based alias analysis.
static const fltSemantics & IEEEsingle() LLVM_READNONE
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
static constexpr roundingMode rmTowardNegative
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
static const fltSemantics & IEEEquad() LLVM_READNONE
static const fltSemantics & IEEEdouble() LLVM_READNONE
static const fltSemantics & IEEEhalf() LLVM_READNONE
static constexpr roundingMode rmTowardPositive
static const fltSemantics & BFloat() LLVM_READNONE
opStatus
IEEE-754R 7: Default exception handling.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represents offset+length into a ConstantDataArray.
uint64_t Length
Length of the slice.
uint64_t Offset
Slice starts at this Offset.
void move(uint64_t Delta)
Moves the Offset and adjusts Length accordingly.
const ConstantDataArray * Array
ConstantDataArray pointer.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
intptr_t getRawBits() const
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for an in-register sign extension of the value we're tracking.
static KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
void makeNonNegative()
Make this value non-negative.
static KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
static std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
void makeNegative()
Make this value negative.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
static KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
static KnownBits abdu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for abdu(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
static KnownBits avgFloorU(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgFloorU.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static KnownBits computeForSubBorrow(const KnownBits &LHS, KnownBits RHS, const KnownBits &Borrow)
Compute known bits results from subtracting RHS from LHS with 1-bit Borrow.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static KnownBits abds(KnownBits LHS, KnownBits RHS)
Compute known bits for abds(LHS, RHS).
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
static KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static KnownBits avgFloorS(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgFloorS.
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static KnownBits avgCeilU(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgCeilU.
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
static KnownBits avgCeilS(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgCeilS.
This class contains a discriminated union of information about pointers in memory operands,...
bool isDereferenceable(unsigned Size, LLVMContext &C, const DataLayout &DL) const
Return true if memory region [V, V+Offset+Size) is known to be dereferenceable.
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Clients of various APIs that cause global effects on the DAG can optionally implement this interface.
DAGUpdateListener *const Next
virtual void NodeDeleted(SDNode *N, SDNode *E)
The node N that was deleted and, if E is not null, an equivalent node E that replaced it.
virtual void NodeInserted(SDNode *N)
The node N that was inserted.
virtual void NodeUpdated(SDNode *N)
The node N that was updated.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)