58 if (
F.getFnAttribute(
"disable-tail-calls").getValueAsBool())
64 AttrBuilder CallerAttrs(
F.getContext(),
F.getAttributes().getRetAttrs());
65 for (
const auto &Attr :
66 {Attribute::Alignment, Attribute::Dereferenceable,
67 Attribute::DereferenceableOrNull, Attribute::NoAlias,
68 Attribute::NonNull, Attribute::NoUndef, Attribute::Range})
75 if (CallerAttrs.
contains(Attribute::ZExt) ||
76 CallerAttrs.
contains(Attribute::SExt))
87 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
103 Register ArgReg = cast<RegisterSDNode>(
Value->getOperand(1))->getReg();
104 if (
MRI.getLiveInPhysReg(ArgReg) != Reg)
114 IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
115 IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
116 IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
117 IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
118 IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
119 IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
120 IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
121 IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
122 IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
123 IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
124 IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
125 IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
126 Alignment = Call->getParamStackAlign(ArgIdx);
129 "multiple ABI attributes?");
145std::pair<SDValue, SDValue>
155 Args.reserve(Ops.
size());
158 for (
unsigned i = 0; i < Ops.
size(); ++i) {
161 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.
getContext());
164 Entry.IsZExt = !Entry.IsSExt;
168 Entry.IsSExt = Entry.IsZExt =
false;
170 Args.push_back(Entry);
173 if (LC == RTLIB::UNKNOWN_LIBCALL)
181 bool zeroExtend = !signExtend;
185 signExtend = zeroExtend =
false;
196 return LowerCallTo(CLI);
200 std::vector<EVT> &MemOps,
unsigned Limit,
const MemOp &
Op,
unsigned DstAS,
202 if (Limit != ~
unsigned(0) &&
Op.isMemcpyWithFixedDstAlign() &&
203 Op.getSrcAlign() <
Op.getDstAlign())
208 if (VT == MVT::Other) {
212 VT = MVT::LAST_INTEGER_VALUETYPE;
213 if (
Op.isFixedDstAlign())
220 MVT LVT = MVT::LAST_INTEGER_VALUETYPE;
231 unsigned NumMemOps = 0;
235 while (VTSize >
Size) {
246 else if (NewVT == MVT::i64 &&
258 if (NewVT == MVT::i8)
267 if (NumMemOps &&
Op.allowOverlap() && NewVTSize <
Size &&
269 VT, DstAS,
Op.isFixedDstAlign() ?
Op.getDstAlign() :
Align(1),
279 if (++NumMemOps > Limit)
282 MemOps.push_back(VT);
297 return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
307 bool IsSignaling)
const {
312 assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
313 &&
"Unsupported setcc type!");
316 RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
317 bool ShouldInvertCC =
false;
321 LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
322 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
323 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
327 LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
328 (VT == MVT::f64) ? RTLIB::UNE_F64 :
329 (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
333 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
334 (VT == MVT::f64) ? RTLIB::OGE_F64 :
335 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
339 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
340 (VT == MVT::f64) ? RTLIB::OLT_F64 :
341 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
345 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
346 (VT == MVT::f64) ? RTLIB::OLE_F64 :
347 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
351 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
352 (VT == MVT::f64) ? RTLIB::OGT_F64 :
353 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
356 ShouldInvertCC =
true;
359 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
360 (VT == MVT::f64) ? RTLIB::UO_F64 :
361 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
365 ShouldInvertCC =
true;
368 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
369 (VT == MVT::f64) ? RTLIB::UO_F64 :
370 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
371 LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
372 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
373 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
377 ShouldInvertCC =
true;
380 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
381 (VT == MVT::f64) ? RTLIB::OGE_F64 :
382 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
385 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
386 (VT == MVT::f64) ? RTLIB::OGT_F64 :
387 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
390 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
391 (VT == MVT::f64) ? RTLIB::OLE_F64 :
392 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
395 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
396 (VT == MVT::f64) ? RTLIB::OLT_F64 :
397 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
405 SDValue Ops[2] = {NewLHS, NewRHS};
410 auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
415 if (ShouldInvertCC) {
417 CCCode = getSetCCInverse(CCCode, RetVT);
420 if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
427 auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
430 CCCode = getSetCCInverse(CCCode, RetVT);
431 NewLHS = DAG.
getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
445 if (!isPositionIndependent())
459 unsigned JTEncoding = getJumpTableEncoding();
495 if (!TM.shouldAssumeDSOLocal(GV))
499 if (isPositionIndependent())
515 const APInt &DemandedElts,
518 unsigned Opcode =
Op.getOpcode();
526 if (targetShrinkDemandedConstant(
Op,
DemandedBits, DemandedElts, TLO))
536 auto *Op1C = dyn_cast<ConstantSDNode>(
Op.getOperand(1));
537 if (!Op1C || Op1C->isOpaque())
541 const APInt &
C = Op1C->getAPIntValue();
546 EVT VT =
Op.getValueType();
563 EVT VT =
Op.getValueType();
578 "ShrinkDemandedOp only supports binary operators!");
579 assert(
Op.getNode()->getNumValues() == 1 &&
580 "ShrinkDemandedOp only supports nodes with one result!");
582 EVT VT =
Op.getValueType();
591 Op.getOperand(1).getValueType().getScalarSizeInBits() ==
BitWidth &&
592 "ShrinkDemandedOp only supports operands that have the same size!");
596 if (!
Op.getNode()->hasOneUse())
608 Op.getOpcode(), dl, SmallVT,
611 assert(DemandedSize <= SmallVTBits &&
"Narrowed below demanded bits?");
626 bool Simplified = SimplifyDemandedBits(
Op,
DemandedBits, Known, TLO);
635 const APInt &DemandedElts,
655 bool AssumeSingleUse)
const {
656 EVT VT =
Op.getValueType();
672 EVT VT =
Op.getValueType();
690 switch (
Op.getOpcode()) {
696 EVT SrcVT = Src.getValueType();
697 EVT DstVT =
Op.getValueType();
703 if (NumSrcEltBits == NumDstEltBits)
704 if (
SDValue V = SimplifyMultipleUseDemandedBits(
708 if (SrcVT.
isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
709 unsigned Scale = NumDstEltBits / NumSrcEltBits;
713 for (
unsigned i = 0; i != Scale; ++i) {
714 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
715 unsigned BitOffset = EltOffset * NumSrcEltBits;
718 DemandedSrcBits |= Sub;
719 for (
unsigned j = 0; j != NumElts; ++j)
721 DemandedSrcElts.
setBit((j * Scale) + i);
725 if (
SDValue V = SimplifyMultipleUseDemandedBits(
726 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
731 if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
732 unsigned Scale = NumSrcEltBits / NumDstEltBits;
736 for (
unsigned i = 0; i != NumElts; ++i)
737 if (DemandedElts[i]) {
738 unsigned Offset = (i % Scale) * NumDstEltBits;
740 DemandedSrcElts.
setBit(i / Scale);
743 if (
SDValue V = SimplifyMultipleUseDemandedBits(
744 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
765 return Op.getOperand(0);
767 return Op.getOperand(1);
778 return Op.getOperand(0);
780 return Op.getOperand(1);
790 return Op.getOperand(0);
792 return Op.getOperand(1);
798 if (std::optional<uint64_t> MaxSA =
801 unsigned ShAmt = *MaxSA;
802 unsigned NumSignBits =
805 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
834 EVT ExVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
841 if (NumSignBits >= (
BitWidth - ExBits + 1))
854 EVT SrcVT = Src.getValueType();
855 EVT DstVT =
Op.getValueType();
856 if (IsLE && DemandedElts == 1 &&
869 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
872 !DemandedElts[CIdx->getZExtValue()])
886 if (DemandedSubElts == 0)
896 bool AllUndef =
true, IdentityLHS =
true, IdentityRHS =
true;
897 for (
unsigned i = 0; i != NumElts; ++i) {
898 int M = ShuffleMask[i];
899 if (M < 0 || !DemandedElts[i])
902 IdentityLHS &= (M == (int)i);
903 IdentityRHS &= ((M - NumElts) == i);
909 return Op.getOperand(0);
911 return Op.getOperand(1);
921 if (
SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
931 unsigned Depth)
const {
932 EVT VT =
Op.getValueType();
939 return SimplifyMultipleUseDemandedBits(
Op,
DemandedBits, DemandedElts, DAG,
945 unsigned Depth)
const {
947 return SimplifyMultipleUseDemandedBits(
Op,
DemandedBits, DemandedElts, DAG,
959 "SRL or SRA node is required here!");
962 if (!N1C || !N1C->
isOne())
1009 unsigned ShiftOpc =
Op.getOpcode();
1010 bool IsSigned =
false;
1014 unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;
1019 unsigned NumZero = std::min(NumZeroA, NumZeroB);
1025 if (NumZero >= 2 && NumSigned < NumZero) {
1030 if (NumSigned >= 1) {
1038 if (NumZero >= 1 && NumSigned < NumZero) {
1058 EVT VT =
Op.getValueType();
1072 Add.getOperand(1)) &&
1083 (isa<ConstantSDNode>(ExtOpA) || isa<ConstantSDNode>(ExtOpB)))
1103 unsigned Depth,
bool AssumeSingleUse)
const {
1106 "Mask size mismatches value type size!");
1111 EVT VT =
Op.getValueType();
1113 unsigned NumElts = OriginalDemandedElts.
getBitWidth();
1115 "Unexpected vector size");
1118 APInt DemandedElts = OriginalDemandedElts;
1138 cast<ConstantFPSDNode>(
Op)->getValueAPF().bitcastToAPInt());
1143 bool HasMultiUse =
false;
1144 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse()) {
1153 }
else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
1162 switch (
Op.getOpcode()) {
1166 if (!DemandedElts[0])
1171 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
1173 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO,
Depth + 1))
1178 if (DemandedElts == 1)
1191 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO,
Depth + 1))
1200 auto *LD = cast<LoadSDNode>(
Op);
1201 if (getTargetConstantFromLoad(LD)) {
1207 EVT MemVT = LD->getMemoryVT();
1219 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
1224 APInt DemandedVecElts(DemandedElts);
1226 unsigned Idx = CIdx->getZExtValue();
1230 if (!DemandedElts[
Idx])
1237 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO,
Depth + 1))
1243 if (SimplifyDemandedBits(Vec,
DemandedBits, DemandedVecElts, KnownVec, TLO,
1247 if (!!DemandedVecElts)
1262 APInt DemandedSrcElts = DemandedElts;
1266 if (SimplifyDemandedBits(Sub,
DemandedBits, DemandedSubElts, KnownSub, TLO,
1269 if (SimplifyDemandedBits(Src,
DemandedBits, DemandedSrcElts, KnownSrc, TLO,
1275 if (!!DemandedSubElts)
1277 if (!!DemandedSrcElts)
1283 SDValue NewSub = SimplifyMultipleUseDemandedBits(
1285 SDValue NewSrc = SimplifyMultipleUseDemandedBits(
1287 if (NewSub || NewSrc) {
1288 NewSub = NewSub ? NewSub : Sub;
1289 NewSrc = NewSrc ? NewSrc : Src;
1302 if (Src.getValueType().isScalableVector())
1305 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
1308 if (SimplifyDemandedBits(Src,
DemandedBits, DemandedSrcElts, Known, TLO,
1314 SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
1329 EVT SubVT =
Op.getOperand(0).getValueType();
1332 for (
unsigned i = 0; i != NumSubVecs; ++i) {
1333 APInt DemandedSubElts =
1334 DemandedElts.
extractBits(NumSubElts, i * NumSubElts);
1335 if (SimplifyDemandedBits(
Op.getOperand(i),
DemandedBits, DemandedSubElts,
1336 Known2, TLO,
Depth + 1))
1339 if (!!DemandedSubElts)
1349 APInt DemandedLHS, DemandedRHS;
1354 if (!!DemandedLHS || !!DemandedRHS) {
1360 if (!!DemandedLHS) {
1361 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedLHS, Known2, TLO,
1366 if (!!DemandedRHS) {
1367 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedRHS, Known2, TLO,
1374 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1376 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1378 if (DemandedOp0 || DemandedOp1) {
1379 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1380 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1415 LHSKnown.
One == ~RHSC->getAPIntValue()) {
1427 unsigned NumSubElts =
1444 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1448 Known2, TLO,
Depth + 1))
1470 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1472 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1474 if (DemandedOp0 || DemandedOp1) {
1475 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1476 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1489 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1491 if (Flags.hasDisjoint()) {
1492 Flags.setDisjoint(
false);
1493 Op->setFlags(Flags);
1498 if (SimplifyDemandedBits(Op0, ~Known.
One &
DemandedBits, DemandedElts,
1499 Known2, TLO,
Depth + 1)) {
1500 if (Flags.hasDisjoint()) {
1501 Flags.setDisjoint(
false);
1502 Op->setFlags(Flags);
1522 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1524 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1526 if (DemandedOp0 || DemandedOp1) {
1527 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1528 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1539 for (
int I = 0;
I != 2; ++
I) {
1542 SDValue Alt =
Op.getOperand(1 -
I).getOperand(0);
1543 SDValue C2 =
Op.getOperand(1 -
I).getOperand(1);
1545 for (
int J = 0; J != 2; ++J) {
1568 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1571 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedElts, Known2, TLO,
1598 if (
C->getAPIntValue() == Known2.
One) {
1607 if (!
C->isAllOnes() &&
DemandedBits.isSubsetOf(
C->getAPIntValue())) {
1619 if (ShiftC->getAPIntValue().ult(
BitWidth)) {
1620 uint64_t ShiftAmt = ShiftC->getZExtValue();
1623 : Ones.
lshr(ShiftAmt);
1625 isDesirableToCommuteXorWithShift(
Op.getNode())) {
1640 if (!
C || !
C->isAllOnes())
1646 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1648 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1650 if (DemandedOp0 || DemandedOp1) {
1651 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1652 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1662 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1663 Known, TLO,
Depth + 1))
1665 if (SimplifyDemandedBits(
Op.getOperand(1),
DemandedBits, DemandedElts,
1666 Known2, TLO,
Depth + 1))
1677 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1678 Known, TLO,
Depth + 1))
1680 if (SimplifyDemandedBits(
Op.getOperand(1),
DemandedBits, DemandedElts,
1681 Known2, TLO,
Depth + 1))
1688 if (SimplifyDemandedBits(
Op.getOperand(3),
DemandedBits, DemandedElts,
1689 Known, TLO,
Depth + 1))
1691 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1692 Known2, TLO,
Depth + 1))
1735 if (std::optional<uint64_t> KnownSA =
1737 unsigned ShAmt = *KnownSA;
1747 if (std::optional<uint64_t> InnerSA =
1749 unsigned C1 = *InnerSA;
1751 int Diff = ShAmt - C1;
1770 if (ShAmt < InnerBits &&
DemandedBits.getActiveBits() <= InnerBits &&
1771 isTypeDesirableForOp(
ISD::SHL, InnerVT)) {
1788 InnerOp, DemandedElts,
Depth + 2)) {
1789 unsigned InnerShAmt = *SA2;
1790 if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
1792 (InnerBits - InnerShAmt + ShAmt) &&
1806 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1809 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
1812 Flags.setNoSignedWrap(
false);
1813 Flags.setNoUnsignedWrap(
false);
1814 Op->setFlags(Flags);
1818 Known.
Zero <<= ShAmt;
1819 Known.
One <<= ShAmt;
1825 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1826 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
1837 Op.getNode()->hasOneUse()) {
1845 isTypeDesirableForOp(
ISD::SHL, SmallVT) &&
1848 assert(DemandedSize <= SmallVTBits &&
1849 "Narrowed below demanded bits?");
1869 isTypeDesirableForOp(
ISD::SHL, HalfVT) &&
1878 Flags.setNoSignedWrap(IsNSW);
1879 Flags.setNoUnsignedWrap(IsNUW);
1884 NewShiftAmt, Flags);
1897 if (SimplifyDemandedBits(Op0, DemandedFromOp, DemandedElts, Known, TLO,
1900 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
1903 Flags.setNoSignedWrap(
false);
1904 Flags.setNoUnsignedWrap(
false);
1905 Op->setFlags(Flags);
1915 if (std::optional<uint64_t> MaxSA =
1917 unsigned ShAmt = *MaxSA;
1918 unsigned NumSignBits =
1921 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
1931 if (std::optional<uint64_t> KnownSA =
1933 unsigned ShAmt = *KnownSA;
1943 if (std::optional<uint64_t> InnerSA =
1945 unsigned C1 = *InnerSA;
1947 int Diff = ShAmt - C1;
1963 if (std::optional<uint64_t> InnerSA =
1965 unsigned C1 = *InnerSA;
1967 unsigned Combined = std::min(C1 + ShAmt,
BitWidth - 1);
1979 if (
Op->getFlags().hasExact())
1988 isTypeDesirableForOp(
ISD::SRL, HalfVT) &&
2004 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
2014 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2015 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2029 DemandedElts,
Depth + 1))
2053 if (std::optional<uint64_t> KnownSA =
2055 unsigned ShAmt = *KnownSA;
2062 if (std::optional<uint64_t> InnerSA =
2064 unsigned LowBits =
BitWidth - ShAmt;
2070 if (*InnerSA == ShAmt) {
2080 unsigned NumSignBits =
2082 if (NumSignBits > ShAmt)
2092 if (
Op->getFlags().hasExact())
2100 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
2111 Flags.setExact(
Op->getFlags().hasExact());
2129 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2130 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2140 DemandedElts,
Depth + 1))
2153 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2158 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1,
DemandedBits, DemandedElts,
2159 Known, TLO,
Depth + 1))
2168 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2171 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
2184 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2185 Op0, Demanded0, DemandedElts, TLO.
DAG,
Depth + 1);
2186 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2187 Op1, Demanded1, DemandedElts, TLO.
DAG,
Depth + 1);
2188 if (DemandedOp0 || DemandedOp1) {
2189 DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0;
2190 DemandedOp1 = DemandedOp1 ? DemandedOp1 : Op1;
2201 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
2202 Known2, TLO,
Depth + 1))
2218 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2224 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2234 DemandedBits.countr_zero() >= (IsROTL ? Amt : RevAmt)) {
2239 DemandedBits.countl_zero() >= (IsROTL ? RevAmt : Amt)) {
2248 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,
2258 unsigned Opc =
Op.getOpcode();
2265 unsigned NumSignBits =
2269 if (NumSignBits >= NumDemandedUpperBits)
2310 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
2336 unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ;
2344 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
2364 EVT ExVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2369 unsigned MinSignedBits =
2371 bool AlreadySignExtended = ExVTBits >= MinSignedBits;
2374 if (!AlreadySignExtended) {
2392 InputDemandedBits.
setBit(ExVTBits - 1);
2394 if (SimplifyDemandedBits(Op0, InputDemandedBits, DemandedElts, Known, TLO,
2402 if (Known.
Zero[ExVTBits - 1])
2406 if (Known.
One[ExVTBits - 1]) {
2416 EVT HalfVT =
Op.getOperand(0).getValueType();
2424 if (SimplifyDemandedBits(
Op.getOperand(0), MaskLo, KnownLo, TLO,
Depth + 1))
2427 if (SimplifyDemandedBits(
Op.getOperand(1), MaskHi, KnownHi, TLO,
Depth + 1))
2430 Known = KnownHi.
concat(KnownLo);
2439 EVT SrcVT = Src.getValueType();
2448 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2460 APInt InDemandedElts = DemandedElts.
zext(InElts);
2461 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2463 if (Flags.hasNonNeg()) {
2464 Flags.setNonNeg(
false);
2465 Op->setFlags(Flags);
2473 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2474 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2484 EVT SrcVT = Src.getValueType();
2489 APInt InDemandedElts = DemandedElts.
zext(InElts);
2494 InDemandedBits.
setBit(InBits - 1);
2500 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2515 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2530 Flags.setNonNeg(
true);
2536 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2537 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2547 EVT SrcVT = Src.getValueType();
2554 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2559 APInt InDemandedElts = DemandedElts.
zext(InElts);
2560 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2567 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2568 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2577 unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
2579 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO,
2585 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2586 Src, TruncMask, DemandedElts, TLO.
DAG,
Depth + 1))
2591 switch (Src.getOpcode()) {
2602 if (Src.getNode()->hasOneUse()) {
2614 std::optional<uint64_t> ShAmtC =
2616 if (!ShAmtC || *ShAmtC >=
BitWidth)
2642 EVT ZVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2644 if (SimplifyDemandedBits(
Op.getOperand(0), ~InMask |
DemandedBits, Known,
2648 Known.
Zero |= ~InMask;
2649 Known.
One &= (~Known.Zero);
2655 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
2656 unsigned EltBitWidth = Src.getScalarValueSizeInBits();
2664 if (
auto *CIdx = dyn_cast<ConstantSDNode>(
Idx))
2665 if (CIdx->getAPIntValue().ult(NumSrcElts))
2672 DemandedSrcBits = DemandedSrcBits.
trunc(EltBitWidth);
2674 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,
2680 if (
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2681 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2697 EVT SrcVT = Src.getValueType();
2707 if ((OpVTLegal || i32Legal) && VT.
isSimple() && SrcVT != MVT::f16 &&
2708 SrcVT != MVT::f128) {
2710 EVT Ty = OpVTLegal ? VT : MVT::i32;
2714 unsigned OpVTSizeInBits =
Op.getValueSizeInBits();
2715 if (!OpVTLegal && OpVTSizeInBits > 32)
2717 unsigned ShVal =
Op.getValueSizeInBits() - 1;
2727 unsigned Scale =
BitWidth / NumSrcEltBits;
2731 for (
unsigned i = 0; i != Scale; ++i) {
2732 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
2733 unsigned BitOffset = EltOffset * NumSrcEltBits;
2736 DemandedSrcBits |= Sub;
2737 for (
unsigned j = 0; j != NumElts; ++j)
2738 if (DemandedElts[j])
2739 DemandedSrcElts.
setBit((j * Scale) + i);
2743 APInt KnownSrcUndef, KnownSrcZero;
2744 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2745 KnownSrcZero, TLO,
Depth + 1))
2749 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2750 KnownSrcBits, TLO,
Depth + 1))
2752 }
else if (IsLE && (NumSrcEltBits %
BitWidth) == 0) {
2754 unsigned Scale = NumSrcEltBits /
BitWidth;
2758 for (
unsigned i = 0; i != NumElts; ++i)
2759 if (DemandedElts[i]) {
2762 DemandedSrcElts.
setBit(i / Scale);
2766 APInt KnownSrcUndef, KnownSrcZero;
2767 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2768 KnownSrcZero, TLO,
Depth + 1))
2773 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2774 KnownSrcBits, TLO,
Depth + 1))
2779 if (
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2780 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2802 if (
C &&
C->getAPIntValue().countr_zero() == CTZ) {
2821 SDValue Op0 =
Op.getOperand(0), Op1 =
Op.getOperand(1);
2826 auto GetDemandedBitsLHSMask = [&](
APInt Demanded,
2832 if (SimplifyDemandedBits(Op1, LoMask, DemandedElts, KnownOp1, TLO,
2834 SimplifyDemandedBits(Op0, GetDemandedBitsLHSMask(LoMask, KnownOp1),
2835 DemandedElts, KnownOp0, TLO,
Depth + 1) ||
2838 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
2841 Flags.setNoSignedWrap(
false);
2842 Flags.setNoUnsignedWrap(
false);
2843 Op->setFlags(Flags);
2855 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2856 Op0, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2857 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2858 Op1, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2859 if (DemandedOp0 || DemandedOp1) {
2860 Flags.setNoSignedWrap(
false);
2861 Flags.setNoUnsignedWrap(
false);
2862 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
2863 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
2865 TLO.
DAG.
getNode(
Op.getOpcode(), dl, VT, Op0, Op1, Flags);
2877 if (
C && !
C->isAllOnes() && !
C->isOne() &&
2878 (
C->getAPIntValue() | HighMask).isAllOnes()) {
2882 Flags.setNoSignedWrap(
false);
2883 Flags.setNoUnsignedWrap(
false);
2891 auto getShiftLeftAmt = [&HighMask](
SDValue Mul) ->
unsigned {
2918 if (
unsigned ShAmt = getShiftLeftAmt(Op0))
2921 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2922 return foldMul(
ISD::SUB, Op1.getOperand(0), Op0, ShAmt);
2926 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2927 return foldMul(
ISD::ADD, Op1.getOperand(0), Op0, ShAmt);
2935 Op.getOpcode() ==
ISD::ADD, Flags.hasNoSignedWrap(),
2936 Flags.hasNoUnsignedWrap(), KnownOp0, KnownOp1);
2946 if (
Op.getValueType().isScalableVector())
2948 if (SimplifyDemandedBitsForTargetNode(
Op,
DemandedBits, DemandedElts,
2961 if (!isTargetCanonicalConstantNode(
Op) &&
2967 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
2989 const APInt &DemandedElts,
2995 APInt KnownUndef, KnownZero;
2997 SimplifyDemandedVectorElts(
Op, DemandedElts, KnownUndef, KnownZero, TLO);
3009 const APInt &UndefOp0,
3010 const APInt &UndefOp1) {
3013 "Vector binop only");
3018 UndefOp1.
getBitWidth() == NumElts &&
"Bad type for undef analysis");
3020 auto getUndefOrConstantElt = [&](
SDValue V,
unsigned Index,
3021 const APInt &UndefVals) {
3022 if (UndefVals[
Index])
3025 if (
auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
3029 auto *
C = dyn_cast<ConstantSDNode>(Elt);
3030 if (isa<ConstantFPSDNode>(Elt) || Elt.
isUndef() || (
C && !
C->isOpaque()))
3038 for (
unsigned i = 0; i != NumElts; ++i) {
3057 bool AssumeSingleUse)
const {
3058 EVT VT =
Op.getValueType();
3059 unsigned Opcode =
Op.getOpcode();
3060 APInt DemandedElts = OriginalDemandedElts;
3066 if (!shouldSimplifyDemandedVectorElts(
Op, TLO))
3074 "Mask size mismatches value type element count!");
3083 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse())
3087 if (DemandedElts == 0) {
3102 auto SimplifyDemandedVectorEltsBinOp = [&](
SDValue Op0,
SDValue Op1) {
3103 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts,
3105 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts,
3107 if (NewOp0 || NewOp1) {
3110 NewOp1 ? NewOp1 : Op1,
Op->getFlags());
3118 if (!DemandedElts[0]) {
3126 EVT SrcVT = Src.getValueType();
3138 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3148 EVT SrcVT = Src.getValueType();
3157 if (NumSrcElts == NumElts)
3158 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
3159 KnownZero, TLO,
Depth + 1);
3161 APInt SrcDemandedElts, SrcZero, SrcUndef;
3165 if ((NumElts % NumSrcElts) == 0) {
3166 unsigned Scale = NumElts / NumSrcElts;
3168 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3178 for (
unsigned i = 0; i != NumElts; ++i)
3179 if (DemandedElts[i]) {
3180 unsigned Ofs = (i % Scale) * EltSizeInBits;
3181 SrcDemandedBits.
setBits(Ofs, Ofs + EltSizeInBits);
3185 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,
3193 for (
unsigned SubElt = 0; SubElt != Scale; ++SubElt) {
3197 for (
unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) {
3198 unsigned Elt = Scale * SrcElt + SubElt;
3199 if (DemandedElts[Elt])
3207 for (
unsigned i = 0; i != NumSrcElts; ++i) {
3208 if (SrcDemandedElts[i]) {
3210 KnownZero.
setBits(i * Scale, (i + 1) * Scale);
3212 KnownUndef.
setBits(i * Scale, (i + 1) * Scale);
3220 if ((NumSrcElts % NumElts) == 0) {
3221 unsigned Scale = NumSrcElts / NumElts;
3223 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3229 for (
unsigned i = 0; i != NumElts; ++i) {
3230 if (DemandedElts[i]) {
3259 [&](
SDValue Elt) { return Op.getOperand(0) != Elt; })) {
3261 bool Updated =
false;
3262 for (
unsigned i = 0; i != NumElts; ++i) {
3263 if (!DemandedElts[i] && !Ops[i].
isUndef()) {
3273 for (
unsigned i = 0; i != NumElts; ++i) {
3275 if (
SrcOp.isUndef()) {
3277 }
else if (EltSizeInBits ==
SrcOp.getScalarValueSizeInBits() &&
3285 EVT SubVT =
Op.getOperand(0).getValueType();
3288 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3291 APInt SubUndef, SubZero;
3292 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
3295 KnownUndef.
insertBits(SubUndef, i * NumSubElts);
3296 KnownZero.
insertBits(SubZero, i * NumSubElts);
3301 bool FoundNewSub =
false;
3303 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3306 SDValue NewSubOp = SimplifyMultipleUseDemandedVectorElts(
3307 SubOp, SubElts, TLO.
DAG,
Depth + 1);
3308 DemandedSubOps.
push_back(NewSubOp ? NewSubOp : SubOp);
3309 FoundNewSub = NewSubOp ?
true : FoundNewSub;
3327 APInt DemandedSrcElts = DemandedElts;
3330 APInt SubUndef, SubZero;
3331 if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
3336 if (!DemandedSrcElts && !Src.isUndef())
3341 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero,
3349 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3350 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3351 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts(
3352 Sub, DemandedSubElts, TLO.
DAG,
Depth + 1);
3353 if (NewSrc || NewSub) {
3354 NewSrc = NewSrc ? NewSrc : Src;
3355 NewSub = NewSub ? NewSub : Sub;
3357 NewSub,
Op.getOperand(2));
3366 if (Src.getValueType().isScalableVector())
3369 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3372 APInt SrcUndef, SrcZero;
3373 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3381 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3382 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3394 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
3398 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
3399 unsigned Idx = CIdx->getZExtValue();
3400 if (!DemandedElts[
Idx])
3403 APInt DemandedVecElts(DemandedElts);
3405 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
3406 KnownZero, TLO,
Depth + 1))
3415 APInt VecUndef, VecZero;
3416 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
3429 APInt UndefSel, ZeroSel;
3430 if (SimplifyDemandedVectorElts(Sel, DemandedElts, UndefSel, ZeroSel, TLO,
3435 APInt DemandedLHS(DemandedElts);
3436 APInt DemandedRHS(DemandedElts);
3437 APInt UndefLHS, ZeroLHS;
3438 APInt UndefRHS, ZeroRHS;
3439 if (SimplifyDemandedVectorElts(
LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3442 if (SimplifyDemandedVectorElts(
RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3446 KnownUndef = UndefLHS & UndefRHS;
3447 KnownZero = ZeroLHS & ZeroRHS;
3451 APInt DemandedSel = DemandedElts & ~KnownZero;
3452 if (DemandedSel != DemandedElts)
3453 if (SimplifyDemandedVectorElts(Sel, DemandedSel, UndefSel, ZeroSel, TLO,
3465 APInt DemandedLHS(NumElts, 0);
3466 APInt DemandedRHS(NumElts, 0);
3467 for (
unsigned i = 0; i != NumElts; ++i) {
3468 int M = ShuffleMask[i];
3469 if (M < 0 || !DemandedElts[i])
3471 assert(0 <= M && M < (
int)(2 * NumElts) &&
"Shuffle index out of range");
3472 if (M < (
int)NumElts)
3475 DemandedRHS.
setBit(M - NumElts);
3479 APInt UndefLHS, ZeroLHS;
3480 APInt UndefRHS, ZeroRHS;
3481 if (SimplifyDemandedVectorElts(
LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3484 if (SimplifyDemandedVectorElts(
RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3489 bool Updated =
false;
3490 bool IdentityLHS =
true, IdentityRHS =
true;
3492 for (
unsigned i = 0; i != NumElts; ++i) {
3493 int &M = NewMask[i];
3496 if (!DemandedElts[i] || (M < (
int)NumElts && UndefLHS[M]) ||
3497 (M >= (
int)NumElts && UndefRHS[M - NumElts])) {
3501 IdentityLHS &= (M < 0) || (M == (
int)i);
3502 IdentityRHS &= (M < 0) || ((M - NumElts) == i);
3507 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.
LegalOps) {
3509 buildLegalVectorShuffle(VT,
DL,
LHS,
RHS, NewMask, TLO.
DAG);
3515 for (
unsigned i = 0; i != NumElts; ++i) {
3516 int M = ShuffleMask[i];
3519 }
else if (M < (
int)NumElts) {
3525 if (UndefRHS[M - NumElts])
3527 if (ZeroRHS[M - NumElts])
3536 APInt SrcUndef, SrcZero;
3538 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3539 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
3540 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3547 Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
3548 DemandedSrcElts == 1) {
3561 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() ==
ISD::AND &&
3562 Op->isOnlyUserOf(Src.getNode()) &&
3563 Op.getValueSizeInBits() == Src.getValueSizeInBits()) {
3565 EVT SrcVT = Src.getValueType();
3572 ISD::AND,
DL, SrcVT, {Src.getOperand(1), Mask})) {
3586 if (Op0 == Op1 &&
Op->isOnlyUserOf(Op0.
getNode())) {
3587 APInt UndefLHS, ZeroLHS;
3588 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3609 APInt UndefRHS, ZeroRHS;
3610 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3613 APInt UndefLHS, ZeroLHS;
3614 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3618 KnownZero = ZeroLHS & ZeroRHS;
3624 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3636 APInt UndefRHS, ZeroRHS;
3637 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3640 APInt UndefLHS, ZeroLHS;
3641 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3645 KnownZero = ZeroLHS;
3646 KnownUndef = UndefLHS & UndefRHS;
3651 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3662 APInt SrcUndef, SrcZero;
3663 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO,
3668 APInt DemandedElts0 = DemandedElts & ~SrcZero;
3669 if (SimplifyDemandedVectorElts(Op0, DemandedElts0, KnownUndef, KnownZero,
3673 KnownUndef &= DemandedElts0;
3674 KnownZero &= DemandedElts0;
3679 if (DemandedElts.
isSubsetOf(SrcZero | KnownZero | SrcUndef | KnownUndef))
3686 KnownZero |= SrcZero;
3687 KnownUndef &= SrcUndef;
3688 KnownUndef &= ~KnownZero;
3692 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3699 if (SimplifyDemandedVectorElts(
Op.getOperand(0), DemandedElts, KnownUndef,
3700 KnownZero, TLO,
Depth + 1))
3712 if (SimplifyDemandedVectorEltsForTargetNode(
Op, DemandedElts, KnownUndef,
3713 KnownZero, TLO,
Depth))
3718 if (SimplifyDemandedBits(
Op,
DemandedBits, OriginalDemandedElts, Known,
3719 TLO,
Depth, AssumeSingleUse))
3725 assert((KnownUndef & KnownZero) == 0 &&
"Elements flagged as undef AND zero");
3739 const APInt &DemandedElts,
3741 unsigned Depth)
const {
3746 "Should use MaskedValueIsZero if you don't know whether Op"
3747 " is a target node!");
3754 unsigned Depth)
const {
3766 unsigned Depth)
const {
3775 unsigned Depth)
const {
3780 "Should use ComputeNumSignBits if you don't know whether Op"
3781 " is a target node!");
3798 "Should use SimplifyDemandedVectorElts if you don't know whether Op"
3799 " is a target node!");
3810 "Should use SimplifyDemandedBits if you don't know whether Op"
3811 " is a target node!");
3812 computeKnownBitsForTargetNode(
Op, Known, DemandedElts, TLO.
DAG,
Depth);
3824 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
3825 " is a target node!");
3858 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op"
3859 " is a target node!");
3863 return !canCreateUndefOrPoisonForTargetNode(
Op, DemandedElts, DAG,
PoisonOnly,
3866 return DAG.isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly,
3878 "Should use canCreateUndefOrPoison if you don't know whether Op"
3879 " is a target node!");
3887 unsigned Depth)
const {
3892 "Should use isKnownNeverNaN if you don't know whether Op"
3893 " is a target node!");
3898 const APInt &DemandedElts,
3901 unsigned Depth)
const {
3906 "Should use isSplatValue if you don't know whether Op"
3907 " is a target node!");
3922 CVal = CN->getAPIntValue();
3923 EltWidth =
N.getValueType().getScalarSizeInBits();
3930 CVal = CVal.
trunc(EltWidth);
3936 return CVal.
isOne();
3978 return (
N->isOne() && !SExt) || (SExt && (
N->getValueType(0) != MVT::i1));
3981 return N->isAllOnes() && SExt;
3990 DAGCombinerInfo &DCI)
const {
4018 auto *AndC = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
4019 if (AndC &&
isNullConstant(N1) && AndC->getAPIntValue().isPowerOf2() &&
4022 AndC->getAPIntValue().getActiveBits());
4049 if (isXAndYEqZeroPreferableToXAndYEqY(
Cond, OpVT) &&
4057 if (DCI.isBeforeLegalizeOps() ||
4091SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
4096 if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
4105 if (!(C01 = dyn_cast<ConstantSDNode>(N0->
getOperand(1))))
4109 EVT XVT =
X.getValueType();
4133 auto checkConstants = [&
I1, &I01]() ->
bool {
4138 if (checkConstants()) {
4146 if (!checkConstants())
4152 const unsigned KeptBits =
I1.logBase2();
4153 const unsigned KeptBitsMinusOne = I01.
logBase2();
4156 if (KeptBits != (KeptBitsMinusOne + 1))
4170 return DAG.
getSetCC(
DL, SCCVT, SExtInReg,
X, NewCond);
4174SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
4176 DAGCombinerInfo &DCI,
const SDLoc &
DL)
const {
4178 "Should be a comparison with 0.");
4180 "Valid only for [in]equality comparisons.");
4182 unsigned NewShiftOpcode;
4192 unsigned OldShiftOpcode =
V.getOpcode();
4193 switch (OldShiftOpcode) {
4205 C =
V.getOperand(0);
4210 Y =
V.getOperand(1);
4215 X, XC,
CC,
Y, OldShiftOpcode, NewShiftOpcode, DAG);
4232 EVT VT =
X.getValueType();
4247 DAGCombinerInfo &DCI)
const {
4250 "Unexpected binop");
4278 if (!DCI.isCalledByLegalizer())
4279 DCI.AddToWorklist(YShl1.
getNode());
4294 if (CTPOP.getOpcode() !=
ISD::CTPOP || !CTPOP.hasOneUse())
4297 EVT CTVT = CTPOP.getValueType();
4298 SDValue CTOp = CTPOP.getOperand(0);
4318 for (
unsigned i = 0; i <
Passes; i++) {
4367 auto getRotateSource = [](
SDValue X) {
4369 return X.getOperand(0);
4376 if (
SDValue R = getRotateSource(N0))
4409 if (!C1 || !C1->
isZero())
4418 if (!ShAmtC || ShAmtC->getAPIntValue().uge(
BitWidth))
4422 unsigned ShAmt = ShAmtC->getZExtValue();
4431 if (
Or.getOperand(0) ==
Other) {
4432 X =
Or.getOperand(0);
4433 Y =
Or.getOperand(1);
4436 if (
Or.getOperand(1) ==
Other) {
4437 X =
Or.getOperand(1);
4438 Y =
Or.getOperand(0);
4448 if (matchOr(F0, F1)) {
4455 if (matchOr(F1, F0)) {
4471 const SDLoc &dl)
const {
4481 bool N0ConstOrSplat =
4483 bool N1ConstOrSplat =
4491 if (N0ConstOrSplat && !N1ConstOrSplat &&
4494 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4500 if (!N0ConstOrSplat && !N1ConstOrSplat &&
4505 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4514 const APInt &C1 = N1C->getAPIntValue();
4534 return DAG.
getNode(LogicOp, dl, VT, IsXZero, IsYZero);
4564 if (
auto *N1C = dyn_cast<ConstantSDNode>(N1.
getNode())) {
4565 const APInt &C1 = N1C->getAPIntValue();
4580 if (
auto *
C = dyn_cast<ConstantSDNode>(N0->
getOperand(1)))
4581 if ((
C->getAPIntValue()+1).isPowerOf2()) {
4582 MinBits =
C->getAPIntValue().countr_one();
4590 }
else if (
auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
4593 MinBits = LN0->getMemoryVT().getSizeInBits();
4597 MinBits = LN0->getMemoryVT().getSizeInBits();
4608 MinBits >= ReqdBits) {
4610 if (isTypeDesirableForOp(
ISD::SETCC, MinVT)) {
4613 if (MinBits == 1 && C1 == 1)
4632 if (TopSetCC.
getValueType() == MVT::i1 && VT == MVT::i1 &&
4645 cast<CondCodeSDNode>(TopSetCC.
getOperand(2))->get(),
4664 auto *Lod = cast<LoadSDNode>(N0.
getOperand(0));
4666 unsigned bestWidth = 0, bestOffset = 0;
4667 if (Lod->isSimple() && Lod->isUnindexed() &&
4668 (Lod->getMemoryVT().isByteSized() ||
4670 unsigned memWidth = Lod->getMemoryVT().getStoreSizeInBits();
4672 unsigned maskWidth = origWidth;
4676 origWidth = Lod->getMemoryVT().getSizeInBits();
4680 for (
unsigned width = 8; width < origWidth; width *= 2) {
4687 unsigned maxOffset = origWidth - width;
4688 for (
unsigned offset = 0; offset <= maxOffset; offset += 8) {
4689 if (Mask.isSubsetOf(newMask)) {
4690 unsigned ptrOffset =
4692 unsigned IsFast = 0;
4695 *DAG.
getContext(), Layout, newVT, Lod->getAddressSpace(),
4696 NewAlign, Lod->getMemOperand()->getFlags(), &IsFast) &&
4698 bestOffset = ptrOffset / 8;
4699 bestMask = Mask.lshr(offset);
4713 if (bestOffset != 0)
4717 Lod->getPointerInfo().getWithOffset(bestOffset),
4718 Lod->getOriginalAlign());
4795 ExtDstTy != ExtSrcTy &&
"Unexpected types!");
4802 return DAG.
getSetCC(dl, VT, ZextOp,
4804 }
else if ((N1C->isZero() || N1C->isOne()) &&
4851 return DAG.
getSetCC(dl, VT, Val, N1,
4854 }
else if (N1C->isOne()) {
4891 cast<VTSDNode>(Op0.
getOperand(1))->getVT() == MVT::i1)
4922 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1,
Cond, DCI, dl))
4929 const APInt &C1 = N1C->getAPIntValue();
4931 APInt MinVal, MaxVal;
4953 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
4973 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
5021 if (
SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
5022 VT, N0, N1,
Cond, DCI, dl))
5029 bool CmpZero = N1C->isZero();
5030 bool CmpNegOne = N1C->isAllOnes();
5031 if ((CmpZero || CmpNegOne) && N0.
hasOneUse()) {
5034 unsigned EltBits = V.getScalarValueSizeInBits();
5035 if (V.getOpcode() !=
ISD::OR || (EltBits % 2) != 0)
5042 isa<ConstantSDNode>(
RHS.getOperand(1)) &&
5043 RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5046 Hi =
RHS.getOperand(0);
5050 isa<ConstantSDNode>(
LHS.getOperand(1)) &&
5051 LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5054 Hi =
LHS.getOperand(0);
5062 unsigned HalfBits = EltBits / 2;
5073 if (IsConcat(N0,
Lo,
Hi))
5074 return MergeConcat(
Lo,
Hi);
5111 if (
auto *N1C = dyn_cast<ConstantSDNode>(N1.
getNode())) {
5112 const APInt &C1 = N1C->getAPIntValue();
5124 if (
auto *AndRHS = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5127 unsigned ShCt = AndRHS->getAPIntValue().logBase2();
5128 if (AndRHS->getAPIntValue().isPowerOf2() &&
5135 }
else if (
Cond ==
ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
5154 if (
auto *AndRHS = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5155 const APInt &AndRHSC = AndRHS->getAPIntValue();
5192 return DAG.
getSetCC(dl, VT, Shift, CmpRHS, NewCond);
5198 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) {
5199 auto *CFP = cast<ConstantFPSDNode>(N1);
5200 assert(!CFP->getValueAPF().isNaN() &&
"Unexpected NaN value");
5221 !
isFPImmLegal(CFP->getValueAPF(), CFP->getValueType(0))) {
5240 if (CFP->getValueAPF().isInfinity()) {
5241 bool IsNegInf = CFP->getValueAPF().isNegative();
5252 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5261 "Integer types should be handled by FoldSetCC");
5267 if (UOF ==
unsigned(EqTrue))
5272 if (NewCond !=
Cond &&
5275 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5282 if ((isSignedIntSetCC(
Cond) || isUnsignedIntSetCC(
Cond)) &&
5319 bool LegalRHSImm =
false;
5321 if (
auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
5322 if (
auto *LHSR = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5327 DAG.
getConstant(RHSC->getAPIntValue() - LHSR->getAPIntValue(),
5335 DAG.
getConstant(LHSR->getAPIntValue() ^ RHSC->getAPIntValue(),
5341 if (
auto *SUBC = dyn_cast<ConstantSDNode>(N0.
getOperand(0)))
5345 DAG.
getConstant(SUBC->getAPIntValue() - RHSC->getAPIntValue(),
5350 if (RHSC->getValueType(0).getSizeInBits() <= 64)
5359 if (
SDValue V = foldSetCCWithBinOp(VT, N0, N1,
Cond, dl, DCI))
5365 if (
SDValue V = foldSetCCWithBinOp(VT, N1, N0,
Cond, dl, DCI))
5368 if (
SDValue V = foldSetCCWithAnd(VT, N0, N1,
Cond, dl, DCI))
5379 if (
SDValue Folded = buildUREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5382 if (
SDValue Folded = buildSREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5395 N0 = DAG.
getNOT(dl, Temp, OpVT);
5404 Temp = DAG.
getNOT(dl, N0, OpVT);
5411 Temp = DAG.
getNOT(dl, N1, OpVT);
5418 Temp = DAG.
getNOT(dl, N0, OpVT);
5425 Temp = DAG.
getNOT(dl, N1, OpVT);
5434 N0 = DAG.
getNode(ExtendCode, dl, VT, N0);
5450 if (
auto *GASD = dyn_cast<GlobalAddressSDNode>(
N)) {
5451 GA = GASD->getGlobal();
5452 Offset += GASD->getOffset();
5460 if (
auto *V = dyn_cast<ConstantSDNode>(N2)) {
5461 Offset += V->getSExtValue();
5465 if (
auto *V = dyn_cast<ConstantSDNode>(N1)) {
5466 Offset += V->getSExtValue();
5487 unsigned S = Constraint.
size();
5490 switch (Constraint[0]) {
5493 return C_RegisterClass;
5521 if (S > 1 && Constraint[0] ==
'{' && Constraint[S - 1] ==
'}') {
5522 if (S == 8 && Constraint.
substr(1, 6) ==
"memory")
5550 std::vector<SDValue> &Ops,
5553 if (Constraint.
size() > 1)
5556 char ConstraintLetter = Constraint[0];
5557 switch (ConstraintLetter) {
5573 if ((
C = dyn_cast<ConstantSDNode>(
Op)) && ConstraintLetter !=
's') {
5577 bool IsBool =
C->getConstantIntValue()->getBitWidth() == 1;
5587 if (ConstraintLetter !=
'n') {
5588 if (
const auto *GA = dyn_cast<GlobalAddressSDNode>(
Op)) {
5590 GA->getValueType(0),
5591 Offset + GA->getOffset()));
5594 if (
const auto *BA = dyn_cast<BlockAddressSDNode>(
Op)) {
5596 BA->getBlockAddress(), BA->getValueType(0),
5597 Offset + BA->getOffset(), BA->getTargetFlags()));
5600 if (isa<BasicBlockSDNode>(
Op)) {
5605 const unsigned OpCode =
Op.getOpcode();
5607 if ((
C = dyn_cast<ConstantSDNode>(
Op.getOperand(0))))
5608 Op =
Op.getOperand(1);
5611 (
C = dyn_cast<ConstantSDNode>(
Op.getOperand(1))))
5612 Op =
Op.getOperand(0);
5629std::pair<unsigned, const TargetRegisterClass *>
5635 assert(*(Constraint.
end() - 1) ==
'}' &&
"Not a brace enclosed constraint?");
5640 std::pair<unsigned, const TargetRegisterClass *> R =
5652 std::pair<unsigned, const TargetRegisterClass *> S =
5653 std::make_pair(PR, RC);
5675 assert(!ConstraintCode.empty() &&
"No known constraint!");
5676 return isdigit(
static_cast<unsigned char>(ConstraintCode[0]));
5682 assert(!ConstraintCode.empty() &&
"No known constraint!");
5683 return atoi(ConstraintCode.c_str());
5697 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
5698 unsigned maCount = 0;
5704 unsigned LabelNo = 0;
5707 ConstraintOperands.emplace_back(std::move(CI));
5717 switch (OpInfo.
Type) {
5727 assert(!Call.getType()->isVoidTy() &&
"Bad inline asm!");
5728 if (
auto *STy = dyn_cast<StructType>(Call.getType())) {
5732 assert(ResNo == 0 &&
"Asm only has one result!");
5742 OpInfo.
CallOperandVal = cast<CallBrInst>(&Call)->getIndirectDest(LabelNo);
5753 OpTy = Call.getParamElementType(ArgNo);
5754 assert(OpTy &&
"Indirect operand must have elementtype attribute");
5758 if (
StructType *STy = dyn_cast<StructType>(OpTy))
5759 if (STy->getNumElements() == 1)
5760 OpTy = STy->getElementType(0);
5765 unsigned BitSize =
DL.getTypeSizeInBits(OpTy);
5786 if (!ConstraintOperands.empty()) {
5788 unsigned bestMAIndex = 0;
5789 int bestWeight = -1;
5795 for (maIndex = 0; maIndex < maCount; ++maIndex) {
5797 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
5798 cIndex != eIndex; ++cIndex) {
5819 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
5824 weightSum += weight;
5827 if (weightSum > bestWeight) {
5828 bestWeight = weightSum;
5829 bestMAIndex = maIndex;
5836 cInfo.selectAlternative(bestMAIndex);
5841 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
5842 cIndex != eIndex; ++cIndex) {
5853 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
5856 std::pair<unsigned, const TargetRegisterClass *> InputRC =
5861 (MatchRC.second != InputRC.second)) {
5863 " with a matching output constraint of"
5864 " incompatible type!");
5870 return ConstraintOperands;
5905 if (maIndex >= (
int)
info.multipleAlternatives.size())
5906 rCodes = &
info.Codes;
5908 rCodes = &
info.multipleAlternatives[maIndex].Codes;
5912 for (
const std::string &rCode : *rCodes) {
5914 getSingleConstraintMatchWeight(
info, rCode.c_str());
5915 if (weight > BestWeight)
5916 BestWeight = weight;
5929 Value *CallOperandVal =
info.CallOperandVal;
5932 if (!CallOperandVal)
5935 switch (*constraint) {
5938 if (isa<ConstantInt>(CallOperandVal))
5939 weight = CW_Constant;
5942 if (isa<GlobalValue>(CallOperandVal))
5943 weight = CW_Constant;
5947 if (isa<ConstantFP>(CallOperandVal))
5948 weight = CW_Constant;
5961 weight = CW_Register;
5965 weight = CW_Default;
5999 Ret.reserve(OpInfo.
Codes.size());
6014 Ret.emplace_back(Code, CType);
6019 return getConstraintPiority(a.second) > getConstraintPiority(b.second);
6033 "need immediate or other");
6038 std::vector<SDValue> ResultOps;
6040 return !ResultOps.empty();
6048 assert(!OpInfo.
Codes.empty() &&
"Must have at least one constraint");
6051 if (OpInfo.
Codes.size() == 1) {
6059 unsigned BestIdx = 0;
6060 for (
const unsigned E =
G.size();
6067 if (BestIdx + 1 == E) {
6083 if (isa<ConstantInt>(v) || isa<Function>(v)) {
6087 if (isa<BasicBlock>(v) || isa<BlockAddress>(v)) {
6094 if (
const char *Repl = LowerXConstraint(OpInfo.
ConstraintVT)) {
6109 EVT VT =
N->getValueType(0);
6114 bool UseSRA =
false;
6120 APInt Divisor =
C->getAPIntValue();
6142 "Expected matchUnaryPredicate to return one element for scalable "
6147 assert(isa<ConstantSDNode>(Op1) &&
"Expected a constant");
6149 Factor = Factors[0];
6155 Flags.setExact(
true);
6169 EVT VT =
N->getValueType(0);
6174 bool UseSRL =
false;
6180 APInt Divisor =
C->getAPIntValue();
6205 "Expected matchUnaryPredicate to return one element for scalable "
6210 assert(isa<ConstantSDNode>(Op1) &&
"Expected a constant");
6212 Factor = Factors[0];
6218 Flags.setExact(
true);
6257 EVT VT =
N->getValueType(0);
6293 bool IsAfterLegalization,
6294 bool IsAfterLegalTypes,
6297 EVT VT =
N->getValueType(0);
6323 if (
N->getFlags().hasExact())
6332 const APInt &Divisor =
C->getAPIntValue();
6334 int NumeratorFactor = 0;
6345 NumeratorFactor = 1;
6348 NumeratorFactor = -1;
6365 SDValue MagicFactor, Factor, Shift, ShiftMask;
6373 Shifts.
size() == 1 && ShiftMasks.
size() == 1 &&
6374 "Expected matchUnaryPredicate to return one element for scalable "
6381 assert(isa<ConstantSDNode>(N1) &&
"Expected a constant");
6382 MagicFactor = MagicFactors[0];
6383 Factor = Factors[0];
6385 ShiftMask = ShiftMasks[0];
6431 SDValue Q = GetMULHS(N0, MagicFactor);
6461 bool IsAfterLegalization,
6462 bool IsAfterLegalTypes,
6465 EVT VT =
N->getValueType(0);
6491 if (
N->getFlags().hasExact())
6501 bool UseNPQ =
false, UsePreShift =
false, UsePostShift =
false;
6507 const APInt& Divisor =
C->getAPIntValue();
6509 SDValue PreShift, MagicFactor, NPQFactor, PostShift;
6513 if (Divisor.
isOne()) {
6514 PreShift = PostShift = DAG.
getUNDEF(ShSVT);
6515 MagicFactor = NPQFactor = DAG.
getUNDEF(SVT);
6519 Divisor, std::min(KnownLeadingZeros, Divisor.
countl_zero()));
6524 "We shouldn't generate an undefined shift!");
6526 "We shouldn't generate an undefined shift!");
6528 "Unexpected pre-shift");
6535 UseNPQ |= magics.
IsAdd;
6536 UsePreShift |= magics.
PreShift != 0;
6551 SDValue PreShift, PostShift, MagicFactor, NPQFactor;
6559 NPQFactors.
size() == 1 && PostShifts.
size() == 1 &&
6560 "Expected matchUnaryPredicate to return one for scalable vectors");
6566 assert(isa<ConstantSDNode>(N1) &&
"Expected a constant");
6567 PreShift = PreShifts[0];
6568 MagicFactor = MagicFactors[0];
6569 PostShift = PostShifts[0];
6621 Q = GetMULHU(Q, MagicFactor);
6634 NPQ = GetMULHU(NPQ, NPQFactor);
6653 return DAG.
getSelect(dl, VT, IsOne, N0, Q);
6662 std::function<
bool(
SDValue)> Predicate,
6667 if (SplatValue != Values.
end()) {
6670 return Value == *SplatValue || Predicate(
Value);
6672 Replacement = *SplatValue;
6676 if (!AlternativeReplacement)
6679 Replacement = AlternativeReplacement;
6681 std::replace_if(Values.
begin(), Values.
end(), Predicate, Replacement);
6692 DAGCombinerInfo &DCI,
6695 if (
SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
6698 DCI.AddToWorklist(
N);
6706TargetLowering::prepareUREMEqFold(
EVT SETCCVT,
SDValue REMNode,
6708 DAGCombinerInfo &DCI,
const SDLoc &
DL,
6716 "Only applicable for (in)equality comparisons.");
6729 bool ComparingWithAllZeros =
true;
6730 bool AllComparisonsWithNonZerosAreTautological =
true;
6731 bool HadTautologicalLanes =
false;
6732 bool AllLanesAreTautological =
true;
6733 bool HadEvenDivisor =
false;
6734 bool AllDivisorsArePowerOfTwo =
true;
6735 bool HadTautologicalInvertedLanes =
false;
6744 const APInt &
Cmp = CCmp->getAPIntValue();
6746 ComparingWithAllZeros &=
Cmp.isZero();
6752 bool TautologicalInvertedLane =
D.ule(Cmp);
6753 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
6758 bool TautologicalLane =
D.isOne() || TautologicalInvertedLane;
6759 HadTautologicalLanes |= TautologicalLane;
6760 AllLanesAreTautological &= TautologicalLane;
6766 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
6769 unsigned K =
D.countr_zero();
6770 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
6774 HadEvenDivisor |= (
K != 0);
6777 AllDivisorsArePowerOfTwo &= D0.
isOne();
6781 unsigned W =
D.getBitWidth();
6783 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
6796 "We are expecting that K is always less than all-ones for ShSVT");
6799 if (TautologicalLane) {
6823 if (AllLanesAreTautological)
6828 if (AllDivisorsArePowerOfTwo)
6833 if (HadTautologicalLanes) {
6848 "Expected matchBinaryPredicate to return one element for "
6859 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
6863 "Expecting that the types on LHS and RHS of comparisons match.");
6873 if (HadEvenDivisor) {
6886 if (!HadTautologicalInvertedLanes)
6892 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
6899 SDValue TautologicalInvertedChannels =
6909 DL, SETCCVT, SETCCVT);
6911 Replacement, NewCC);
6919 TautologicalInvertedChannels);
6932 DAGCombinerInfo &DCI,
6935 if (
SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
6937 assert(Built.
size() <= 7 &&
"Max size prediction failed.");
6939 DCI.AddToWorklist(
N);
6947TargetLowering::prepareSREMEqFold(
EVT SETCCVT,
SDValue REMNode,
6949 DAGCombinerInfo &DCI,
const SDLoc &
DL,
6974 "Only applicable for (in)equality comparisons.");
6990 if (!CompTarget || !CompTarget->
isZero())
6993 bool HadIntMinDivisor =
false;
6994 bool HadOneDivisor =
false;
6995 bool AllDivisorsAreOnes =
true;
6996 bool HadEvenDivisor =
false;
6997 bool NeedToApplyOffset =
false;
6998 bool AllDivisorsArePowerOfTwo =
true;
7013 HadIntMinDivisor |=
D.isMinSignedValue();
7016 HadOneDivisor |=
D.isOne();
7017 AllDivisorsAreOnes &=
D.isOne();
7020 unsigned K =
D.countr_zero();
7021 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
7024 if (!
D.isMinSignedValue()) {
7027 HadEvenDivisor |= (
K != 0);
7032 AllDivisorsArePowerOfTwo &= D0.
isOne();
7036 unsigned W =
D.getBitWidth();
7038 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
7044 if (!
D.isMinSignedValue()) {
7047 NeedToApplyOffset |=
A != 0;
7054 "We are expecting that A is always less than all-ones for SVT");
7056 "We are expecting that K is always less than all-ones for ShSVT");
7094 if (AllDivisorsAreOnes)
7099 if (AllDivisorsArePowerOfTwo)
7102 SDValue PVal, AVal, KVal, QVal;
7104 if (HadOneDivisor) {
7124 QAmts.
size() == 1 &&
7125 "Expected matchUnaryPredicate to return one element for scalable "
7132 assert(isa<ConstantSDNode>(
D) &&
"Expected a constant");
7143 if (NeedToApplyOffset) {
7155 if (HadEvenDivisor) {
7170 if (!HadIntMinDivisor)
7176 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
7211 MaskedIsZero, Fold);
7218 if (!isa<ConstantSDNode>(
Op.getOperand(0))) {
7220 "be a constant integer");
7230 EVT VT =
Op.getValueType();
7253 bool LegalOps,
bool OptForSize,
7255 unsigned Depth)
const {
7257 if (
Op.getOpcode() ==
ISD::FNEG ||
Op.getOpcode() == ISD::VP_FNEG) {
7259 return Op.getOperand(0);
7270 EVT VT =
Op.getValueType();
7271 unsigned Opcode =
Op.getOpcode();
7281 auto RemoveDeadNode = [&](
SDValue N) {
7282 if (
N &&
N.getNode()->use_empty())
7291 std::list<HandleSDNode> Handles;
7302 if (LegalOps && !IsOpLegal)
7305 APFloat V = cast<ConstantFPSDNode>(
Op)->getValueAPF();
7319 return !N.isUndef() && !isa<ConstantFPSDNode>(N);
7327 return N.isUndef() ||
7332 if (LegalOps && !IsOpLegal)
7341 APFloat V = cast<ConstantFPSDNode>(
C)->getValueAPF();
7349 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7360 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7363 Handles.emplace_back(NegX);
7368 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7374 if (NegX && (CostX <= CostY)) {
7378 RemoveDeadNode(NegY);
7387 RemoveDeadNode(NegX);
7394 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7416 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7419 Handles.emplace_back(NegX);
7424 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7430 if (NegX && (CostX <= CostY)) {
7434 RemoveDeadNode(NegY);
7440 if (
C->isExactlyValue(2.0) &&
Op.getOpcode() ==
ISD::FMUL)
7448 RemoveDeadNode(NegX);
7455 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7458 SDValue X =
Op.getOperand(0),
Y =
Op.getOperand(1), Z =
Op.getOperand(2);
7461 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ,
Depth);
7467 Handles.emplace_back(NegZ);
7472 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7475 Handles.emplace_back(NegX);
7480 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7486 if (NegX && (CostX <= CostY)) {
7487 Cost = std::min(CostX, CostZ);
7490 RemoveDeadNode(NegY);
7496 Cost = std::min(CostY, CostZ);
7499 RemoveDeadNode(NegX);
7507 if (
SDValue NegV = getNegatedExpression(
Op.getOperand(0), DAG, LegalOps,
7509 return DAG.
getNode(Opcode,
DL, VT, NegV);
7512 if (
SDValue NegV = getNegatedExpression(
Op.getOperand(0), DAG, LegalOps,
7523 getNegatedExpression(
LHS, DAG, LegalOps, OptForSize, CostLHS,
Depth);
7525 RemoveDeadNode(NegLHS);
7530 Handles.emplace_back(NegLHS);
7535 getNegatedExpression(
RHS, DAG, LegalOps, OptForSize, CostRHS,
Depth);
7543 RemoveDeadNode(NegLHS);
7544 RemoveDeadNode(NegRHS);
7548 Cost = std::min(CostLHS, CostRHS);
7549 return DAG.
getSelect(
DL, VT,
Op.getOperand(0), NegLHS, NegRHS);
7578 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
7591 if ((
Signed && HasSMUL_LOHI) || (!
Signed && HasUMUL_LOHI)) {
7619 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false)) {
7620 Result.push_back(
Lo);
7621 Result.push_back(
Hi);
7624 Result.push_back(Zero);
7625 Result.push_back(Zero);
7636 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
true)) {
7637 Result.push_back(
Lo);
7638 Result.push_back(
Hi);
7643 unsigned ShiftAmount = OuterBitSize - InnerBitSize;
7658 if (!MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false))
7661 Result.push_back(
Lo);
7668 Result.push_back(
Hi);
7681 if (!MakeMUL_LOHI(LL, RH,
Lo,
Hi,
false))
7688 if (!MakeMUL_LOHI(LH, RL,
Lo,
Hi,
false))
7740 bool Ok = expandMUL_LOHI(
N->getOpcode(),
N->getValueType(0),
SDLoc(
N),
7741 N->getOperand(0),
N->getOperand(1), Result, HiLoVT,
7742 DAG, Kind, LL, LH, RL, RH);
7744 assert(Result.size() == 2);
7776 unsigned Opcode =
N->getOpcode();
7777 EVT VT =
N->getValueType(0);
7784 "Unexpected opcode");
7786 auto *CN = dyn_cast<ConstantSDNode>(
N->getOperand(1));
7790 APInt Divisor = CN->getAPIntValue();
7798 if (Divisor.
uge(HalfMaxPlus1))
7816 unsigned TrailingZeros = 0;
7830 if (HalfMaxPlus1.
urem(Divisor).
isOne()) {
7831 assert(!LL == !LH &&
"Expected both input halves or no input halves!");
7833 std::tie(LL, LH) = DAG.
SplitScalar(
N->getOperand(0), dl, HiLoVT, HiLoVT);
7837 if (TrailingZeros) {
7905 std::tie(QuotL, QuotH) = DAG.
SplitScalar(Quotient, dl, HiLoVT, HiLoVT);
7906 Result.push_back(QuotL);
7907 Result.push_back(QuotH);
7913 if (TrailingZeros) {
7919 Result.push_back(RemL);
7935 EVT VT =
Node->getValueType(0);
7945 bool IsFSHL =
Node->getOpcode() == ISD::VP_FSHL;
7948 EVT ShVT = Z.getValueType();
7954 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
7955 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitWidthC, ShAmt, Mask, VL);
7956 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, IsFSHL ? ShAmt : InvShAmt, Mask,
7958 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, IsFSHL ? InvShAmt : ShAmt, Mask,
7966 ShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, Z, BitMask, Mask, VL);
7970 InvShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, NotZ, BitMask, Mask, VL);
7973 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
7974 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitMask, ShAmt, Mask, VL);
7979 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, ShAmt, Mask, VL);
7981 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT, ShY1, InvShAmt, Mask, VL);
7984 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT, ShX1, InvShAmt, Mask, VL);
7985 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, ShAmt, Mask, VL);
7988 return DAG.
getNode(ISD::VP_OR,
DL, VT, ShX, ShY, Mask, VL);
7993 if (Node->isVPOpcode())
7996 EVT VT = Node->getValueType(0);
8006 SDValue Z = Node->getOperand(2);
8009 bool IsFSHL = Node->getOpcode() ==
ISD::FSHL;
8012 EVT ShVT = Z.getValueType();
8082 EVT VT = Node->getValueType(0);
8084 bool IsLeft = Node->getOpcode() ==
ISD::ROTL;
8085 SDValue Op0 = Node->getOperand(0);
8086 SDValue Op1 = Node->getOperand(1);
8097 return DAG.
getNode(RevRot,
DL, VT, Op0, Sub);
8100 if (!AllowVectorOps && VT.
isVector() &&
8118 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8120 HsVal = DAG.
getNode(HsOpc,
DL, VT, Op0, HsAmt);
8126 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8137 assert(Node->getNumOperands() == 3 &&
"Not a double-shift!");
8138 EVT VT = Node->getValueType(0);
8144 SDValue ShOpLo = Node->getOperand(0);
8145 SDValue ShOpHi = Node->getOperand(1);
8146 SDValue ShAmt = Node->getOperand(2);
8189 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
8190 SDValue Src = Node->getOperand(OpNo);
8191 EVT SrcVT = Src.getValueType();
8192 EVT DstVT = Node->getValueType(0);
8196 if (SrcVT != MVT::f32 || DstVT != MVT::i64)
8199 if (Node->isStrictFPOpcode())
8262 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
8263 SDValue Src = Node->getOperand(OpNo);
8265 EVT SrcVT = Src.getValueType();
8266 EVT DstVT = Node->getValueType(0);
8287 if (Node->isStrictFPOpcode()) {
8289 { Node->getOperand(0), Src });
8290 Chain = Result.getValue(1);
8304 if (Node->isStrictFPOpcode()) {
8306 Node->getOperand(0),
true);
8312 bool Strict = Node->isStrictFPOpcode() ||
8331 if (Node->isStrictFPOpcode()) {
8333 { Chain, Src, FltOfs });
8355 Result = DAG.
getSelect(dl, DstVT, Sel, True, False);
8366 if (Node->isStrictFPOpcode())
8369 SDValue Src = Node->getOperand(0);
8370 EVT SrcVT = Src.getValueType();
8371 EVT DstVT = Node->getValueType(0);
8394 llvm::bit_cast<double>(UINT64_C(0x4530000000100000)), dl, DstVT);
8414 unsigned Opcode = Node->getOpcode();
8419 if (Node->getFlags().hasNoNaNs()) {
8421 SDValue Op1 = Node->getOperand(0);
8422 SDValue Op2 = Node->getOperand(1);
8427 Flags.setNoSignedZeros(
true);
8440 EVT VT = Node->getValueType(0);
8444 "Expanding fminnum/fmaxnum for scalable vectors is undefined.");
8447 SDValue Quiet0 = Node->getOperand(0);
8448 SDValue Quiet1 = Node->getOperand(1);
8450 if (!Node->getFlags().hasNoNaNs()) {
8463 return DAG.
getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags());
8469 if ((Node->getFlags().hasNoNaNs() ||
8472 (Node->getFlags().hasNoSignedZeros() ||
8475 unsigned IEEE2018Op =
8478 return DAG.
getNode(IEEE2018Op, dl, VT, Node->getOperand(0),
8479 Node->getOperand(1), Node->getFlags());
8482 if (
SDValue SelCC = createSelectForFMINNUM_FMAXNUM(Node, DAG))
8493 unsigned Opc =
N->getOpcode();
8494 EVT VT =
N->getValueType(0);
8507 bool MinMaxMustRespectOrderedZero =
false;
8511 MinMaxMustRespectOrderedZero =
true;
8525 if (!
N->getFlags().hasNoNaNs() &&
8534 if (!MinMaxMustRespectOrderedZero && !
N->getFlags().hasNoSignedZeros() &&
8557 unsigned Opc = Node->getOpcode();
8558 EVT VT = Node->getValueType(0);
8568 if (!Flags.hasNoNaNs()) {
8584 if (Flags.hasNoNaNs() ||
8586 unsigned IEEE2019Op =
8594 if ((Flags.hasNoNaNs() ||
8623 if (
Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros() ||
8648 bool IsOrdered = NanTest ==
fcNone;
8649 bool IsUnordered = NanTest ==
fcNan;
8652 if (!IsOrdered && !IsUnordered)
8653 return std::nullopt;
8655 if (OrderedMask ==
fcZero &&
8661 return std::nullopt;
8668 EVT OperandVT =
Op.getValueType();
8680 if (OperandVT == MVT::ppcf128) {
8683 OperandVT = MVT::f64;
8688 bool IsInverted =
false;
8691 Test = InvertedCheck;
8698 bool IsF80 = (ScalarFloatVT == MVT::f80);
8702 if (Flags.hasNoFPExcept() &&
8710 FPClassTest OrderedFPTestMask = FPTestMask & ~fcNan;
8715 OrderedFPTestMask = FPTestMask;
8717 const bool IsOrdered = FPTestMask == OrderedFPTestMask;
8719 if (std::optional<bool> IsCmp0 =
8722 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode,
8729 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode);
8767 return DAG.
getSetCC(
DL, ResultVT, Abs, SmallestNormal,
8768 IsOrdered ? OrderedOp : UnorderedOp);
8785 const unsigned ExplicitIntBitInF80 = 63;
8786 APInt ExpMask = Inf;
8788 ExpMask.
clearBit(ExplicitIntBitInF80);
8802 const auto appendResult = [&](
SDValue PartialRes) {
8812 const auto getIntBitIsSet = [&]() ->
SDValue {
8813 if (!IntBitIsSetV) {
8814 APInt IntBitMask(BitSize, 0);
8815 IntBitMask.
setBit(ExplicitIntBitInF80);
8820 return IntBitIsSetV;
8841 Test &= ~fcPosFinite;
8846 Test &= ~fcNegFinite;
8848 appendResult(PartialRes);
8857 appendResult(ExpIsZero);
8867 else if (PartialCheck ==
fcZero)
8871 appendResult(PartialRes);
8884 appendResult(PartialRes);
8887 if (
unsigned PartialCheck =
Test &
fcInf) {
8890 else if (PartialCheck ==
fcInf)
8897 appendResult(PartialRes);
8900 if (
unsigned PartialCheck =
Test &
fcNan) {
8901 APInt InfWithQnanBit = Inf | QNaNBitMask;
8903 if (PartialCheck ==
fcNan) {
8916 }
else if (PartialCheck ==
fcQNan) {
8928 appendResult(PartialRes);
8933 APInt ExpLSB = ExpMask & ~(ExpMask.
shl(1));
8936 APInt ExpLimit = ExpMask - ExpLSB;
8949 appendResult(PartialRes);
8972 EVT VT = Node->getValueType(0);
8979 if (!(Len <= 128 && Len % 8 == 0))
9038 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
9049 EVT VT = Node->getValueType(0);
9052 SDValue Mask = Node->getOperand(1);
9053 SDValue VL = Node->getOperand(2);
9058 if (!(Len <= 128 && Len % 8 == 0))
9070 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5;
9073 Tmp1 = DAG.
getNode(ISD::VP_AND, dl, VT,
9077 Op = DAG.
getNode(ISD::VP_SUB, dl, VT,
Op, Tmp1, Mask, VL);
9080 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op, Mask33, Mask, VL);
9081 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT,
9085 Op = DAG.
getNode(ISD::VP_ADD, dl, VT, Tmp2, Tmp3, Mask, VL);
9090 Tmp5 = DAG.
getNode(ISD::VP_ADD, dl, VT,
Op, Tmp4, Mask, VL);
9091 Op = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp5, Mask0F, Mask, VL);
9102 V = DAG.
getNode(ISD::VP_MUL, dl, VT,
Op, Mask01, Mask, VL);
9105 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
9107 V = DAG.
getNode(ISD::VP_ADD, dl, VT, V,
9108 DAG.
getNode(ISD::VP_SHL, dl, VT, V, ShiftC, Mask, VL),
9118 EVT VT = Node->getValueType(0);
9157 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
9168 EVT VT = Node->getValueType(0);
9171 SDValue Mask = Node->getOperand(1);
9172 SDValue VL = Node->getOperand(2);
9182 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
9185 DAG.
getNode(ISD::VP_SRL, dl, VT,
Op, Tmp, Mask, VL), Mask,
9190 return DAG.
getNode(ISD::VP_CTPOP, dl, VT,
Op, Mask, VL);
9199 :
APInt(64, 0x0218A392CD3D5DBFULL);
9213 for (
unsigned i = 0; i <
BitWidth; i++) {
9239 EVT VT = Node->getValueType(0);
9273 if (
SDValue V = CTTZTableLookup(Node, DAG, dl, VT,
Op, NumBitsPerElt))
9295 SDValue Mask = Node->getOperand(1);
9296 SDValue VL = Node->getOperand(2);
9298 EVT VT = Node->getValueType(0);
9305 SDValue Tmp = DAG.
getNode(ISD::VP_AND, dl, VT, Not, MinusOne, Mask, VL);
9306 return DAG.
getNode(ISD::VP_CTPOP, dl, VT, Tmp, Mask, VL);
9320 EVT SrcVT = Source.getValueType();
9321 EVT ResVT =
N->getValueType(0);
9330 Source = DAG.
getNode(ISD::VP_SETCC,
DL, SrcVT, Source, AllZero,
9338 DAG.
getNode(ISD::VP_SELECT,
DL, ResVecVT, Source, StepVec,
Splat, EVL);
9339 return DAG.
getNode(ISD::VP_REDUCE_UMIN,
DL, ResVT, ExtEVL,
Select, Mask, EVL);
9343 bool IsNegative)
const {
9345 EVT VT =
N->getValueType(0);
9399 EVT VT =
N->getValueType(0);
9473 EVT VT =
N->getValueType(0);
9477 unsigned Opc =
N->getOpcode();
9486 "Unknown AVG node");
9498 return DAG.
getNode(ShiftOpc, dl, VT, Sum,
9550 return DAG.
getNode(SumOpc, dl, VT, Sign, Shift);
9555 EVT VT =
N->getValueType(0);
9562 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9613 EVT VT =
N->getValueType(0);
9622 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9631 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp1, Tmp2, Mask, EVL);
9641 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9645 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
9646 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
9647 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
9651 Tmp7 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9655 Tmp6 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9656 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
9659 Tmp5 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9660 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
9665 Tmp4 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp4,
9666 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
9669 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp3,
9670 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
9673 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9677 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp7, Mask, EVL);
9678 Tmp6 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp6, Tmp5, Mask, EVL);
9679 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
9680 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
9681 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp6, Mask, EVL);
9682 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
9683 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp4, Mask, EVL);
9689 EVT VT =
N->getValueType(0);
9732 for (
unsigned I = 0, J = Sz-1;
I < Sz; ++
I, --J) {
9749 assert(
N->getOpcode() == ISD::VP_BITREVERSE);
9752 EVT VT =
N->getValueType(0);
9771 Tmp = (Sz > 8 ? DAG.
getNode(ISD::VP_BSWAP, dl, VT,
Op, Mask, EVL) :
Op);
9776 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9782 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9787 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9793 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9798 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9804 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9810std::pair<SDValue, SDValue>
9814 SDValue Chain = LD->getChain();
9815 SDValue BasePTR = LD->getBasePtr();
9816 EVT SrcVT = LD->getMemoryVT();
9817 EVT DstVT = LD->getValueType(0);
9849 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(),
9850 LD->getMemOperand()->getFlags(), LD->getAAInfo());
9853 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9854 unsigned ShiftIntoIdx =
9865 Scalar = DAG.
getNode(ExtendOp, SL, DstEltVT, Scalar);
9872 return std::make_pair(
Value, Load.getValue(1));
9881 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9883 DAG.
getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
9884 LD->getPointerInfo().getWithOffset(
Idx * Stride),
9885 SrcEltVT, LD->getOriginalAlign(),
9886 LD->getMemOperand()->getFlags(), LD->getAAInfo());
9897 return std::make_pair(
Value, NewChain);
9904 SDValue Chain = ST->getChain();
9905 SDValue BasePtr = ST->getBasePtr();
9907 EVT StVT = ST->getMemoryVT();
9933 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9938 unsigned ShiftIntoIdx =
9947 return DAG.
getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
9948 ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
9954 assert(Stride &&
"Zero stride!");
9958 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9967 Chain, SL, Elt,
Ptr, ST->getPointerInfo().getWithOffset(
Idx * Stride),
9968 MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
9977std::pair<SDValue, SDValue>
9980 "unaligned indexed loads not implemented!");
9981 SDValue Chain = LD->getChain();
9983 EVT VT = LD->getValueType(0);
9984 EVT LoadedVT = LD->getMemoryVT();
9994 return scalarizeVectorLoad(LD, DAG);
10000 LD->getMemOperand());
10002 if (LoadedVT != VT)
10006 return std::make_pair(Result, newLoad.
getValue(1));
10014 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
10018 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.
getNode())->getIndex();
10020 SDValue StackPtr = StackBase;
10023 EVT PtrVT =
Ptr.getValueType();
10024 EVT StackPtrVT = StackPtr.getValueType();
10030 for (
unsigned i = 1; i < NumRegs; i++) {
10033 RegVT, dl, Chain,
Ptr, LD->getPointerInfo().getWithOffset(
Offset),
10034 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
10038 Load.getValue(1), dl, Load, StackPtr,
10049 8 * (LoadedBytes -
Offset));
10052 LD->getPointerInfo().getWithOffset(
Offset), MemVT,
10053 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
10059 Load.getValue(1), dl, Load, StackPtr,
10066 Load = DAG.
getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
10071 return std::make_pair(Load, TF);
10075 "Unaligned load of unsupported type.");
10084 Align Alignment = LD->getOriginalAlign();
10085 unsigned IncrementSize = NumBits / 8;
10096 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10101 LD->getPointerInfo().getWithOffset(IncrementSize),
10102 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10105 Hi = DAG.
getExtLoad(HiExtType, dl, VT, Chain,
Ptr, LD->getPointerInfo(),
10106 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10111 LD->getPointerInfo().getWithOffset(IncrementSize),
10112 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10124 return std::make_pair(Result, TF);
10130 "unaligned indexed stores not implemented!");
10131 SDValue Chain = ST->getChain();
10133 SDValue Val = ST->getValue();
10135 Align Alignment = ST->getOriginalAlign();
10137 EVT StoreMemVT = ST->getMemoryVT();
10153 Result = DAG.
getStore(Chain, dl, Result,
Ptr, ST->getPointerInfo(),
10154 Alignment, ST->getMemOperand()->getFlags());
10162 EVT PtrVT =
Ptr.getValueType();
10165 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
10169 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
10173 Chain, dl, Val, StackPtr,
10176 EVT StackPtrVT = StackPtr.getValueType();
10184 for (
unsigned i = 1; i < NumRegs; i++) {
10187 RegVT, dl, Store, StackPtr,
10191 ST->getPointerInfo().getWithOffset(
Offset),
10192 ST->getOriginalAlign(),
10193 ST->getMemOperand()->getFlags()));
10213 ST->getPointerInfo().getWithOffset(
Offset), LoadMemVT,
10214 ST->getOriginalAlign(),
10215 ST->getMemOperand()->getFlags(), ST->getAAInfo()));
10222 "Unaligned store of unknown type.");
10226 unsigned IncrementSize = NumBits / 8;
10235 if (
auto *
C = dyn_cast<ConstantSDNode>(
Lo);
C && !
C->isOpaque())
10246 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
10247 ST->getMemOperand()->getFlags());
10252 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
10253 ST->getMemOperand()->getFlags(), ST->getAAInfo());
10264 bool IsCompressedMemory)
const {
10266 EVT AddrVT =
Addr.getValueType();
10267 EVT MaskVT = Mask.getValueType();
10269 "Incompatible types of Data and Mask");
10270 if (IsCompressedMemory) {
10273 "Cannot currently handle compressed memory with scalable vectors");
10279 MaskIntVT = MVT::i32;
10303 "Cannot index a scalable vector within a fixed-width vector");
10307 EVT IdxVT =
Idx.getValueType();
10313 if (
auto *IdxCst = dyn_cast<ConstantSDNode>(
Idx))
10314 if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts)
10328 unsigned MaxIndex = NumSubElts < NElts ? NElts - NumSubElts : 0;
10336 return getVectorSubVecPointer(
10337 DAG, VecPtr, VecVT,
10355 "Converting bits to bytes lost precision");
10357 "Sub-vector must be a vector with matching element type");
10393 assert(EmuTlsVar &&
"Cannot find EmuTlsVar ");
10395 Entry.Ty = VoidPtrType;
10396 Args.push_back(Entry);
10403 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
10412 "Emulated TLS must have zero offset in GlobalAddressSDNode");
10413 return CallResult.first;
10424 EVT VT =
Op.getOperand(0).getValueType();
10426 if (VT.
bitsLT(MVT::i32)) {
10440 SDValue Op0 = Node->getOperand(0);
10441 SDValue Op1 = Node->getOperand(1);
10444 unsigned Opcode = Node->getOpcode();
10486 {Op0, Op1, DAG.getCondCode(CC)})) {
10493 {Op0, Op1, DAG.getCondCode(CC)})) {
10521 unsigned Opcode = Node->getOpcode();
10524 EVT VT =
LHS.getValueType();
10527 assert(VT ==
RHS.getValueType() &&
"Expected operands to be the same type");
10543 unsigned OverflowOp;
10558 llvm_unreachable(
"Expected method to receive signed or unsigned saturation "
10559 "addition or subtraction node.");
10567 unsigned BitWidth =
LHS.getScalarValueSizeInBits();
10570 SDValue SumDiff = Result.getValue(0);
10571 SDValue Overflow = Result.getValue(1);
10593 return DAG.
getSelect(dl, VT, Overflow, Zero, SumDiff);
10613 if (LHSIsNonNegative || RHSIsNonNegative) {
10615 return DAG.
getSelect(dl, VT, Overflow, SatMax, SumDiff);
10621 if (LHSIsNegative || RHSIsNegative) {
10623 return DAG.
getSelect(dl, VT, Overflow, SatMin, SumDiff);
10633 return DAG.
getSelect(dl, VT, Overflow, Result, SumDiff);
10637 unsigned Opcode = Node->getOpcode();
10640 EVT VT =
LHS.getValueType();
10641 EVT ResVT = Node->getValueType(0);
10672 unsigned Opcode = Node->getOpcode();
10676 EVT VT =
LHS.getValueType();
10681 "Expected a SHLSAT opcode");
10682 assert(VT ==
RHS.getValueType() &&
"Expected operands to be the same type");
10720 if (WideVT == MVT::i16)
10721 LC = RTLIB::MUL_I16;
10722 else if (WideVT == MVT::i32)
10723 LC = RTLIB::MUL_I32;
10724 else if (WideVT == MVT::i64)
10725 LC = RTLIB::MUL_I64;
10726 else if (WideVT == MVT::i128)
10727 LC = RTLIB::MUL_I128;
10736 unsigned HalfBits = Bits >> 1;
10775 if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.
getDataLayout())) {
10780 SDValue Args[] = {LL, LH, RL, RH};
10781 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
10783 SDValue Args[] = {LH, LL, RH, RL};
10784 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
10787 "Ret value is a collection of constituent nodes holding result.");
10790 Lo = Ret.getOperand(0);
10791 Hi = Ret.getOperand(1);
10793 Lo = Ret.getOperand(1);
10794 Hi = Ret.getOperand(0);
10803 EVT VT =
LHS.getValueType();
10804 assert(
RHS.getValueType() == VT &&
"Mismatching operand types");
10823 forceExpandWideMUL(DAG, dl,
Signed, WideVT,
LHS, HiLHS,
RHS, HiRHS,
Lo,
Hi);
10832 "Expected a fixed point multiplication opcode");
10837 EVT VT =
LHS.getValueType();
10838 unsigned Scale = Node->getConstantOperandVal(2);
10854 SDValue Product = Result.getValue(0);
10855 SDValue Overflow = Result.getValue(1);
10866 Result = DAG.
getSelect(dl, VT, ProdNeg, SatMin, SatMax);
10867 return DAG.
getSelect(dl, VT, Overflow, Result, Product);
10871 SDValue Product = Result.getValue(0);
10872 SDValue Overflow = Result.getValue(1);
10876 return DAG.
getSelect(dl, VT, Overflow, SatMax, Product);
10881 "Expected scale to be less than the number of bits if signed or at "
10882 "most the number of bits if unsigned.");
10884 "Expected both operands to be the same type");
10896 Lo = Result.getValue(0);
10897 Hi = Result.getValue(1);
10918 if (Scale == VTSize)
10964 return DAG.
getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
10989 "Expected a fixed point division opcode");
10991 EVT VT =
LHS.getValueType();
11013 if (LHSLead + RHSTrail < Scale + (
unsigned)(Saturating &&
Signed))
11016 unsigned LHSShift = std::min(LHSLead, Scale);
11017 unsigned RHSShift = Scale - LHSShift;
11074 bool IsAdd = Node->getOpcode() ==
ISD::UADDO;
11080 SDValue NodeCarry = DAG.
getNode(OpcCarry, dl, Node->getVTList(),
11081 { LHS, RHS, CarryIn });
11090 EVT ResultType = Node->getValueType(1);
11101 DAG.
getSetCC(dl, SetCCType, Result,
11120 bool IsAdd = Node->getOpcode() ==
ISD::SADDO;
11125 EVT ResultType = Node->getValueType(1);
11151 DAG.
getNode(
ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
11152 ResultType, ResultType);
11158 EVT VT = Node->getValueType(0);
11166 const APInt &
C = RHSC->getAPIntValue();
11168 if (
C.isPowerOf2()) {
11170 bool UseArithShift =
isSigned && !
C.isMinSignedValue();
11173 Overflow = DAG.
getSetCC(dl, SetCCVT,
11175 dl, VT, Result, ShiftAmt),
11188 static const unsigned Ops[2][3] =
11211 forceExpandWideMUL(DAG, dl,
isSigned,
LHS,
RHS, BottomHalf, TopHalf);
11214 Result = BottomHalf;
11221 Overflow = DAG.
getSetCC(dl, SetCCVT, TopHalf,
11226 EVT RType = Node->getValueType(1);
11231 "Unexpected result type for S/UMULO legalization");
11239 EVT VT =
Op.getValueType();
11243 "Expanding reductions for scalable vectors is undefined.");
11254 Op = DAG.
getNode(BaseOpcode, dl, HalfVT,
Lo,
Hi, Node->getFlags());
11266 for (
unsigned i = 1; i < NumElts; i++)
11267 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());
11270 if (EltVT != Node->getValueType(0))
11277 SDValue AccOp = Node->getOperand(0);
11278 SDValue VecOp = Node->getOperand(1);
11286 "Expanding reductions for scalable vectors is undefined.");
11296 for (
unsigned i = 0; i < NumElts; i++)
11297 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);
11304 EVT VT = Node->getValueType(0);
11309 SDValue Dividend = Node->getOperand(0);
11310 SDValue Divisor = Node->getOperand(1);
11313 Result = DAG.
getNode(DivRemOpc, dl, VTs, Dividend, Divisor).
getValue(1);
11318 SDValue Divide = DAG.
getNode(DivOpc, dl, VT, Dividend, Divisor);
11330 SDValue Src = Node->getOperand(0);
11333 EVT SrcVT = Src.getValueType();
11334 EVT DstVT = Node->getValueType(0);
11336 EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
11339 assert(SatWidth <= DstWidth &&
11340 "Expected saturation width smaller than result width");
11344 APInt MinInt, MaxInt;
11355 if (SrcVT == MVT::f16 || SrcVT == MVT::bf16) {
11357 SrcVT = Src.getValueType();
11379 if (AreExactFloatBounds && MinMaxLegal) {
11388 dl, DstVT, Clamped);
11400 return DAG.
getSelect(dl, DstVT, IsNan, ZeroInt, FpToInt);
11439 EVT OperandVT =
Op.getValueType();
11461 AbsWide = DAG.
getBitcast(OperandVT, ClearedSign);
11484 KeepNarrow = DAG.
getNode(
ISD::OR, dl, WideSetCCVT, KeepNarrow, AlreadyOdd);
11493 SDValue Adjust = DAG.
getSelect(dl, ResultIntVT, NarrowIsRd, One, NegativeOne);
11495 Op = DAG.
getSelect(dl, ResultIntVT, KeepNarrow, NarrowBits, Adjusted);
11507 EVT VT = Node->getValueType(0);
11510 if (Node->getConstantOperandVal(1) == 1) {
11513 EVT OperandVT =
Op.getValueType();
11525 EVT I32 =
F32.changeTypeToInteger();
11526 Op = expandRoundInexactToOdd(
F32,
Op, dl, DAG);
11551 EVT I16 = I32.isVector() ? I32.changeVectorElementType(MVT::i16) : MVT::i16;
11561 assert(Node->getValueType(0).isScalableVector() &&
11562 "Fixed length vector types expected to use SHUFFLE_VECTOR!");
11564 EVT VT = Node->getValueType(0);
11565 SDValue V1 = Node->getOperand(0);
11566 SDValue V2 = Node->getOperand(1);
11567 int64_t Imm = cast<ConstantSDNode>(Node->getOperand(2))->getSExtValue();
11586 EVT PtrVT = StackPtr.getValueType();
11588 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
11603 StackPtr = getVectorElementPointer(DAG, StackPtr, VT, Node->getOperand(2));
11605 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr,
11628 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr2,
11635 SDValue Vec = Node->getOperand(0);
11636 SDValue Mask = Node->getOperand(1);
11637 SDValue Passthru = Node->getOperand(2);
11641 EVT MaskVT = Mask.getValueType();
11650 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
11658 bool HasPassthru = !Passthru.
isUndef();
11664 Chain = DAG.
getStore(Chain,
DL, Passthru, StackPtr, PtrInfo);
11667 APInt PassthruSplatVal;
11668 bool IsSplatPassthru =
11671 if (IsSplatPassthru) {
11675 LastWriteVal = DAG.
getConstant(PassthruSplatVal,
DL, ScalarVT);
11676 }
else if (HasPassthru) {
11686 getVectorElementPointer(DAG, StackPtr, VecVT, Popcount);
11688 ScalarVT,
DL, Chain, LastElmtPtr,
11694 for (
unsigned I = 0;
I < NumElms;
I++) {
11698 SDValue OutPtr = getVectorElementPointer(DAG, StackPtr, VecVT, OutPos);
11700 Chain,
DL, ValI, OutPtr,
11713 if (HasPassthru &&
I == NumElms - 1) {
11719 OutPtr = getVectorElementPointer(DAG, StackPtr, VecVT, OutPos);
11724 DAG.
getSelect(
DL, ScalarVT, AllLanesSelected, ValI, LastWriteVal);
11726 Chain,
DL, LastWriteVal, OutPtr,
11731 return DAG.
getLoad(VecVT,
DL, Chain, StackPtr, PtrInfo);
11737 SDValue EVL,
bool &NeedInvert,
11739 bool IsSignaling)
const {
11740 MVT OpVT =
LHS.getSimpleValueType();
11742 NeedInvert =
false;
11743 assert(!EVL == !Mask &&
"VP Mask and EVL must either both be set or unset");
11744 bool IsNonVP = !EVL;
11759 bool NeedSwap =
false;
11760 InvCC = getSetCCInverse(CCCode, OpVT);
11788 "If SETUE is expanded, SETOEQ or SETUNE must be legal!");
11793 "If SETO is expanded, SETOEQ must be legal!");
11810 NeedInvert = ((
unsigned)CCCode & 0x8U);
11851 SetCC1 = DAG.
getSetCC(dl, VT,
LHS,
RHS, CC1, Chain, IsSignaling);
11852 SetCC2 = DAG.
getSetCC(dl, VT,
LHS,
RHS, CC2, Chain, IsSignaling);
11860 SetCC1 = DAG.
getSetCC(dl, VT,
LHS,
LHS, CC1, Chain, IsSignaling);
11861 SetCC2 = DAG.
getSetCC(dl, VT,
RHS,
RHS, CC2, Chain, IsSignaling);
11871 LHS = DAG.
getNode(Opc, dl, VT, SetCC1, SetCC2);
11875 Opc = Opc ==
ISD::OR ? ISD::VP_OR : ISD::VP_AND;
11876 LHS = DAG.
getNode(Opc, dl, VT, SetCC1, SetCC2, Mask, EVL);
unsigned const MachineRegisterInfo * MRI
amdgpu AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
block Block Frequency Analysis
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
static bool isUndef(ArrayRef< int > Mask)
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
static bool isNonZeroModBitWidthOrUndef(const MachineRegisterInfo &MRI, Register Reg, unsigned BW)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
unsigned const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
Function const char * Passes
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static bool lowerImmediateIfPossible(TargetLowering::ConstraintPair &P, SDValue Op, SelectionDAG *DAG, const TargetLowering &TLI)
If we have an immediate, see if we can lower it.
static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG)
static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, const APInt &UndefOp0, const APInt &UndefOp1)
Given a vector binary operation and known undefined elements for each input operand,...
static SDValue BuildExactUDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact UDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx, EVT VecVT, const SDLoc &dl, ElementCount SubEC)
static unsigned getConstraintPiority(TargetLowering::ConstraintType CT)
Return a number indicating our preference for chosing a type of constraint over another,...
static std::optional< bool > isFCmpEqualZero(FPClassTest Test, const fltSemantics &Semantics, const MachineFunction &MF)
Returns a true value if if this FPClassTest can be performed with an ordered fcmp to 0,...
static void turnVectorIntoSplatVector(MutableArrayRef< SDValue > Values, std::function< bool(SDValue)> Predicate, SDValue AlternativeReplacement=SDValue())
If all values in Values that don't match the predicate are same 'splat' value, then replace all value...
static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT)
static SDValue foldSetCCWithRotate(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact SDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, SDValue N0, const APInt &C1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue combineShiftToAVG(SDValue Op, TargetLowering::TargetLoweringOpt &TLO, const TargetLowering &TLI, const APInt &DemandedBits, const APInt &DemandedElts, unsigned Depth)
This file describes how to lower LLVM code to machine code.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT, SelectionDAG &DAG)
Scalarize a vector store, bitcasting to TargetVT to determine the scalar type.
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
void clearAllBits()
Set every bit to 0.
APInt reverseBits() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
unsigned countLeadingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
void setAllBits()
Set every bit to 1.
APInt multiplicativeInverse() const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void clearHighBits(unsigned hiBits)
Set top hiBits bits to 0.
int64_t getSExtValue() const
Get sign extended value.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void setBitVal(unsigned BitPosition, bool BitValue)
Set a given bit to a given value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool hasAttributes() const
Return true if the builder has IR-level attributes.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
ConstantFP - Floating Point Values [float, double].
const APInt & getAPIntValue() const
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
AttributeList getAttributes() const
Return the attribute list for this Function.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
std::vector< std::string > ConstraintCodeVector
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
Wrapper class representing physical registers. Should be passed by value.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setAdjustsStack(bool V)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
Function & getFunction()
Return the LLVM function that this machine code represents.
@ EK_GPRel32BlockAddress
EK_GPRel32BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const GlobalVariable * getNamedGlobal(StringRef Name) const
Return the global variable in the module with the specified name, of arbitrary type.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
Class to represent pointers.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
static SDNodeIterator end(const SDNode *N)
static SDNodeIterator begin(const SDNode *N)
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
bool willNotOverflowAdd(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the addition of 2 nodes can never overflow.
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
bool isKnownNeverSNaN(SDValue Op, unsigned Depth=0) const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N) const
Test whether the given value is a constant int or similar node.
SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
bool willNotOverflowSub(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the sub of 2 nodes can never overflow.
bool shouldOptForSize() const
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
std::optional< uint64_t > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
std::optional< uint64_t > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Class to represent struct types.
void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
virtual bool shouldRemoveRedundantExtend(SDValue Op) const
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
const TargetMachine & getTargetMachine() const
virtual bool isCtpopFast(EVT VT) const
Return true if ctpop instruction is fast.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const
Indicates if any padding is guaranteed to go at the most significant bits when storing the type to me...
virtual EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &) const
Returns the target specific optimal type for load and store operations as a result of memset,...
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some oth...
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
virtual bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const
Should we transform the IR-optimal check for whether given truncation down into KeptBits would be truncated.
bool isLegalRC(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual bool shouldExtendTypeInLibCall(EVT Type) const
Returns true if arguments should be extended in lib calls.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual bool shouldExpandCmpUsingSelects() const
Should we expand [US]CMP nodes using two selects and two compares, or by doing arithmetic on boolean ...
virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
MVT getSimpleValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal on this target.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
virtual bool hasAndNotCompare(SDValue Y) const
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
virtual bool isCtlzFast() const
Return true if ctlz instruction is fast.
virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, bool IsSigned) const
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonica...
NegatibleCost
Enum that specifies when a float negation is beneficial.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const
Get the CondCode that's to be used to test the result of the comparison libcall against zero.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal or custom on this target.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
MulExpansionKind
Enum that specifies when a multiplication should be expanded.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Build sdiv by power-of-2 with conditional move instructions Ref: "Hacker's Delight" by Henry Warren 1...
virtual ConstraintWeight getMultipleConstraintMatchWeight(AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]MULO.
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL into two nodes.
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand VP_BSWAP nodes.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand CTLZ/CTLZ_ZERO_UNDEF nodes.
SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand BITREVERSE nodes.
SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand CTTZ/CTTZ_ZERO_UNDEF nodes.
virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr, int JTI, SelectionDAG &DAG) const
Expands target specific indirect branch for the case of JumpTable expansion.
SDValue expandABD(SDNode *N, SelectionDAG &DAG) const
Expand ABDS/ABDU nodes.
virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine the known alignment for the pointer value R.
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]SHLSAT.
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const
Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all bits from only some vector eleme...
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::S(ADD|SUB)O.
SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand VP_BITREVERSE nodes.
SDValue expandABS(SDNode *N, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_* into an explicit calculation.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
SDValue expandVPCTTZElements(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_UNDEF nodes.
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual const char * getTargetNodeName(unsigned Opcode) const
This method returns the name of a target specific DAG node.
bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand float to UINT conversion.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded vector elements, returning true on success...
bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const
Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
SDValue expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimumnum/fmaximumnum into multiple comparison with selects.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
virtual const char * LowerXConstraint(EVT ConstraintVT) const
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand CTPOP nodes.
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will ...
SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand BSWAP nodes.
SDValue expandFMINIMUM_FMAXIMUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimum/fmaximum into multiple comparison with selects.
SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op, unsigned NumBitsPerElt) const
Expand CTTZ via Table Lookup.
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, returns true if Op is known to never be any NaN; if SNaN is true, returns whether Op is known to never be a signaling NaN.
bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, SDValue LL=SDValue(), SDValue LH=SDValue()) const
Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit urem by constant and other arit...
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool isPositionIndependent() const
std::pair< StringRef, TargetLowering::ConstraintType > ConstraintPair
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const
Given an OpInfo with list of constraints codes as strings, return a sorted Vector of pairs of constra...
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand float(f32) to SINT(i64) conversion.
virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations.
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed, EVT WideVT, const SDValue LL, const SDValue LH, const SDValue RL, const SDValue RH, SDValue &Lo, SDValue &Hi) const
forceExpandWideMUL - Unconditionally expand a MUL into either a libcall or brute force via a wide mul...
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
TargetLowering(const TargetLowering &)=delete
bool isConstFalseVal(SDValue N) const
Return if the N is a constant or constant vector equal to the false value from getBooleanContents().
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, const SDLoc &dl) const
Try to simplify a setcc built with the specified operands and cc.
SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const
Expand funnel shift.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, SDValue Mask, SDValue EVL, bool &NeedInvert, const SDLoc &dl, SDValue &Chain, bool IsSignaling=false) const
Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC on the current target.
bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const
Return if N is a True value when extended to VT.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &DemandedBits, TargetLoweringOpt &TLO) const
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
bool isConstTrueVal(SDValue N) const
Return if the N is a constant or constant vector equal to the true value from getBooleanContents().
SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTPOP nodes.
SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, SDValue LHS, SDValue RHS, unsigned Scale, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]DIVFIX[SAT].
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily,...
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const
Try to convert the fminnum/fmaxnum to a compare/select sequence.
SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const
Expand rotations.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]CMP.
void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target...
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT].
SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][MIN|MAX].
virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis, Register R, KnownBits &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::U(ADD|SUB)O.
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SREM lowering for power-of-2 denominators.
bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand UINT(i64) to double(f64) conversion.
bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS, SDValue RHS, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, respectively,...
SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const
Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
iterator_range< regclass_iterator > regclasses() const
virtual StringRef getRegAsmName(MCRegister Reg) const
Return the assembly name for Reg.
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
const fltSemantics & getFltSemantics() const
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
StringRef getName() const
Return a constant reference to the value's name.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantSDNode predicate.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
FPClassTest invertFPClassTestIfSimpler(FPClassTest Test)
Evaluates if the specified FP class test is better performed as the inverse (i.e.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
auto find_if_not(R &&Range, UnaryPredicate P)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
constexpr unsigned BitWidth
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
APFloat neg(APFloat X)
Returns the negated value of the argument.
unsigned Log2(Align A)
Returns the log2 of the alignment.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
opStatus
IEEE-754R 7: Default exception handling.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
int MatchingInput
MatchingInput - If this is not -1, this is an output constraint where an input constraint is required to match it (e.g. "0").
ConstraintCodeVector Codes
Code - The constraint code, either the register name (in braces) or the constraint letter/number.
SubConstraintInfoVector multipleAlternatives
multipleAlternatives - If there are multiple alternative constraints, this array will contain them.
bool isIndirect
isIndirect - True if this operand is an indirect operand.
bool hasMatchingInput() const
hasMatchingInput - Return true if this is an output constraint that has a matching input constraint.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
bool isUnknown() const
Returns true if we don't know any bits.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
static std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
static std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
static std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
static std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR value.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasNoUnsignedWrap() const
bool hasNoSignedWrap() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Magic data for optimising signed division by a constant.
unsigned ShiftAmount
shift amount
static SignedDivisionByConstantInfo get(const APInt &D)
Calculate the magic numbers required to implement a signed integer division by a constant as a sequence of multiplies, adds and shifts.
This contains information for each constraint that we are lowering.
MVT ConstraintVT
The ValueType for the operand value.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g. Register, RegisterClass, Memory, Other, Unknown.
std::string ConstraintCode
This contains the actual string for the code, like "m".
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand to the CallInst. This gets modified as the asm is processed.
unsigned getMatchedOperand() const
If this is an input matching constraint, this method returns the output operand it matches.
bool isMatchingInputConstraint() const
Return true if this is an input operand that is a matching constraint like "4".
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)
bool isBeforeLegalizeOps() const
void AddToWorklist(SDNode *N)
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setIsPostTypeLegalization(bool Value=true)
ArrayRef< EVT > OpsVTBeforeSoften
bool IsPostTypeLegalization
MakeLibCallOptions & setSExt(bool Value=true)
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT, bool Value=true)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetLowering to its clients that want to combine.
bool CombineTo(SDValue O, SDValue N)
bool LegalOperations() const
Magic data for optimising unsigned division by a constant.
unsigned PreShift
pre-shift amount
static UnsignedDivisionByConstantInfo get(const APInt &D, unsigned LeadingZeros=0, bool AllowEvenDivisorOptimization=true)
Calculate the magic numbers required to implement an unsigned integer division by a constant as a sequence of multiplies, adds and shifts.
unsigned PostShift
post-shift amount