if (F.getFnAttribute("disable-tail-calls").getValueAsBool())
AttrBuilder CallerAttrs(F.getContext(), F.getAttributes().getRetAttrs());
for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                         Attribute::DereferenceableOrNull, Attribute::NoAlias,
                         Attribute::NonNull, Attribute::NoUndef})
  CallerAttrs.removeAttribute(Attr);

if (CallerAttrs.contains(Attribute::ZExt) ||
    CallerAttrs.contains(Attribute::SExt))
for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
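// ArgListEntry::setAttributes: copy every ABI-relevant parameter attribute
// from the call site into the argument-lowering entry. At most one of
// byval/preallocated/inalloca/sret may be set on a single parameter.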
IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
Alignment = Call->getParamStackAlign(ArgIdx);

assert(IsByVal + IsPreallocated + IsInAlloca + IsSRet <= 1 &&
       "multiple ABI attributes?");
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops,
                            MakeLibCallOptions CallOptions, const SDLoc &dl,
                            SDValue InChain) const {

for (unsigned i = 0; i < Ops.size(); ++i) {

Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());

Entry.IsZExt = !Entry.IsSExt;

Entry.IsSExt = Entry.IsZExt = false;

Args.push_back(Entry);

if (LC == RTLIB::UNKNOWN_LIBCALL)
  report_fatal_error("Unsupported library call operation!");

bool zeroExtend = !signExtend;

signExtend = zeroExtend = false;

return LowerCallTo(CLI);
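// findOptimalMemOpLowering: choose the sequence of value types used to lower
// a memcpy/memset, widest type first, optionally allowing an overlapping
// unaligned tail access when the target reports it is fast.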
bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {

if (Limit != ~unsigned(0) && Op.isMemcpyWithFixedDstAlign() &&
    Op.getSrcAlign() < Op.getDstAlign())

if (Op.isFixedDstAlign())

unsigned NumMemOps = 0;

while (VTSize > Size) {

if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
    allowsMisalignedMemoryAccesses(
        VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),

if (++NumMemOps > Limit)

MemOps.push_back(VT);
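// softenSetCCOperands: expand a floating-point setcc that must be done in
// software into one or two integer libcalls plus integer comparisons.
// Predicates with no single libcall combine two results; e.g. SETUEQ on f32
// becomes roughly:
//   unord = __unordsf2(a, b);  eq = __eqsf2(a, b);
//   result = (unord != 0) | (eq == 0)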
return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
                           OldRHS, Chain);

bool IsSignaling) const {

assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
        VT == MVT::ppcf128) &&
       "Unsupported setcc type!");

RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
bool ShouldInvertCC = false;

LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
      (VT == MVT::f64) ? RTLIB::OEQ_F64 :
      (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
      (VT == MVT::f64) ? RTLIB::UNE_F64 :
      (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
      (VT == MVT::f64) ? RTLIB::OGE_F64 :
      (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
      (VT == MVT::f64) ? RTLIB::OLT_F64 :
      (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
      (VT == MVT::f64) ? RTLIB::OLE_F64 :
      (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
      (VT == MVT::f64) ? RTLIB::OGT_F64 :
      (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;

ShouldInvertCC = true;

LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
      (VT == MVT::f64) ? RTLIB::UO_F64 :
      (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;

ShouldInvertCC = true;

LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
      (VT == MVT::f64) ? RTLIB::UO_F64 :
      (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
      (VT == MVT::f64) ? RTLIB::OEQ_F64 :
      (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;

ShouldInvertCC = true;

LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
      (VT == MVT::f64) ? RTLIB::OGE_F64 :
      (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
      (VT == MVT::f64) ? RTLIB::OGT_F64 :
      (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
      (VT == MVT::f64) ? RTLIB::OLE_F64 :
      (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
      (VT == MVT::f64) ? RTLIB::OLT_F64 :
      (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;

SDValue Ops[2] = {NewLHS, NewRHS};

auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);

if (ShouldInvertCC) {

if (LC2 == RTLIB::UNKNOWN_LIBCALL) {

auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);

NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
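// Jump-table encoding and global-address offset folding depend on the PIC
// model: non-PIC code can reference a block address directly, and an offset
// can only be folded into a global address that is known to be DSO-local.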
if (!isPositionIndependent())

unsigned JTEncoding = getJumpTableEncoding();

if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))

if (isPositionIndependent())
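// ShrinkDemandedConstant / ShrinkDemandedOp: when only some result bits are
// demanded, try to replace an immediate operand with a smaller (cheaper)
// constant, or perform the whole binary operation in a narrower type and
// zero-extend the result.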
bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {

unsigned Opcode = Op.getOpcode();

if (targetShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))

auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
if (!Op1C || Op1C->isOpaque())

const APInt &C = Op1C->getAPIntValue();

EVT VT = Op.getValueType();

EVT VT = Op.getValueType();

bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {

assert(Op.getNumOperands() == 2 &&
       "ShrinkDemandedOp only supports binary operators!");
assert(Op.getNode()->getNumValues() == 1 &&
       "ShrinkDemandedOp only supports nodes with one result!");

if (Op.getValueType().isVector())

if (!Op.getNode()->hasOneUse())

unsigned SmallVTBits = DemandedSize;

SDValue X = TLO.DAG.getNode(
    Op.getOpcode(), dl, SmallVT,
    TLO.DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
    TLO.DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));

assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
const APInt &DemandedElts,

bool AssumeSingleUse) const {

EVT VT = Op.getValueType();

switch (Op.getOpcode()) {

EVT SrcVT = Src.getValueType();
EVT DstVT = Op.getValueType();

if (NumSrcEltBits == NumDstEltBits)
  if (SDValue V = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedElts, DAG, Depth + 1))

if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
  unsigned Scale = NumDstEltBits / NumSrcEltBits;

  for (unsigned i = 0; i != Scale; ++i) {
    unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
    unsigned BitOffset = EltOffset * NumSrcEltBits;

    DemandedSrcBits |= Sub;
    for (unsigned j = 0; j != NumElts; ++j)
      if (DemandedElts[j])
        DemandedSrcElts.setBit((j * Scale) + i);

  if (SDValue V = SimplifyMultipleUseDemandedBits(
          Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))

if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
  unsigned Scale = NumSrcEltBits / NumDstEltBits;

  for (unsigned i = 0; i != NumElts; ++i)
    if (DemandedElts[i]) {
      unsigned Offset = (i % Scale) * NumDstEltBits;

      DemandedSrcElts.setBit(i / Scale);

  if (SDValue V = SimplifyMultipleUseDemandedBits(
          Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
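// For bitwise AND/OR/XOR, if one operand already provides every demanded bit
// (per the known-bits analysis), the other operand can be returned directly
// instead of keeping the logic op alive.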
return Op.getOperand(0);
return Op.getOperand(1);

return Op.getOperand(0);
return Op.getOperand(1);

return Op.getOperand(0);
return Op.getOperand(1);

if (const APInt *MaxSA =
        DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
  unsigned ShAmt = MaxSA->getZExtValue();
  unsigned NumSignBits =
      DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
  if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))

if (getBooleanContents(Op0.getValueType()) ==
    BooleanContent::ZeroOrNegativeOneBooleanContent) {

EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

if (NumSignBits >= (BitWidth - ExBits + 1))

EVT SrcVT = Src.getValueType();
EVT DstVT = Op.getValueType();
if (IsLE && DemandedElts == 1 &&

auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));

!DemandedElts[CIdx->getZExtValue()])

if (DemandedSubElts == 0)

if (Idx == 0 && Vec.isUndef()) {
  if (SDValue NewSub = SimplifyMultipleUseDemandedBits(
          Sub, DemandedBits, DemandedSubElts, DAG, Depth + 1))
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                       Op.getOperand(0), NewSub, Op.getOperand(2));

bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
for (unsigned i = 0; i != NumElts; ++i) {
  int M = ShuffleMask[i];
  if (M < 0 || !DemandedElts[i])

  IdentityLHS &= (M == (int)i);
  IdentityRHS &= ((M - NumElts) == i);

return Op.getOperand(0);
return Op.getOperand(1);

if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
        Op, DemandedBits, DemandedElts, DAG, Depth))

unsigned Depth) const {

EVT VT = Op.getValueType();

return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                       Depth);

unsigned Depth) const {

return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                       Depth);
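// combineShiftToAVG: recognize srl/sra (add (ext A), (ext B)), 1 and, when
// enough sign or zero bits are known for the operands, replace it with an
// averaging node (AVGFLOOR*/AVGCEIL*) computed in a possibly narrower type.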
const APInt &DemandedElts,

assert((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) &&
       "SRL or SRA node is required here!");

if (!N1C || !N1C->isOne())

SDValue ExtOpA = Add.getOperand(0);
SDValue ExtOpB = Add.getOperand(1);

bool IsSigned = false;

unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;

unsigned NumZero = std::min(NumZeroA, NumZeroB);

if (NumZero >= 2 && NumSigned < NumZero) {

if (NumSigned >= 1) {

if (NumZero >= 1 && NumSigned < NumZero) {
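// SimplifyDemandedBits (recursive worker): walk Op with a mask of demanded
// bits and demanded elements, proving bits known zero/one, rewriting nodes
// whose unused inputs can be dropped, and deferring to target hooks for
// unknown opcodes. Returns true whenever the DAG was changed.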
EVT VT = Op.getValueType();

unsigned Depth, bool AssumeSingleUse) const {

assert(Op.getScalarValueSizeInBits() == BitWidth &&
       "Mask size mismatches value type size!");

if (Op.getValueType().isScalableVector())

unsigned NumElts = OriginalDemandedElts.getBitWidth();
assert((!Op.getValueType().isVector() ||
        NumElts == Op.getValueType().getVectorNumElements()) &&
       "Unexpected vector size");

APInt DemandedElts = OriginalDemandedElts;

Known = KnownBits::makeConstant(
    cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt());

EVT VT = Op.getValueType();
if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {

} else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {

switch (Op.getOpcode()) {

if (!DemandedElts[0])

unsigned SrcBitWidth = Src.getScalarValueSizeInBits();

if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))

if (DemandedElts == 1)

auto *LD = cast<LoadSDNode>(Op);
if (getTargetConstantFromLoad(LD)) {

EVT MemVT = LD->getMemoryVT();

auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));

APInt DemandedVecElts(DemandedElts);

unsigned Idx = CIdx->getZExtValue();

if (!DemandedElts[Idx])

if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))

if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
                         Depth + 1))

if (!!DemandedVecElts)

APInt DemandedSrcElts = DemandedElts;

if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,
                         Depth + 1))
if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO,
                         Depth + 1))

if (!!DemandedSubElts)
if (!!DemandedSrcElts)

SDValue NewSub = SimplifyMultipleUseDemandedBits(
    Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1);
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
    Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
if (NewSub || NewSrc) {
  NewSub = NewSub ? NewSub : Sub;
  NewSrc = NewSrc ? NewSrc : Src;

if (Src.getValueType().isScalableVector())

unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);

if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
                         Depth + 1))

SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
    Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);

EVT SubVT = Op.getOperand(0).getValueType();
unsigned NumSubVecs = Op.getNumOperands();

for (unsigned i = 0; i != NumSubVecs; ++i) {
  APInt DemandedSubElts =
      DemandedElts.extractBits(NumSubElts, i * NumSubElts);
  if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                           Known2, TLO, Depth + 1))

if (!!DemandedSubElts)

APInt DemandedLHS(NumElts, 0);
APInt DemandedRHS(NumElts, 0);
for (unsigned i = 0; i != NumElts; ++i) {
  if (!DemandedElts[i])
  int M = ShuffleMask[i];

  assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
  if (M < (int)NumElts)

  DemandedRHS.setBit(M - NumElts);

if (!!DemandedLHS || !!DemandedRHS) {

if (!!DemandedLHS) {
  if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,

if (!!DemandedRHS) {
  if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
    Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
    Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
if (DemandedOp0 || DemandedOp1) {
  Op0 = DemandedOp0 ? DemandedOp0 : Op0;
  Op1 = DemandedOp1 ? DemandedOp1 : Op1;
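// Bitwise ops in the main recursion: each operand is simplified against the
// demanded bits, using what is already known about the other side (an AND
// only demands LHS bits the RHS may keep set; an OR only those the RHS may
// leave clear), followed by multiple-use-safe operand rewrites.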
if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
    LHSKnown.One == ~RHSC->getAPIntValue()) {

if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                         Depth + 1))
if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
                         Known2, TLO, Depth + 1))

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
    Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
    Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
if (DemandedOp0 || DemandedOp1) {
  Op0 = DemandedOp0 ? DemandedOp0 : Op0;
  Op1 = DemandedOp1 ? DemandedOp1 : Op1;

if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                         Depth + 1))
if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                         Known2, TLO, Depth + 1))

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
    Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
    Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
if (DemandedOp0 || DemandedOp1) {
  Op0 = DemandedOp0 ? DemandedOp0 : Op0;
  Op1 = DemandedOp1 ? DemandedOp1 : Op1;

if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                         Depth + 1))
if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
                         Depth + 1))

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
    Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
    Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
if (DemandedOp0 || DemandedOp1) {
  Op0 = DemandedOp0 ? DemandedOp0 : Op0;
  Op1 = DemandedOp1 ? DemandedOp1 : Op1;

if (C->getAPIntValue() == Known2.One) {

if (!C->isAllOnes() && DemandedBits.isSubsetOf(C->getAPIntValue())) {

if (!C || !C->isAllOnes())

if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO,
                         Depth + 1))
if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO,
                         Depth + 1))

if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, DemandedElts,
                         Known, TLO, Depth + 1))
if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, DemandedElts,
                         Known2, TLO, Depth + 1))

if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
                         Depth + 1))
if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO,
                         Depth + 1))

if (getBooleanContents(Op0.getValueType()) ==
    BooleanContent::ZeroOrNegativeOneBooleanContent) {
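// Shift-by-constant cases: a valid constant shift amount lets the demanded
// mask be translated through the shift, merged with an inner opposite shift
// (shl of srl and vice versa), or the shift bypassed entirely when only
// sign bits are demanded.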
if (const APInt *SA =
        TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
  unsigned ShAmt = SA->getZExtValue();

if (const APInt *SA2 =
        TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
  unsigned C1 = SA2->getZExtValue();

int Diff = ShAmt - C1;

if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
    isTypeDesirableForOp(ISD::SHL, InnerVT)) {

if (const APInt *SA2 =
        TLO.DAG.getValidShiftAmountConstant(InnerOp, DemandedElts)) {
  unsigned InnerShAmt = SA2->getZExtValue();
  if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
      DemandedBits.getActiveBits() <=
          (InnerBits - InnerShAmt + ShAmt) &&

if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                         Depth + 1))

Known.Zero <<= ShAmt;
Known.One <<= ShAmt;

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
    Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);

if (const APInt *MaxSA =
        TLO.DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
  unsigned ShAmt = MaxSA->getZExtValue();
  unsigned NumSignBits =
      TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
  if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))

if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits,
                                    DemandedElts, Depth + 1))

if (const APInt *SA =
        TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
  unsigned ShAmt = SA->getZExtValue();

if (const APInt *SA2 =
        TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
  unsigned C1 = SA2->getZExtValue();

int Diff = ShAmt - C1;

if (Op->getFlags().hasExact())

if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,

if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits,
                                    DemandedElts, Depth + 1))

if (const APInt *SA =
        TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
  unsigned ShAmt = SA->getZExtValue();

if (Op->getFlags().hasExact())

if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
    Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);
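// Funnel shifts and rotates: with a constant amount, split the demanded bits
// between the two sources; a rotate whose demanded bits do not wrap around
// can be relaxed to a plain shift. umin/umax collapse to one operand when
// known bits already decide the unsigned comparison.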
unsigned Amt = SA->getAPIntValue().urem(BitWidth);

if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts,
                         Known, TLO, Depth + 1))

if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
    Op0, Demanded0, DemandedElts, TLO.DAG, Depth + 1);
SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
    Op1, Demanded1, DemandedElts, TLO.DAG, Depth + 1);
if (DemandedOp0 || DemandedOp1) {
  DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0;
  DemandedOp1 = DemandedOp1 ? DemandedOp1 : Op1;

if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
                         Known2, TLO, Depth + 1))

unsigned Amt = SA->getAPIntValue().urem(BitWidth);

if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,

DemandedBits.countTrailingZeros() >= (IsROTL ? Amt : RevAmt)) {
DemandedBits.countLeadingZeros() >= (IsROTL ? RevAmt : Amt)) {

if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,

return TLO.CombineTo(Op, IsULE.getValue() ? Op0 : Op1);
return TLO.CombineTo(Op, IsULT.getValue() ? Op0 : Op1);

return TLO.CombineTo(Op, IsUGE.getValue() ? Op0 : Op1);
return TLO.CombineTo(Op, IsUGT.getValue() ? Op0 : Op1);
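// Remaining scalar cases: BITREVERSE/BSWAP forward a permuted demanded mask;
// SIGN_EXTEND_INREG, the extends, and TRUNCATE translate the mask into the
// narrower source domain; EXTRACT_VECTOR_ELT and BITCAST map demanded bits
// onto demanded source elements.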
if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
                         Depth + 1))

unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ;

if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
                         Depth + 1))

EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

unsigned MinSignedBits =
    TLO.DAG.ComputeMaxSignificantBits(Op0, DemandedElts, Depth + 1);
bool AlreadySignExtended = ExVTBits >= MinSignedBits;

if (!AlreadySignExtended) {

InputDemandedBits.setBit(ExVTBits - 1);

if (SimplifyDemandedBits(Op0, InputDemandedBits, DemandedElts, Known, TLO,

if (Known.Zero[ExVTBits - 1])
if (Known.One[ExVTBits - 1]) {

EVT HalfVT = Op.getOperand(0).getValueType();

if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1))
if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1))

EVT SrcVT = Src.getValueType();

if (IsLE && IsVecInReg && DemandedElts == 1 &&

APInt InDemandedElts = DemandedElts.zext(InElts);
if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,

if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
        Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))

EVT SrcVT = Src.getValueType();

if (IsLE && IsVecInReg && DemandedElts == 1 &&

APInt InDemandedElts = DemandedElts.zext(InElts);

InDemandedBits.setBit(InBits - 1);

if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,

if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
        Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))

EVT SrcVT = Src.getValueType();

if (IsLE && IsVecInReg && DemandedElts == 1 &&

APInt InDemandedElts = DemandedElts.zext(InElts);
if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,

if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
        Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))

unsigned OperandBitWidth = Src.getScalarValueSizeInBits();

if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO,

if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
        Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1))

if (Src.getNode()->hasOneUse()) {
  switch (Src.getOpcode()) {

const APInt *ShAmtC =

EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known,
                         TLO, Depth + 1))

Known.Zero |= ~InMask;

ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
unsigned EltBitWidth = Src.getScalarValueSizeInBits();

if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx))
  if (CIdx->getAPIntValue().ult(NumSrcElts))

DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth);

if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,

if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
        Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) {
  SDValue NewOp =
      TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx);

EVT SrcVT = Src.getValueType();

unsigned OpVTSizeInBits = Op.getValueSizeInBits();
if (!OpVTLegal && OpVTSizeInBits > 32)

unsigned ShVal = Op.getValueSizeInBits() - 1;

unsigned Scale = BitWidth / NumSrcEltBits;

for (unsigned i = 0; i != Scale; ++i) {
  unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
  unsigned BitOffset = EltOffset * NumSrcEltBits;

  DemandedSrcBits |= Sub;
  for (unsigned j = 0; j != NumElts; ++j)
    if (DemandedElts[j])
      DemandedSrcElts.setBit((j * Scale) + i);

APInt KnownSrcUndef, KnownSrcZero;
if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
                               KnownSrcZero, TLO, Depth + 1))

if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
                         KnownSrcBits, TLO, Depth + 1))

} else if (IsLE && (NumSrcEltBits % BitWidth) == 0) {
  unsigned Scale = NumSrcEltBits / BitWidth;

  for (unsigned i = 0; i != NumElts; ++i)
    if (DemandedElts[i]) {
      unsigned Offset = (i % Scale) * BitWidth;

      DemandedSrcElts.setBit(i / Scale);

  APInt KnownSrcUndef, KnownSrcZero;
  if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
                                 KnownSrcZero, TLO, Depth + 1))

  if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
                           KnownSrcBits, TLO, Depth + 1))
if (C && C->getAPIntValue().countTrailingZeros() == CTZ) {

SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);

unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros();

if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO,
                         Depth + 1) ||
    SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO,
                         Depth + 1) ||

SDValue NewOp =
    TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
    Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1);
SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
    Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1);
if (DemandedOp0 || DemandedOp1) {

  Op0 = DemandedOp0 ? DemandedOp0 : Op0;
  Op1 = DemandedOp1 ? DemandedOp1 : Op1;

  SDValue NewOp =
      TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);

if (C && !C->isAllOnes() && !C->isOne() &&
    (C->getAPIntValue() | HighMask).isAllOnes()) {

auto getShiftLeftAmt = [&HighMask](SDValue Mul) -> unsigned {

if (unsigned ShAmt = getShiftLeftAmt(Op0))

if (unsigned ShAmt = getShiftLeftAmt(Op1))
  return foldMul(ISD::SUB, Op1.getOperand(0), Op0, ShAmt);

if (unsigned ShAmt = getShiftLeftAmt(Op1))
  return foldMul(ISD::ADD, Op1.getOperand(0), Op0, ShAmt);

if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts,

if (!isTargetCanonicalConstantNode(Op) &&
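// SimplifyDemandedVectorElts: the per-element analogue of the bit-level
// simplification above. KnownUndef/KnownZero track lanes proven undef or
// zero; getKnownUndefForVectorBinop propagates per-lane undef information
// through a vector binop.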
const APInt &DemandedElts,

APInt KnownUndef, KnownZero;

SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO);

const APInt &UndefOp0,
const APInt &UndefOp1) {

"Vector binop only");

assert(UndefOp0.getBitWidth() == NumElts &&
       UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis");

auto getUndefOrConstantElt = [&](SDValue V, unsigned Index,
                                 const APInt &UndefVals) {
  if (UndefVals[Index])

  if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {

    auto *C = dyn_cast<ConstantSDNode>(Elt);
    if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque()))

for (unsigned i = 0; i != NumElts; ++i) {

bool AssumeSingleUse) const {

EVT VT = Op.getValueType();
unsigned Opcode = Op.getOpcode();
APInt DemandedElts = OriginalDemandedElts;

"Mask size mismatches value type element count!");

if (!Op.getNode()->hasOneUse() && !AssumeSingleUse)

if (DemandedElts == 0) {

auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) {
  SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts,
                                                         TLO.DAG, Depth + 1);
  SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts,
                                                         TLO.DAG, Depth + 1);
  if (NewOp0 || NewOp1) {
    SDValue NewOp = TLO.DAG.getNode(
        Opcode, SDLoc(Op), VT, NewOp0 ? NewOp0 : Op0, NewOp1 ? NewOp1 : Op1);
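// Per-opcode element handling: bitcasts rescale the demanded-element mask
// between vectors with different lane counts; BUILD_VECTOR, CONCAT_VECTORS,
// INSERT/EXTRACT_SUBVECTOR, and INSERT_VECTOR_ELT forward demand to the
// relevant sub-operands; shuffles split it between their two inputs via the
// shuffle mask.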
if (!DemandedElts[0]) {

EVT SrcVT = Src.getValueType();

if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                               TLO, Depth + 1))

EVT SrcVT = Src.getValueType();

if (NumSrcElts == NumElts)
  return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
                                    KnownZero, TLO, Depth + 1);

APInt SrcDemandedElts, SrcZero, SrcUndef;

if ((NumElts % NumSrcElts) == 0) {
  unsigned Scale = NumElts / NumSrcElts;

  if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                 TLO, Depth + 1))

  for (unsigned i = 0; i != NumElts; ++i)
    if (DemandedElts[i]) {
      unsigned Ofs = (i % Scale) * EltSizeInBits;
      SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits);

  if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,
                           TLO, Depth + 1))

  for (unsigned SubElt = 0; SubElt != Scale; ++SubElt) {

    for (unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) {
      unsigned Elt = Scale * SrcElt + SubElt;
      if (DemandedElts[Elt])

  for (unsigned i = 0; i != NumSrcElts; ++i) {
    if (SrcDemandedElts[i]) {

      KnownZero.setBits(i * Scale, (i + 1) * Scale);

      KnownUndef.setBits(i * Scale, (i + 1) * Scale);

if ((NumSrcElts % NumElts) == 0) {
  unsigned Scale = NumSrcElts / NumElts;

  if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                 TLO, Depth + 1))

  for (unsigned i = 0; i != NumElts; ++i) {
    if (DemandedElts[i]) {

if (llvm::any_of(Op->op_values(),
                 [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) {

  bool Updated = false;
  for (unsigned i = 0; i != NumElts; ++i) {
    if (!DemandedElts[i] && !Ops[i].isUndef()) {

for (unsigned i = 0; i != NumElts; ++i) {

  if (SrcOp.isUndef()) {
  } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&

EVT SubVT = Op.getOperand(0).getValueType();
unsigned NumSubVecs = Op.getNumOperands();

for (unsigned i = 0; i != NumSubVecs; ++i) {

  APInt SubUndef, SubZero;
  if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
                                 Depth + 1))

bool FoundNewSub = false;

for (unsigned i = 0; i != NumSubVecs; ++i) {

  SDValue NewSubOp = SimplifyMultipleUseDemandedVectorElts(
      SubOp, SubElts, TLO.DAG, Depth + 1);
  DemandedSubOps.push_back(NewSubOp ? NewSubOp : SubOp);
  FoundNewSub = NewSubOp ? true : FoundNewSub;

APInt DemandedSrcElts = DemandedElts;

APInt SubUndef, SubZero;
if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
                               Depth + 1))

if (!DemandedSrcElts && !Src.isUndef())

if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero,
                               TLO, Depth + 1))

SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
    Src, DemandedSrcElts, TLO.DAG, Depth + 1);
SDValue NewSub = SimplifyMultipleUseDemandedVectorElts(
    Sub, DemandedSubElts, TLO.DAG, Depth + 1);
if (NewSrc || NewSub) {
  NewSrc = NewSrc ? NewSrc : Src;
  NewSub = NewSub ? NewSub : Sub;
  SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc,
                                  NewSub, Op.getOperand(2));

if (Src.getValueType().isScalableVector())

unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);

APInt SrcUndef, SrcZero;
if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
                               Depth + 1))

SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
    Src, DemandedSrcElts, TLO.DAG, Depth + 1);

auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));

if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
  unsigned Idx = CIdx->getZExtValue();
  if (!DemandedElts[Idx])

  APInt DemandedVecElts(DemandedElts);

  if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
                                 KnownZero, TLO, Depth + 1))

APInt VecUndef, VecZero;
if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
                               Depth + 1))

APInt UnusedUndef, UnusedZero;
if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef,
                               UnusedZero, TLO, Depth + 1))

APInt DemandedLHS(DemandedElts);
APInt DemandedRHS(DemandedElts);
APInt UndefLHS, ZeroLHS;
APInt UndefRHS, ZeroRHS;
if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS,
                               ZeroLHS, TLO, Depth + 1))
if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS,
                               ZeroRHS, TLO, Depth + 1))

KnownUndef = UndefLHS & UndefRHS;
KnownZero = ZeroLHS & ZeroRHS;

APInt DemandedLHS(NumElts, 0);
APInt DemandedRHS(NumElts, 0);
for (unsigned i = 0; i != NumElts; ++i) {
  int M = ShuffleMask[i];
  if (M < 0 || !DemandedElts[i])

  assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
  if (M < (int)NumElts)

  DemandedRHS.setBit(M - NumElts);

APInt UndefLHS, ZeroLHS;
APInt UndefRHS, ZeroRHS;
if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS,
                               ZeroLHS, TLO, Depth + 1))
if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS,
                               ZeroRHS, TLO, Depth + 1))

bool Updated = false;
bool IdentityLHS = true, IdentityRHS = true;

for (unsigned i = 0; i != NumElts; ++i) {
  int &M = NewMask[i];

  if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) ||
      (M >= (int)NumElts && UndefRHS[M - NumElts])) {

  IdentityLHS &= (M < 0) || (M == (int)i);
  IdentityRHS &= (M < 0) || ((M - NumElts) == i);

if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) {

buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1),

for (unsigned i = 0; i != NumElts; ++i) {
  int M = ShuffleMask[i];

  } else if (M < (int)NumElts) {

  if (UndefRHS[M - NumElts])
  if (ZeroRHS[M - NumElts])

APInt SrcUndef, SrcZero;

unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts);
if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
                               Depth + 1))

Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
DemandedSrcElts == 1) {

if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() == ISD::AND &&
    Op->isOnlyUserOf(Src.getNode()) &&
    Op.getValueSizeInBits() == Src.getValueSizeInBits()) {

EVT SrcVT = Src.getValueType();

if (SDValue NewMask = TLO.DAG.FoldConstantArithmetic(
        ISD::AND, DL, SrcVT, {Src.getOperand(1), Mask})) {

if (Op0 == Op1 && Op->isOnlyUserOf(Op0.getNode())) {
  APInt UndefLHS, ZeroLHS;
  if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
                                 Depth + 1))

APInt UndefRHS, ZeroRHS;
if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
                               Depth + 1))
APInt UndefLHS, ZeroLHS;
if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
                               Depth + 1))

KnownZero = ZeroLHS & ZeroRHS;

if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))

APInt UndefRHS, ZeroRHS;
if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
                               Depth + 1))
APInt UndefLHS, ZeroLHS;
if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
                               Depth + 1))

KnownZero = ZeroLHS;
KnownUndef = UndefLHS & UndefRHS;

if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))

APInt SrcUndef, SrcZero;
if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO,
                               Depth + 1))
if (SimplifyDemandedVectorElts(Op0, DemandedElts, KnownUndef, KnownZero,
                               TLO, Depth + 1))

KnownZero |= SrcZero;
KnownUndef &= SrcUndef;
KnownUndef &= ~KnownZero;

if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))

if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
                               KnownZero, TLO, Depth + 1))

if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef,
                                            KnownZero, TLO, Depth))

if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known,
                         TLO, Depth, AssumeSingleUse))

assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero");
const APInt &DemandedElts,

unsigned Depth) const {

"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");

unsigned Depth) const {

unsigned Depth) const {

unsigned Depth) const {

"Should use ComputeNumSignBits if you don't know whether Op"
" is a target node!");

"Should use SimplifyDemandedVectorElts if you don't know whether Op"
" is a target node!");

"Should use SimplifyDemandedBits if you don't know whether Op"
" is a target node!");
computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);

"Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
" is a target node!");

bool PoisonOnly, unsigned Depth) const {

"Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op"
" is a target node!");

unsigned Depth) const {

"Should use isKnownNeverNaN if you don't know whether Op"
" is a target node!");

const APInt &DemandedElts,

unsigned Depth) const {

"Should use isSplatValue if you don't know whether Op"
" is a target node!");
CVal = CN->getAPIntValue();
EltWidth = N.getValueType().getScalarSizeInBits();

CVal = CVal.trunc(EltWidth);

return CVal.isOne();

return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));

return N->isAllOnes() && SExt;
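// SetCC folding helpers: foldSetCCWithAnd and foldSetCCWithBinOp rewrite
// comparisons of masked or binop'd values; optimizeSetCCOfSignedTruncationCheck
// matches two related power-of-two constants (KeptBits == KeptBitsMinusOne + 1)
// that encode a signed-truncation range check; the hoisting helper asks the
// target, via shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd,
// whether to move a constant out of a shifted AND operand.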
DAGCombinerInfo &DCI) const {

if (DCI.isBeforeLegalizeOps() ||

auto *YConst = dyn_cast<ConstantSDNode>(Y);
if (YConst && YConst->isZero())

SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(

if (!(C1 = dyn_cast<ConstantSDNode>(N1)))

if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1))))

EVT XVT = X.getValueType();

auto checkConstants = [&I1, &I01]() -> bool {

if (checkConstants()) {

if (!checkConstants())

const unsigned KeptBits = I1.logBase2();
const unsigned KeptBitsMinusOne = I01.logBase2();

if (KeptBits != (KeptBitsMinusOne + 1))

SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
    EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
    DAGCombinerInfo &DCI, const SDLoc &DL) const {

"Should be a comparison with 0.");

"Valid only for [in]equality comparisons.");

unsigned NewShiftOpcode;

unsigned OldShiftOpcode = V.getOpcode();
switch (OldShiftOpcode) {

C = V.getOperand(0);

Y = V.getOperand(1);

return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
    X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG);

EVT VT = X.getValueType();

DAGCombinerInfo &DCI) const {

"Unexpected binop");

!DCI.isBeforeLegalize());

if (!DCI.isCalledByLegalizer())
  DCI.AddToWorklist(YShl1.getNode());

if (!C1 || !(C1->isZero() || C1->isAllOnes()))

auto getRotateSource = [](SDValue X) {

return X.getOperand(0);

if (SDValue R = getRotateSource(N0))

if (!C1 || !C1->isZero())

if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth))

unsigned ShAmt = ShAmtC->getZExtValue();

if (Or.getOperand(0) == Other) {
  X = Or.getOperand(0);
  Y = Or.getOperand(1);

if (Or.getOperand(1) == Other) {
  X = Or.getOperand(1);
  Y = Or.getOperand(0);

if (matchOr(F0, F1)) {

if (matchOr(F1, F0)) {
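// SimplifySetCC: the main setcc canonicalization. Constant operands are
// swapped to the RHS, comparisons are narrowed to the memory width of an
// extending load or to a smaller desirable type, and masked-load patterns
// are rewritten before falling back to per-predicate constant-range checks.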
const SDLoc &dl) const {

bool N0ConstOrSplat =
bool N1ConstOrSplat =

return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);

if (!N0ConstOrSplat && !N1ConstOrSplat &&

return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);

const APInt &C1 = N1C->getAPIntValue();

if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
  const APInt &C1 = N1C->getAPIntValue();

if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
  if ((C->getAPIntValue() + 1).isPowerOf2()) {
    MinBits = C->getAPIntValue().countTrailingOnes();

} else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) {

  MinBits = LN0->getMemoryVT().getSizeInBits();

  MinBits = LN0->getMemoryVT().getSizeInBits();

unsigned ReqdBits = Signed ? C1.getMinSignedBits() : C1.getActiveBits();

MinBits < C1.getBitWidth() &&
MinBits >= ReqdBits) {

if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {

if (MinBits == 1 && C1 == 1)

(isConstFalseVal(N1) ||

cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),

unsigned bestWidth = 0, bestOffset = 0;
if (Lod->isSimple() && Lod->isUnindexed()) {

unsigned maskWidth = origWidth;

origWidth = Lod->getMemoryVT().getSizeInBits();

for (unsigned width = origWidth / 2; width >= 8; width /= 2) {

  for (unsigned offset = 0; offset < origWidth / width; offset++) {
    if (Mask.isSubsetOf(newMask)) {

      bestOffset = (uint64_t)offset * (width / 8);

      bestOffset = (origWidth / width - offset - 1) * (width / 8);

      bestMask = Mask.lshr(offset * (width / 8) * 8);

SDValue Ptr = Lod->getBasePtr();
if (bestOffset != 0)

SDValue NewLoad =
    DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
                Lod->getPointerInfo().getWithOffset(bestOffset),
                Lod->getOriginalAlign());

C1.getBitWidth() - InSize))) {

if (C1.getMinSignedBits() > ExtSrcTyBits)

assert(ExtDstTy == N0.getOperand(0).getValueType() &&
       ExtDstTy != ExtSrcTy && "Unexpected types!");

return DAG.getSetCC(dl, VT, ZextOp,

} else if ((N1C->isZero() || N1C->isOne()) &&

return DAG.getSetCC(dl, VT, Val, N1,

} else if (N1C->isOne()) {

N1C && N1C->isAllOnes()) {

if (SDValue V =
        optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl))

const APInt &C1 = N1C->getAPIntValue();

APInt MinVal, MaxVal;