58 if (
F.getFnAttribute(
"disable-tail-calls").getValueAsBool())
64 AttrBuilder CallerAttrs(
F.getContext(),
F.getAttributes().getRetAttrs());
65 for (
const auto &Attr :
66 {Attribute::Alignment, Attribute::Dereferenceable,
67 Attribute::DereferenceableOrNull, Attribute::NoAlias,
68 Attribute::NonNull, Attribute::NoUndef, Attribute::Range})
75 if (CallerAttrs.
contains(Attribute::ZExt) ||
76 CallerAttrs.
contains(Attribute::SExt))
87 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
103 Register ArgReg = cast<RegisterSDNode>(
Value->getOperand(1))->getReg();
104 if (
MRI.getLiveInPhysReg(ArgReg) != Reg)
114 IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
115 IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
116 IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
117 IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
118 IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
119 IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
120 IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
121 IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
122 IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
123 IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
124 IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
125 IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
126 Alignment = Call->getParamStackAlign(ArgIdx);
129 "multiple ABI attributes?");
145std::pair<SDValue, SDValue>
155 Args.reserve(Ops.
size());
158 for (
unsigned i = 0; i < Ops.
size(); ++i) {
161 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.
getContext());
164 Entry.IsZExt = !Entry.IsSExt;
168 Entry.IsSExt = Entry.IsZExt =
false;
170 Args.push_back(Entry);
173 if (LC == RTLIB::UNKNOWN_LIBCALL)
181 bool zeroExtend = !signExtend;
185 signExtend = zeroExtend =
false;
196 return LowerCallTo(CLI);
200 std::vector<EVT> &MemOps,
unsigned Limit,
const MemOp &
Op,
unsigned DstAS,
202 if (Limit != ~
unsigned(0) &&
Op.isMemcpyWithFixedDstAlign() &&
203 Op.getSrcAlign() <
Op.getDstAlign())
208 if (VT == MVT::Other) {
213 if (
Op.isFixedDstAlign())
231 unsigned NumMemOps = 0;
235 while (VTSize >
Size) {
246 else if (NewVT == MVT::i64 &&
258 if (NewVT == MVT::i8)
267 if (NumMemOps &&
Op.allowOverlap() && NewVTSize <
Size &&
269 VT, DstAS,
Op.isFixedDstAlign() ?
Op.getDstAlign() :
Align(1),
279 if (++NumMemOps > Limit)
282 MemOps.push_back(VT);
297 return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
307 bool IsSignaling)
const {
312 assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
313 &&
"Unsupported setcc type!");
316 RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
317 bool ShouldInvertCC =
false;
321 LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
322 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
323 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
327 LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
328 (VT == MVT::f64) ? RTLIB::UNE_F64 :
329 (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
333 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
334 (VT == MVT::f64) ? RTLIB::OGE_F64 :
335 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
339 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
340 (VT == MVT::f64) ? RTLIB::OLT_F64 :
341 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
345 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
346 (VT == MVT::f64) ? RTLIB::OLE_F64 :
347 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
351 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
352 (VT == MVT::f64) ? RTLIB::OGT_F64 :
353 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
356 ShouldInvertCC =
true;
359 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
360 (VT == MVT::f64) ? RTLIB::UO_F64 :
361 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
365 ShouldInvertCC =
true;
368 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
369 (VT == MVT::f64) ? RTLIB::UO_F64 :
370 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
371 LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
372 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
373 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
377 ShouldInvertCC =
true;
380 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
381 (VT == MVT::f64) ? RTLIB::OGE_F64 :
382 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
385 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
386 (VT == MVT::f64) ? RTLIB::OGT_F64 :
387 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
390 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
391 (VT == MVT::f64) ? RTLIB::OLE_F64 :
392 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
395 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
396 (VT == MVT::f64) ? RTLIB::OLT_F64 :
397 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
405 SDValue Ops[2] = {NewLHS, NewRHS};
410 auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
415 if (ShouldInvertCC) {
417 CCCode = getSetCCInverse(CCCode, RetVT);
420 if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
427 auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
430 CCCode = getSetCCInverse(CCCode, RetVT);
431 NewLHS = DAG.
getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
445 if (!isPositionIndependent())
459 unsigned JTEncoding = getJumpTableEncoding();
495 if (!
TM.shouldAssumeDSOLocal(GV))
499 if (isPositionIndependent())
515 const APInt &DemandedElts,
518 unsigned Opcode =
Op.getOpcode();
526 if (targetShrinkDemandedConstant(
Op,
DemandedBits, DemandedElts, TLO))
536 auto *Op1C = dyn_cast<ConstantSDNode>(
Op.getOperand(1));
537 if (!Op1C || Op1C->isOpaque())
541 const APInt &
C = Op1C->getAPIntValue();
546 EVT VT =
Op.getValueType();
563 EVT VT =
Op.getValueType();
578 "ShrinkDemandedOp only supports binary operators!");
579 assert(
Op.getNode()->getNumValues() == 1 &&
580 "ShrinkDemandedOp only supports nodes with one result!");
582 EVT VT =
Op.getValueType();
591 Op.getOperand(1).getValueType().getScalarSizeInBits() ==
BitWidth &&
592 "ShrinkDemandedOp only supports operands that have the same size!");
596 if (!
Op.getNode()->hasOneUse())
609 Op.getOpcode(), dl, SmallVT,
612 assert(DemandedSize <= SmallVTBits &&
"Narrowed below demanded bits?");
627 bool Simplified = SimplifyDemandedBits(
Op,
DemandedBits, Known, TLO);
636 const APInt &DemandedElts,
656 bool AssumeSingleUse)
const {
657 EVT VT =
Op.getValueType();
673 EVT VT =
Op.getValueType();
691 switch (
Op.getOpcode()) {
697 EVT SrcVT = Src.getValueType();
698 EVT DstVT =
Op.getValueType();
704 if (NumSrcEltBits == NumDstEltBits)
705 if (
SDValue V = SimplifyMultipleUseDemandedBits(
709 if (SrcVT.
isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
710 unsigned Scale = NumDstEltBits / NumSrcEltBits;
714 for (
unsigned i = 0; i != Scale; ++i) {
715 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
716 unsigned BitOffset = EltOffset * NumSrcEltBits;
719 DemandedSrcBits |= Sub;
720 for (
unsigned j = 0; j != NumElts; ++j)
722 DemandedSrcElts.
setBit((j * Scale) + i);
726 if (
SDValue V = SimplifyMultipleUseDemandedBits(
727 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
732 if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
733 unsigned Scale = NumSrcEltBits / NumDstEltBits;
737 for (
unsigned i = 0; i != NumElts; ++i)
738 if (DemandedElts[i]) {
739 unsigned Offset = (i % Scale) * NumDstEltBits;
741 DemandedSrcElts.
setBit(i / Scale);
744 if (
SDValue V = SimplifyMultipleUseDemandedBits(
745 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
766 return Op.getOperand(0);
768 return Op.getOperand(1);
779 return Op.getOperand(0);
781 return Op.getOperand(1);
791 return Op.getOperand(0);
793 return Op.getOperand(1);
799 if (std::optional<uint64_t> MaxSA =
802 unsigned ShAmt = *MaxSA;
803 unsigned NumSignBits =
806 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
835 EVT ExVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
842 if (NumSignBits >= (
BitWidth - ExBits + 1))
855 EVT SrcVT = Src.getValueType();
856 EVT DstVT =
Op.getValueType();
857 if (IsLE && DemandedElts == 1 &&
870 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
873 !DemandedElts[CIdx->getZExtValue()])
887 if (DemandedSubElts == 0)
897 bool AllUndef =
true, IdentityLHS =
true, IdentityRHS =
true;
898 for (
unsigned i = 0; i != NumElts; ++i) {
899 int M = ShuffleMask[i];
900 if (M < 0 || !DemandedElts[i])
903 IdentityLHS &= (M == (int)i);
904 IdentityRHS &= ((M - NumElts) == i);
910 return Op.getOperand(0);
912 return Op.getOperand(1);
922 if (
SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
932 unsigned Depth)
const {
933 EVT VT =
Op.getValueType();
940 return SimplifyMultipleUseDemandedBits(
Op,
DemandedBits, DemandedElts, DAG,
946 unsigned Depth)
const {
948 return SimplifyMultipleUseDemandedBits(
Op,
DemandedBits, DemandedElts, DAG,
960 "SRL or SRA node is required here!");
963 if (!N1C || !N1C->
isOne())
1010 unsigned ShiftOpc =
Op.getOpcode();
1011 bool IsSigned =
false;
1015 unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;
1020 unsigned NumZero = std::min(NumZeroA, NumZeroB);
1026 if (NumZero >= 2 && NumSigned < NumZero) {
1031 if (NumSigned >= 1) {
1039 if (NumZero >= 1 && NumSigned < NumZero) {
1059 EVT VT =
Op.getValueType();
1073 Add.getOperand(1)) &&
1084 (isa<ConstantSDNode>(ExtOpA) || isa<ConstantSDNode>(ExtOpB)))
1104 unsigned Depth,
bool AssumeSingleUse)
const {
1107 "Mask size mismatches value type size!");
1112 EVT VT =
Op.getValueType();
1114 unsigned NumElts = OriginalDemandedElts.
getBitWidth();
1116 "Unexpected vector size");
1119 APInt DemandedElts = OriginalDemandedElts;
1139 cast<ConstantFPSDNode>(
Op)->getValueAPF().bitcastToAPInt());
1144 bool HasMultiUse =
false;
1145 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse()) {
1154 }
else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
1163 switch (
Op.getOpcode()) {
1167 if (!DemandedElts[0])
1172 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
1174 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO,
Depth + 1))
1179 if (DemandedElts == 1)
1192 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO,
Depth + 1))
1201 auto *LD = cast<LoadSDNode>(
Op);
1202 if (getTargetConstantFromLoad(LD)) {
1208 EVT MemVT = LD->getMemoryVT();
1220 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
1225 APInt DemandedVecElts(DemandedElts);
1227 unsigned Idx = CIdx->getZExtValue();
1231 if (!DemandedElts[
Idx])
1238 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO,
Depth + 1))
1244 if (SimplifyDemandedBits(Vec,
DemandedBits, DemandedVecElts, KnownVec, TLO,
1248 if (!!DemandedVecElts)
1263 APInt DemandedSrcElts = DemandedElts;
1267 if (SimplifyDemandedBits(Sub,
DemandedBits, DemandedSubElts, KnownSub, TLO,
1270 if (SimplifyDemandedBits(Src,
DemandedBits, DemandedSrcElts, KnownSrc, TLO,
1276 if (!!DemandedSubElts)
1278 if (!!DemandedSrcElts)
1284 SDValue NewSub = SimplifyMultipleUseDemandedBits(
1286 SDValue NewSrc = SimplifyMultipleUseDemandedBits(
1288 if (NewSub || NewSrc) {
1289 NewSub = NewSub ? NewSub : Sub;
1290 NewSrc = NewSrc ? NewSrc : Src;
1303 if (Src.getValueType().isScalableVector())
1306 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
1309 if (SimplifyDemandedBits(Src,
DemandedBits, DemandedSrcElts, Known, TLO,
1315 SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
1330 EVT SubVT =
Op.getOperand(0).getValueType();
1333 for (
unsigned i = 0; i != NumSubVecs; ++i) {
1334 APInt DemandedSubElts =
1335 DemandedElts.
extractBits(NumSubElts, i * NumSubElts);
1336 if (SimplifyDemandedBits(
Op.getOperand(i),
DemandedBits, DemandedSubElts,
1337 Known2, TLO,
Depth + 1))
1340 if (!!DemandedSubElts)
1350 APInt DemandedLHS, DemandedRHS;
1355 if (!!DemandedLHS || !!DemandedRHS) {
1361 if (!!DemandedLHS) {
1362 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedLHS, Known2, TLO,
1367 if (!!DemandedRHS) {
1368 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedRHS, Known2, TLO,
1375 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1377 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1379 if (DemandedOp0 || DemandedOp1) {
1380 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1381 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1416 LHSKnown.
One == ~RHSC->getAPIntValue()) {
1428 unsigned NumSubElts =
1445 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1449 Known2, TLO,
Depth + 1))
1471 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1473 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1475 if (DemandedOp0 || DemandedOp1) {
1476 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1477 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1490 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1492 if (Flags.hasDisjoint()) {
1493 Flags.setDisjoint(
false);
1494 Op->setFlags(Flags);
1499 if (SimplifyDemandedBits(Op0, ~Known.
One &
DemandedBits, DemandedElts,
1500 Known2, TLO,
Depth + 1)) {
1501 if (Flags.hasDisjoint()) {
1502 Flags.setDisjoint(
false);
1503 Op->setFlags(Flags);
1523 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1525 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1527 if (DemandedOp0 || DemandedOp1) {
1528 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1529 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1540 for (
int I = 0;
I != 2; ++
I) {
1543 SDValue Alt =
Op.getOperand(1 -
I).getOperand(0);
1544 SDValue C2 =
Op.getOperand(1 -
I).getOperand(1);
1546 for (
int J = 0; J != 2; ++J) {
1569 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1572 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedElts, Known2, TLO,
1599 if (
C->getAPIntValue() == Known2.
One) {
1608 if (!
C->isAllOnes() &&
DemandedBits.isSubsetOf(
C->getAPIntValue())) {
1620 if (ShiftC->getAPIntValue().ult(
BitWidth)) {
1621 uint64_t ShiftAmt = ShiftC->getZExtValue();
1624 : Ones.
lshr(ShiftAmt);
1642 if (!
C || !
C->isAllOnes())
1648 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1650 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1652 if (DemandedOp0 || DemandedOp1) {
1653 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1654 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1664 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1665 Known, TLO,
Depth + 1))
1667 if (SimplifyDemandedBits(
Op.getOperand(1),
DemandedBits, DemandedElts,
1668 Known2, TLO,
Depth + 1))
1679 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1680 Known, TLO,
Depth + 1))
1682 if (SimplifyDemandedBits(
Op.getOperand(1),
DemandedBits, DemandedElts,
1683 Known2, TLO,
Depth + 1))
1690 if (SimplifyDemandedBits(
Op.getOperand(3),
DemandedBits, DemandedElts,
1691 Known, TLO,
Depth + 1))
1693 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1694 Known2, TLO,
Depth + 1))
1737 if (std::optional<uint64_t> KnownSA =
1739 unsigned ShAmt = *KnownSA;
1749 if (std::optional<uint64_t> InnerSA =
1751 unsigned C1 = *InnerSA;
1753 int Diff = ShAmt - C1;
1772 if (ShAmt < InnerBits &&
DemandedBits.getActiveBits() <= InnerBits &&
1773 isTypeDesirableForOp(
ISD::SHL, InnerVT)) {
1790 InnerOp, DemandedElts,
Depth + 2)) {
1791 unsigned InnerShAmt = *SA2;
1792 if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
1794 (InnerBits - InnerShAmt + ShAmt) &&
1808 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1811 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
1814 Flags.setNoSignedWrap(
false);
1815 Flags.setNoUnsignedWrap(
false);
1816 Op->setFlags(Flags);
1820 Known.
Zero <<= ShAmt;
1821 Known.
One <<= ShAmt;
1827 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1828 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
1839 Op.getNode()->hasOneUse()) {
1847 isTypeDesirableForOp(
ISD::SHL, SmallVT) &&
1850 assert(DemandedSize <= SmallVTBits &&
1851 "Narrowed below demanded bits?");
1871 isTypeDesirableForOp(
ISD::SHL, HalfVT) &&
1880 Flags.setNoSignedWrap(IsNSW);
1881 Flags.setNoUnsignedWrap(IsNUW);
1886 NewShiftAmt, Flags);
1899 if (SimplifyDemandedBits(Op0, DemandedFromOp, DemandedElts, Known, TLO,
1902 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
1905 Flags.setNoSignedWrap(
false);
1906 Flags.setNoUnsignedWrap(
false);
1907 Op->setFlags(Flags);
1917 if (std::optional<uint64_t> MaxSA =
1919 unsigned ShAmt = *MaxSA;
1920 unsigned NumSignBits =
1923 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
1933 if (std::optional<uint64_t> KnownSA =
1935 unsigned ShAmt = *KnownSA;
1945 if (std::optional<uint64_t> InnerSA =
1947 unsigned C1 = *InnerSA;
1949 int Diff = ShAmt - C1;
1965 if (
Op->getFlags().hasExact())
1974 isTypeDesirableForOp(
ISD::SRL, HalfVT) &&
1990 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
2000 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2001 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2015 DemandedElts,
Depth + 1))
2039 if (std::optional<uint64_t> KnownSA =
2041 unsigned ShAmt = *KnownSA;
2048 if (std::optional<uint64_t> InnerSA =
2050 unsigned LowBits =
BitWidth - ShAmt;
2056 if (*InnerSA == ShAmt) {
2066 unsigned NumSignBits =
2068 if (NumSignBits > ShAmt)
2078 if (
Op->getFlags().hasExact())
2086 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
2097 Flags.setExact(
Op->getFlags().hasExact());
2115 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2116 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2126 DemandedElts,
Depth + 1))
2139 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2144 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1,
DemandedBits, DemandedElts,
2145 Known, TLO,
Depth + 1))
2154 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2157 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
2170 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2171 Op0, Demanded0, DemandedElts, TLO.
DAG,
Depth + 1);
2172 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2173 Op1, Demanded1, DemandedElts, TLO.
DAG,
Depth + 1);
2174 if (DemandedOp0 || DemandedOp1) {
2175 DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0;
2176 DemandedOp1 = DemandedOp1 ? DemandedOp1 : Op1;
2187 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
2188 Known2, TLO,
Depth + 1))
2204 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2210 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2220 DemandedBits.countr_zero() >= (IsROTL ? Amt : RevAmt)) {
2225 DemandedBits.countl_zero() >= (IsROTL ? RevAmt : Amt)) {
2234 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,
2244 unsigned Opc =
Op.getOpcode();
2251 unsigned NumSignBits =
2255 if (NumSignBits >= NumDemandedUpperBits)
2296 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
2322 unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ;
2330 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
2350 EVT ExVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2355 unsigned MinSignedBits =
2357 bool AlreadySignExtended = ExVTBits >= MinSignedBits;
2360 if (!AlreadySignExtended) {
2378 InputDemandedBits.
setBit(ExVTBits - 1);
2380 if (SimplifyDemandedBits(Op0, InputDemandedBits, DemandedElts, Known, TLO,
2388 if (Known.
Zero[ExVTBits - 1])
2392 if (Known.
One[ExVTBits - 1]) {
2402 EVT HalfVT =
Op.getOperand(0).getValueType();
2410 if (SimplifyDemandedBits(
Op.getOperand(0), MaskLo, KnownLo, TLO,
Depth + 1))
2413 if (SimplifyDemandedBits(
Op.getOperand(1), MaskHi, KnownHi, TLO,
Depth + 1))
2416 Known = KnownHi.
concat(KnownLo);
2425 EVT SrcVT = Src.getValueType();
2434 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2446 APInt InDemandedElts = DemandedElts.
zext(InElts);
2447 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2449 if (Flags.hasNonNeg()) {
2450 Flags.setNonNeg(
false);
2451 Op->setFlags(Flags);
2459 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2460 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2470 EVT SrcVT = Src.getValueType();
2475 APInt InDemandedElts = DemandedElts.
zext(InElts);
2480 InDemandedBits.
setBit(InBits - 1);
2486 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2501 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2516 Flags.setNonNeg(
true);
2522 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2523 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2533 EVT SrcVT = Src.getValueType();
2540 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2545 APInt InDemandedElts = DemandedElts.
zext(InElts);
2546 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2553 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2554 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2563 unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
2565 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO,
2571 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2572 Src, TruncMask, DemandedElts, TLO.
DAG,
Depth + 1))
2577 switch (Src.getOpcode()) {
2588 if (Src.getNode()->hasOneUse()) {
2589 std::optional<uint64_t> ShAmtC =
2591 if (!ShAmtC || *ShAmtC >=
BitWidth)
2619 EVT ZVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2621 if (SimplifyDemandedBits(
Op.getOperand(0), ~InMask |
DemandedBits, Known,
2625 Known.
Zero |= ~InMask;
2626 Known.
One &= (~Known.Zero);
2632 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
2633 unsigned EltBitWidth = Src.getScalarValueSizeInBits();
2641 if (
auto *CIdx = dyn_cast<ConstantSDNode>(
Idx))
2642 if (CIdx->getAPIntValue().ult(NumSrcElts))
2649 DemandedSrcBits = DemandedSrcBits.
trunc(EltBitWidth);
2651 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,
2657 if (
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2658 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2674 EVT SrcVT = Src.getValueType();
2684 if ((OpVTLegal || i32Legal) && VT.
isSimple() && SrcVT != MVT::f16 &&
2685 SrcVT != MVT::f128) {
2687 EVT Ty = OpVTLegal ? VT : MVT::i32;
2691 unsigned OpVTSizeInBits =
Op.getValueSizeInBits();
2692 if (!OpVTLegal && OpVTSizeInBits > 32)
2694 unsigned ShVal =
Op.getValueSizeInBits() - 1;
2704 unsigned Scale =
BitWidth / NumSrcEltBits;
2708 for (
unsigned i = 0; i != Scale; ++i) {
2709 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
2710 unsigned BitOffset = EltOffset * NumSrcEltBits;
2713 DemandedSrcBits |= Sub;
2714 for (
unsigned j = 0; j != NumElts; ++j)
2715 if (DemandedElts[j])
2716 DemandedSrcElts.
setBit((j * Scale) + i);
2720 APInt KnownSrcUndef, KnownSrcZero;
2721 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2722 KnownSrcZero, TLO,
Depth + 1))
2726 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2727 KnownSrcBits, TLO,
Depth + 1))
2729 }
else if (IsLE && (NumSrcEltBits %
BitWidth) == 0) {
2731 unsigned Scale = NumSrcEltBits /
BitWidth;
2735 for (
unsigned i = 0; i != NumElts; ++i)
2736 if (DemandedElts[i]) {
2739 DemandedSrcElts.
setBit(i / Scale);
2743 APInt KnownSrcUndef, KnownSrcZero;
2744 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2745 KnownSrcZero, TLO,
Depth + 1))
2750 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2751 KnownSrcBits, TLO,
Depth + 1))
2756 if (
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2757 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2779 if (
C &&
C->getAPIntValue().countr_zero() == CTZ) {
2798 SDValue Op0 =
Op.getOperand(0), Op1 =
Op.getOperand(1);
2803 auto GetDemandedBitsLHSMask = [&](
APInt Demanded,
2809 if (SimplifyDemandedBits(Op1, LoMask, DemandedElts, KnownOp1, TLO,
2811 SimplifyDemandedBits(Op0, GetDemandedBitsLHSMask(LoMask, KnownOp1),
2812 DemandedElts, KnownOp0, TLO,
Depth + 1) ||
2815 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
2818 Flags.setNoSignedWrap(
false);
2819 Flags.setNoUnsignedWrap(
false);
2820 Op->setFlags(Flags);
2832 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2833 Op0, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2834 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2835 Op1, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2836 if (DemandedOp0 || DemandedOp1) {
2837 Flags.setNoSignedWrap(
false);
2838 Flags.setNoUnsignedWrap(
false);
2839 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
2840 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
2842 TLO.
DAG.
getNode(
Op.getOpcode(), dl, VT, Op0, Op1, Flags);
2854 if (
C && !
C->isAllOnes() && !
C->isOne() &&
2855 (
C->getAPIntValue() | HighMask).isAllOnes()) {
2859 Flags.setNoSignedWrap(
false);
2860 Flags.setNoUnsignedWrap(
false);
2868 auto getShiftLeftAmt = [&HighMask](
SDValue Mul) ->
unsigned {
2895 if (
unsigned ShAmt = getShiftLeftAmt(Op0))
2898 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2899 return foldMul(
ISD::SUB, Op1.getOperand(0), Op0, ShAmt);
2903 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2904 return foldMul(
ISD::ADD, Op1.getOperand(0), Op0, ShAmt);
2912 Op.getOpcode() ==
ISD::ADD, Flags.hasNoSignedWrap(),
2913 Flags.hasNoUnsignedWrap(), KnownOp0, KnownOp1);
2923 if (
Op.getValueType().isScalableVector())
2925 if (SimplifyDemandedBitsForTargetNode(
Op,
DemandedBits, DemandedElts,
2938 if (!isTargetCanonicalConstantNode(
Op) &&
2944 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
2967 const APInt &DemandedElts,
2973 APInt KnownUndef, KnownZero;
2975 SimplifyDemandedVectorElts(
Op, DemandedElts, KnownUndef, KnownZero, TLO);
2987 const APInt &UndefOp0,
2988 const APInt &UndefOp1) {
2991 "Vector binop only");
2996 UndefOp1.
getBitWidth() == NumElts &&
"Bad type for undef analysis");
2998 auto getUndefOrConstantElt = [&](
SDValue V,
unsigned Index,
2999 const APInt &UndefVals) {
3000 if (UndefVals[
Index])
3003 if (
auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
3007 auto *
C = dyn_cast<ConstantSDNode>(Elt);
3008 if (isa<ConstantFPSDNode>(Elt) || Elt.
isUndef() || (
C && !
C->isOpaque()))
3016 for (
unsigned i = 0; i != NumElts; ++i) {
3035 bool AssumeSingleUse)
const {
3036 EVT VT =
Op.getValueType();
3037 unsigned Opcode =
Op.getOpcode();
3038 APInt DemandedElts = OriginalDemandedElts;
3053 "Mask size mismatches value type element count!");
3062 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse())
3066 if (DemandedElts == 0) {
3081 auto SimplifyDemandedVectorEltsBinOp = [&](
SDValue Op0,
SDValue Op1) {
3082 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts,
3084 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts,
3086 if (NewOp0 || NewOp1) {
3089 NewOp1 ? NewOp1 : Op1,
Op->getFlags());
3097 if (!DemandedElts[0]) {
3105 EVT SrcVT = Src.getValueType();
3117 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3127 EVT SrcVT = Src.getValueType();
3136 if (NumSrcElts == NumElts)
3137 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
3138 KnownZero, TLO,
Depth + 1);
3140 APInt SrcDemandedElts, SrcZero, SrcUndef;
3144 if ((NumElts % NumSrcElts) == 0) {
3145 unsigned Scale = NumElts / NumSrcElts;
3147 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3157 for (
unsigned i = 0; i != NumElts; ++i)
3158 if (DemandedElts[i]) {
3159 unsigned Ofs = (i % Scale) * EltSizeInBits;
3160 SrcDemandedBits.
setBits(Ofs, Ofs + EltSizeInBits);
3164 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,
3172 for (
unsigned SubElt = 0; SubElt != Scale; ++SubElt) {
3176 for (
unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) {
3177 unsigned Elt = Scale * SrcElt + SubElt;
3178 if (DemandedElts[Elt])
3186 for (
unsigned i = 0; i != NumSrcElts; ++i) {
3187 if (SrcDemandedElts[i]) {
3189 KnownZero.
setBits(i * Scale, (i + 1) * Scale);
3191 KnownUndef.
setBits(i * Scale, (i + 1) * Scale);
3199 if ((NumSrcElts % NumElts) == 0) {
3200 unsigned Scale = NumSrcElts / NumElts;
3202 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3208 for (
unsigned i = 0; i != NumElts; ++i) {
3209 if (DemandedElts[i]) {
3238 [&](
SDValue Elt) { return Op.getOperand(0) != Elt; })) {
3240 bool Updated =
false;
3241 for (
unsigned i = 0; i != NumElts; ++i) {
3242 if (!DemandedElts[i] && !Ops[i].
isUndef()) {
3252 for (
unsigned i = 0; i != NumElts; ++i) {
3254 if (
SrcOp.isUndef()) {
3256 }
else if (EltSizeInBits ==
SrcOp.getScalarValueSizeInBits() &&
3264 EVT SubVT =
Op.getOperand(0).getValueType();
3267 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3270 APInt SubUndef, SubZero;
3271 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
3274 KnownUndef.
insertBits(SubUndef, i * NumSubElts);
3275 KnownZero.
insertBits(SubZero, i * NumSubElts);
3280 bool FoundNewSub =
false;
3282 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3285 SDValue NewSubOp = SimplifyMultipleUseDemandedVectorElts(
3286 SubOp, SubElts, TLO.
DAG,
Depth + 1);
3287 DemandedSubOps.
push_back(NewSubOp ? NewSubOp : SubOp);
3288 FoundNewSub = NewSubOp ?
true : FoundNewSub;
3306 APInt DemandedSrcElts = DemandedElts;
3309 APInt SubUndef, SubZero;
3310 if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
3315 if (!DemandedSrcElts && !Src.isUndef())
3320 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero,
3328 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3329 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3330 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts(
3331 Sub, DemandedSubElts, TLO.
DAG,
Depth + 1);
3332 if (NewSrc || NewSub) {
3333 NewSrc = NewSrc ? NewSrc : Src;
3334 NewSub = NewSub ? NewSub : Sub;
3336 NewSub,
Op.getOperand(2));
3345 if (Src.getValueType().isScalableVector())
3348 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3351 APInt SrcUndef, SrcZero;
3352 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3360 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3361 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3373 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
3377 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
3378 unsigned Idx = CIdx->getZExtValue();
3379 if (!DemandedElts[
Idx])
3382 APInt DemandedVecElts(DemandedElts);
3384 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
3385 KnownZero, TLO,
Depth + 1))
3394 APInt VecUndef, VecZero;
3395 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
3408 APInt UndefSel, ZeroSel;
3409 if (SimplifyDemandedVectorElts(Sel, DemandedElts, UndefSel, ZeroSel, TLO,
3414 APInt DemandedLHS(DemandedElts);
3415 APInt DemandedRHS(DemandedElts);
3416 APInt UndefLHS, ZeroLHS;
3417 APInt UndefRHS, ZeroRHS;
3418 if (SimplifyDemandedVectorElts(
LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3421 if (SimplifyDemandedVectorElts(
RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3425 KnownUndef = UndefLHS & UndefRHS;
3426 KnownZero = ZeroLHS & ZeroRHS;
3430 APInt DemandedSel = DemandedElts & ~KnownZero;
3431 if (DemandedSel != DemandedElts)
3432 if (SimplifyDemandedVectorElts(Sel, DemandedSel, UndefSel, ZeroSel, TLO,
3444 APInt DemandedLHS(NumElts, 0);
3445 APInt DemandedRHS(NumElts, 0);
3446 for (
unsigned i = 0; i != NumElts; ++i) {
3447 int M = ShuffleMask[i];
3448 if (M < 0 || !DemandedElts[i])
3450 assert(0 <= M && M < (
int)(2 * NumElts) &&
"Shuffle index out of range");
3451 if (M < (
int)NumElts)
3454 DemandedRHS.
setBit(M - NumElts);
3458 APInt UndefLHS, ZeroLHS;
3459 APInt UndefRHS, ZeroRHS;
3460 if (SimplifyDemandedVectorElts(
LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3463 if (SimplifyDemandedVectorElts(
RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3468 bool Updated =
false;
3469 bool IdentityLHS =
true, IdentityRHS =
true;
3471 for (
unsigned i = 0; i != NumElts; ++i) {
3472 int &M = NewMask[i];
3475 if (!DemandedElts[i] || (M < (
int)NumElts && UndefLHS[M]) ||
3476 (M >= (
int)NumElts && UndefRHS[M - NumElts])) {
3480 IdentityLHS &= (M < 0) || (M == (
int)i);
3481 IdentityRHS &= (M < 0) || ((M - NumElts) == i);
3486 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.
LegalOps) {
3488 buildLegalVectorShuffle(VT,
DL,
LHS,
RHS, NewMask, TLO.
DAG);
3494 for (
unsigned i = 0; i != NumElts; ++i) {
3495 int M = ShuffleMask[i];
3498 }
else if (M < (
int)NumElts) {
3504 if (UndefRHS[M - NumElts])
3506 if (ZeroRHS[M - NumElts])
3515 APInt SrcUndef, SrcZero;
3517 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3518 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
3519 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3526 Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
3527 DemandedSrcElts == 1) {
3540 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() ==
ISD::AND &&
3541 Op->isOnlyUserOf(Src.getNode()) &&
3542 Op.getValueSizeInBits() == Src.getValueSizeInBits()) {
3544 EVT SrcVT = Src.getValueType();
3551 ISD::AND,
DL, SrcVT, {Src.getOperand(1), Mask})) {
3565 if (Op0 == Op1 &&
Op->isOnlyUserOf(Op0.
getNode())) {
3566 APInt UndefLHS, ZeroLHS;
3567 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3588 APInt UndefRHS, ZeroRHS;
3589 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3592 APInt UndefLHS, ZeroLHS;
3593 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3597 KnownZero = ZeroLHS & ZeroRHS;
3603 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3615 APInt UndefRHS, ZeroRHS;
3616 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3619 APInt UndefLHS, ZeroLHS;
3620 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3624 KnownZero = ZeroLHS;
3625 KnownUndef = UndefLHS & UndefRHS;
3630 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3641 APInt SrcUndef, SrcZero;
3642 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO,
3647 APInt DemandedElts0 = DemandedElts & ~SrcZero;
3648 if (SimplifyDemandedVectorElts(Op0, DemandedElts0, KnownUndef, KnownZero,
3652 KnownUndef &= DemandedElts0;
3653 KnownZero &= DemandedElts0;
3658 if (DemandedElts.
isSubsetOf(SrcZero | KnownZero | SrcUndef | KnownUndef))
3665 KnownZero |= SrcZero;
3666 KnownUndef &= SrcUndef;
3667 KnownUndef &= ~KnownZero;
3671 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3678 if (SimplifyDemandedVectorElts(
Op.getOperand(0), DemandedElts, KnownUndef,
3679 KnownZero, TLO,
Depth + 1))
3691 if (SimplifyDemandedVectorEltsForTargetNode(
Op, DemandedElts, KnownUndef,
3692 KnownZero, TLO,
Depth))
3697 if (SimplifyDemandedBits(
Op,
DemandedBits, OriginalDemandedElts, Known,
3698 TLO,
Depth, AssumeSingleUse))
3704 assert((KnownUndef & KnownZero) == 0 &&
"Elements flagged as undef AND zero");
3718 const APInt &DemandedElts,
3720 unsigned Depth)
const {
3725 "Should use MaskedValueIsZero if you don't know whether Op"
3726 " is a target node!");
3733 unsigned Depth)
const {
3745 unsigned Depth)
const {
3754 unsigned Depth)
const {
3759 "Should use ComputeNumSignBits if you don't know whether Op"
3760 " is a target node!");
3777 "Should use SimplifyDemandedVectorElts if you don't know whether Op"
3778 " is a target node!");
3789 "Should use SimplifyDemandedBits if you don't know whether Op"
3790 " is a target node!");
3791 computeKnownBitsForTargetNode(
Op, Known, DemandedElts, TLO.
DAG,
Depth);
3803 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
3804 " is a target node!");
3837 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op"
3838 " is a target node!");
3842 return !canCreateUndefOrPoisonForTargetNode(
Op, DemandedElts, DAG,
PoisonOnly,
3845 return DAG.isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly,
3857 "Should use canCreateUndefOrPoison if you don't know whether Op"
3858 " is a target node!");
3866 unsigned Depth)
const {
3871 "Should use isKnownNeverNaN if you don't know whether Op"
3872 " is a target node!");
3877 const APInt &DemandedElts,
3880 unsigned Depth)
const {
3885 "Should use isSplatValue if you don't know whether Op"
3886 " is a target node!");
3901 CVal = CN->getAPIntValue();
3902 EltWidth =
N.getValueType().getScalarSizeInBits();
3909 CVal = CVal.
trunc(EltWidth);
3915 return CVal.
isOne();
3957 return (
N->isOne() && !SExt) || (SExt && (
N->getValueType(0) != MVT::i1));
3960 return N->isAllOnes() && SExt;
3969 DAGCombinerInfo &DCI)
const {
3997 auto *AndC = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
3998 if (AndC &&
isNullConstant(N1) && AndC->getAPIntValue().isPowerOf2() &&
4001 AndC->getAPIntValue().getActiveBits());
4028 if (isXAndYEqZeroPreferableToXAndYEqY(
Cond, OpVT) &&
4036 if (DCI.isBeforeLegalizeOps() ||
4070SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
4075 if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
4084 if (!(C01 = dyn_cast<ConstantSDNode>(N0->
getOperand(1))))
4088 EVT XVT =
X.getValueType();
4112 auto checkConstants = [&
I1, &I01]() ->
bool {
4117 if (checkConstants()) {
4125 if (!checkConstants())
4131 const unsigned KeptBits =
I1.logBase2();
4132 const unsigned KeptBitsMinusOne = I01.
logBase2();
4135 if (KeptBits != (KeptBitsMinusOne + 1))
4150 return DAG.
getSetCC(
DL, SCCVT, SExtInReg,
X, NewCond);
4154SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
4156 DAGCombinerInfo &DCI,
const SDLoc &
DL)
const {
4158 "Should be a comparison with 0.");
4160 "Valid only for [in]equality comparisons.");
4162 unsigned NewShiftOpcode;
4173 unsigned OldShiftOpcode =
V.getOpcode();
4174 switch (OldShiftOpcode) {
4186 C =
V.getOperand(0);
4191 Y =
V.getOperand(1);
4195 return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
4196 X, XC,
CC,
Y, OldShiftOpcode, NewShiftOpcode, DAG);
4213 EVT VT =
X.getValueType();
4228 DAGCombinerInfo &DCI)
const {
4231 "Unexpected binop");
4260 if (!DCI.isCalledByLegalizer())
4261 DCI.AddToWorklist(YShl1.
getNode());
4276 if (CTPOP.getOpcode() !=
ISD::CTPOP || !CTPOP.hasOneUse())
4279 EVT CTVT = CTPOP.getValueType();
4280 SDValue CTOp = CTPOP.getOperand(0);
4300 for (
unsigned i = 0; i <
Passes; i++) {
4349 auto getRotateSource = [](
SDValue X) {
4351 return X.getOperand(0);
4358 if (
SDValue R = getRotateSource(N0))
4391 if (!C1 || !C1->
isZero())
4400 if (!ShAmtC || ShAmtC->getAPIntValue().uge(
BitWidth))
4404 unsigned ShAmt = ShAmtC->getZExtValue();
4413 if (
Or.getOperand(0) ==
Other) {
4414 X =
Or.getOperand(0);
4415 Y =
Or.getOperand(1);
4418 if (
Or.getOperand(1) ==
Other) {
4419 X =
Or.getOperand(1);
4420 Y =
Or.getOperand(0);
4430 if (matchOr(F0, F1)) {
4437 if (matchOr(F1, F0)) {
4453 const SDLoc &dl)
const {
4463 bool N0ConstOrSplat =
4465 bool N1ConstOrSplat =
4473 if (N0ConstOrSplat && !N1ConstOrSplat &&
4476 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4482 if (!N0ConstOrSplat && !N1ConstOrSplat &&
4487 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4496 const APInt &C1 = N1C->getAPIntValue();
4516 return DAG.
getNode(LogicOp, dl, VT, IsXZero, IsYZero);
4546 if (
auto *N1C = dyn_cast<ConstantSDNode>(N1.
getNode())) {
4547 const APInt &C1 = N1C->getAPIntValue();
4562 if (
auto *
C = dyn_cast<ConstantSDNode>(N0->
getOperand(1)))
4563 if ((
C->getAPIntValue()+1).isPowerOf2()) {
4564 MinBits =
C->getAPIntValue().countr_one();
4572 }
else if (
auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
4575 MinBits = LN0->getMemoryVT().getSizeInBits();
4579 MinBits = LN0->getMemoryVT().getSizeInBits();
4590 MinBits >= ReqdBits) {
4592 if (isTypeDesirableForOp(
ISD::SETCC, MinVT)) {
4595 if (MinBits == 1 && C1 == 1)
4614 if (TopSetCC.
getValueType() == MVT::i1 && VT == MVT::i1 &&
4627 cast<CondCodeSDNode>(TopSetCC.
getOperand(2))->get(),
4646 auto *Lod = cast<LoadSDNode>(N0.
getOperand(0));
4648 unsigned bestWidth = 0, bestOffset = 0;
4649 if (Lod->isSimple() && Lod->isUnindexed() &&
4650 (Lod->getMemoryVT().isByteSized() ||
4652 unsigned memWidth = Lod->getMemoryVT().getStoreSizeInBits();
4654 unsigned maskWidth = origWidth;
4658 origWidth = Lod->getMemoryVT().getSizeInBits();
4662 for (
unsigned width = 8; width < origWidth; width *= 2) {
4669 unsigned maxOffset = origWidth - width;
4670 for (
unsigned offset = 0; offset <= maxOffset; offset += 8) {
4671 if (Mask.isSubsetOf(newMask)) {
4672 unsigned ptrOffset =
4674 unsigned IsFast = 0;
4677 *DAG.
getContext(), Layout, newVT, Lod->getAddressSpace(),
4678 NewAlign, Lod->getMemOperand()->getFlags(), &IsFast) &&
4680 bestOffset = ptrOffset / 8;
4681 bestMask = Mask.lshr(offset);
4695 if (bestOffset != 0)
4699 Lod->getPointerInfo().getWithOffset(bestOffset),
4700 Lod->getOriginalAlign());
4777 ExtDstTy != ExtSrcTy &&
"Unexpected types!");
4784 return DAG.
getSetCC(dl, VT, ZextOp,
4786 }
else if ((N1C->isZero() || N1C->isOne()) &&
4833 return DAG.
getSetCC(dl, VT, Val, N1,
4836 }
else if (N1C->isOne()) {
4873 cast<VTSDNode>(Op0.
getOperand(1))->getVT() == MVT::i1)
4897 N1C && N1C->isAllOnes()) {
4904 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1,
Cond, DCI, dl))
4911 const APInt &C1 = N1C->getAPIntValue();
4913 APInt MinVal, MaxVal;
4935 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
4955 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
5003 if (
SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
5004 VT, N0, N1,
Cond, DCI, dl))
5011 bool CmpZero = N1C->isZero();
5012 bool CmpNegOne = N1C->isAllOnes();
5013 if ((CmpZero || CmpNegOne) && N0.
hasOneUse()) {
5016 unsigned EltBits = V.getScalarValueSizeInBits();
5017 if (V.getOpcode() !=
ISD::OR || (EltBits % 2) != 0)
5024 isa<ConstantSDNode>(
RHS.getOperand(1)) &&
5025 RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5028 Hi =
RHS.getOperand(0);
5032 isa<ConstantSDNode>(
LHS.getOperand(1)) &&
5033 LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5036 Hi =
LHS.getOperand(0);
5044 unsigned HalfBits = EltBits / 2;
5055 if (IsConcat(N0,
Lo,
Hi))
5056 return MergeConcat(
Lo,
Hi);
5093 if (
auto *N1C = dyn_cast<ConstantSDNode>(N1.
getNode())) {
5095 const APInt &C1 = N1C->getAPIntValue();
5107 if (
auto *AndRHS = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5110 unsigned ShCt = AndRHS->getAPIntValue().logBase2();
5111 if (AndRHS->getAPIntValue().isPowerOf2() &&
5119 }
else if (
Cond ==
ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
5140 if (
auto *AndRHS = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5141 const APInt &AndRHSC = AndRHS->getAPIntValue();
5180 return DAG.
getSetCC(dl, VT, Shift, CmpRHS, NewCond);
5186 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) {
5187 auto *CFP = cast<ConstantFPSDNode>(N1);
5188 assert(!CFP->getValueAPF().isNaN() &&
"Unexpected NaN value");
5209 !
isFPImmLegal(CFP->getValueAPF(), CFP->getValueType(0))) {
5228 if (CFP->getValueAPF().isInfinity()) {
5229 bool IsNegInf = CFP->getValueAPF().isNegative();
5240 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5249 "Integer types should be handled by FoldSetCC");
5255 if (UOF ==
unsigned(EqTrue))
5260 if (NewCond !=
Cond &&
5263 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5270 if ((isSignedIntSetCC(
Cond) || isUnsignedIntSetCC(
Cond)) &&
5307 bool LegalRHSImm =
false;
5309 if (
auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
5310 if (
auto *LHSR = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5315 DAG.
getConstant(RHSC->getAPIntValue() - LHSR->getAPIntValue(),
5323 DAG.
getConstant(LHSR->getAPIntValue() ^ RHSC->getAPIntValue(),
5329 if (
auto *SUBC = dyn_cast<ConstantSDNode>(N0.
getOperand(0)))
5333 DAG.
getConstant(SUBC->getAPIntValue() - RHSC->getAPIntValue(),
5338 if (RHSC->getValueType(0).getSizeInBits() <= 64)
5347 if (
SDValue V = foldSetCCWithBinOp(VT, N0, N1,
Cond, dl, DCI))
5353 if (
SDValue V = foldSetCCWithBinOp(VT, N1, N0,
Cond, dl, DCI))
5356 if (
SDValue V = foldSetCCWithAnd(VT, N0, N1,
Cond, dl, DCI))
5367 if (
SDValue Folded = buildUREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5370 if (
SDValue Folded = buildSREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5383 N0 = DAG.
getNOT(dl, Temp, OpVT);
5392 Temp = DAG.
getNOT(dl, N0, OpVT);
5399 Temp = DAG.
getNOT(dl, N1, OpVT);
5406 Temp = DAG.
getNOT(dl, N0, OpVT);
5413 Temp = DAG.
getNOT(dl, N1, OpVT);
5422 N0 = DAG.
getNode(ExtendCode, dl, VT, N0);
5438 if (
auto *GASD = dyn_cast<GlobalAddressSDNode>(
N)) {
5439 GA = GASD->getGlobal();
5440 Offset += GASD->getOffset();
5448 if (
auto *V = dyn_cast<ConstantSDNode>(N2)) {
5449 Offset += V->getSExtValue();
5453 if (
auto *V = dyn_cast<ConstantSDNode>(N1)) {
5454 Offset += V->getSExtValue();
5475 unsigned S = Constraint.
size();
5478 switch (Constraint[0]) {
5481 return C_RegisterClass;
5509 if (S > 1 && Constraint[0] ==
'{' && Constraint[S - 1] ==
'}') {
5510 if (S == 8 && Constraint.
substr(1, 6) ==
"memory")
5538 std::vector<SDValue> &Ops,
5541 if (Constraint.
size() > 1)
5544 char ConstraintLetter = Constraint[0];
5545 switch (ConstraintLetter) {
5561 if ((
C = dyn_cast<ConstantSDNode>(
Op)) && ConstraintLetter !=
's') {
5565 bool IsBool =
C->getConstantIntValue()->getBitWidth() == 1;
5575 if (ConstraintLetter !=
'n') {
5576 if (
const auto *GA = dyn_cast<GlobalAddressSDNode>(
Op)) {
5578 GA->getValueType(0),
5579 Offset + GA->getOffset()));
5582 if (
const auto *BA = dyn_cast<BlockAddressSDNode>(
Op)) {
5584 BA->getBlockAddress(), BA->getValueType(0),
5585 Offset + BA->getOffset(), BA->getTargetFlags()));
5588 if (isa<BasicBlockSDNode>(
Op)) {
5593 const unsigned OpCode =
Op.getOpcode();
5595 if ((
C = dyn_cast<ConstantSDNode>(
Op.getOperand(0))))
5596 Op =
Op.getOperand(1);
5599 (
C = dyn_cast<ConstantSDNode>(
Op.getOperand(1))))
5600 Op =
Op.getOperand(0);
5617std::pair<unsigned, const TargetRegisterClass *>
5623 assert(*(Constraint.
end() - 1) ==
'}' &&
"Not a brace enclosed constraint?");
5628 std::pair<unsigned, const TargetRegisterClass *> R =
5640 std::pair<unsigned, const TargetRegisterClass *> S =
5641 std::make_pair(PR, RC);
5663 assert(!ConstraintCode.empty() &&
"No known constraint!");
5664 return isdigit(
static_cast<unsigned char>(ConstraintCode[0]));
5670 assert(!ConstraintCode.empty() &&
"No known constraint!");
5671 return atoi(ConstraintCode.c_str());
5685 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
5686 unsigned maCount = 0;
5692 unsigned LabelNo = 0;
5695 ConstraintOperands.emplace_back(std::move(CI));
5705 switch (OpInfo.
Type) {
5715 assert(!Call.getType()->isVoidTy() &&
"Bad inline asm!");
5716 if (
auto *STy = dyn_cast<StructType>(Call.getType())) {
5720 assert(ResNo == 0 &&
"Asm only has one result!");
5730 OpInfo.
CallOperandVal = cast<CallBrInst>(&Call)->getIndirectDest(LabelNo);
5741 OpTy = Call.getParamElementType(ArgNo);
5742 assert(OpTy &&
"Indirect operand must have elementtype attribute");
5746 if (
StructType *STy = dyn_cast<StructType>(OpTy))
5747 if (STy->getNumElements() == 1)
5748 OpTy = STy->getElementType(0);
5753 unsigned BitSize =
DL.getTypeSizeInBits(OpTy);
5774 if (!ConstraintOperands.empty()) {
5776 unsigned bestMAIndex = 0;
5777 int bestWeight = -1;
5783 for (maIndex = 0; maIndex < maCount; ++maIndex) {
5785 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
5786 cIndex != eIndex; ++cIndex) {
5807 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
5812 weightSum += weight;
5815 if (weightSum > bestWeight) {
5816 bestWeight = weightSum;
5817 bestMAIndex = maIndex;
5824 cInfo.selectAlternative(bestMAIndex);
5829 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
5830 cIndex != eIndex; ++cIndex) {
5841 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
5844 std::pair<unsigned, const TargetRegisterClass *> InputRC =
5849 (MatchRC.second != InputRC.second)) {
5851 " with a matching output constraint of"
5852 " incompatible type!");
5858 return ConstraintOperands;
5893 if (maIndex >= (
int)
info.multipleAlternatives.size())
5894 rCodes = &
info.Codes;
5896 rCodes = &
info.multipleAlternatives[maIndex].Codes;
5900 for (
const std::string &rCode : *rCodes) {
5902 getSingleConstraintMatchWeight(
info, rCode.c_str());
5903 if (weight > BestWeight)
5904 BestWeight = weight;
5917 Value *CallOperandVal =
info.CallOperandVal;
5920 if (!CallOperandVal)
5923 switch (*constraint) {
5926 if (isa<ConstantInt>(CallOperandVal))
5927 weight = CW_Constant;
5930 if (isa<GlobalValue>(CallOperandVal))
5931 weight = CW_Constant;
5935 if (isa<ConstantFP>(CallOperandVal))
5936 weight = CW_Constant;
5949 weight = CW_Register;
5953 weight = CW_Default;
5987 Ret.reserve(OpInfo.
Codes.size());
6002 Ret.emplace_back(Code, CType);
6007 return getConstraintPiority(a.second) > getConstraintPiority(b.second);
6021 "need immediate or other");
6026 std::vector<SDValue> ResultOps;
6028 return !ResultOps.empty();
6036 assert(!OpInfo.
Codes.empty() &&
"Must have at least one constraint");
6039 if (OpInfo.
Codes.size() == 1) {
6047 unsigned BestIdx = 0;
6048 for (
const unsigned E =
G.size();
6055 if (BestIdx + 1 == E) {
6071 if (isa<ConstantInt>(v) || isa<Function>(v)) {
6075 if (isa<BasicBlock>(v) || isa<BlockAddress>(v)) {
6082 if (
const char *Repl = LowerXConstraint(OpInfo.
ConstraintVT)) {
6096 EVT VT =
N->getValueType(0);
6101 bool UseSRA =
false;
6107 APInt Divisor =
C->getAPIntValue();
6129 "Expected matchUnaryPredicate to return one element for scalable "
6134 assert(isa<ConstantSDNode>(Op1) &&
"Expected a constant");
6136 Factor = Factors[0];
6145 Flags.setExact(
true);
6186 EVT VT =
N->getValueType(0);
6222 bool IsAfterLegalization,
6225 EVT VT =
N->getValueType(0);
6251 if (
N->getFlags().hasExact())
6260 const APInt &Divisor =
C->getAPIntValue();
6262 int NumeratorFactor = 0;
6273 NumeratorFactor = 1;
6276 NumeratorFactor = -1;
6293 SDValue MagicFactor, Factor, Shift, ShiftMask;
6301 Shifts.
size() == 1 && ShiftMasks.
size() == 1 &&
6302 "Expected matchUnaryPredicate to return one element for scalable "
6309 assert(isa<ConstantSDNode>(N1) &&
"Expected a constant");
6310 MagicFactor = MagicFactors[0];
6311 Factor = Factors[0];
6313 ShiftMask = ShiftMasks[0];
6354 SDValue Q = GetMULHS(N0, MagicFactor);
6384 bool IsAfterLegalization,
6387 EVT VT =
N->getValueType(0);
6418 unsigned LeadingZeros = 0;
6419 if (!VT.
isVector() && isa<ConstantSDNode>(N1)) {
6427 bool UseNPQ =
false, UsePreShift =
false, UsePostShift =
false;
6433 const APInt& Divisor =
C->getAPIntValue();
6435 SDValue PreShift, MagicFactor, NPQFactor, PostShift;
6439 if (Divisor.
isOne()) {
6440 PreShift = PostShift = DAG.
getUNDEF(ShSVT);
6441 MagicFactor = NPQFactor = DAG.
getUNDEF(SVT);
6449 "We shouldn't generate an undefined shift!");
6451 "We shouldn't generate an undefined shift!");
6453 "Unexpected pre-shift");
6460 UseNPQ |= magics.
IsAdd;
6461 UsePreShift |= magics.
PreShift != 0;
6476 SDValue PreShift, PostShift, MagicFactor, NPQFactor;
6484 NPQFactors.
size() == 1 && PostShifts.
size() == 1 &&
6485 "Expected matchUnaryPredicate to return one for scalable vectors");
6491 assert(isa<ConstantSDNode>(N1) &&
"Expected a constant");
6492 PreShift = PreShifts[0];
6493 MagicFactor = MagicFactors[0];
6494 PostShift = PostShifts[0];
6541 Q = GetMULHU(Q, MagicFactor);
6554 NPQ = GetMULHU(NPQ, NPQFactor);
6573 return DAG.
getSelect(dl, VT, IsOne, N0, Q);
6582 std::function<
bool(
SDValue)> Predicate,
6587 if (SplatValue != Values.
end()) {
6590 return Value == *SplatValue || Predicate(
Value);
6592 Replacement = *SplatValue;
6596 if (!AlternativeReplacement)
6599 Replacement = AlternativeReplacement;
6601 std::replace_if(Values.
begin(), Values.
end(), Predicate, Replacement);
6612 DAGCombinerInfo &DCI,
6615 if (
SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
6618 DCI.AddToWorklist(
N);
6626TargetLowering::prepareUREMEqFold(
EVT SETCCVT,
SDValue REMNode,
6628 DAGCombinerInfo &DCI,
const SDLoc &
DL,
6636 "Only applicable for (in)equality comparisons.");
6649 bool ComparingWithAllZeros =
true;
6650 bool AllComparisonsWithNonZerosAreTautological =
true;
6651 bool HadTautologicalLanes =
false;
6652 bool AllLanesAreTautological =
true;
6653 bool HadEvenDivisor =
false;
6654 bool AllDivisorsArePowerOfTwo =
true;
6655 bool HadTautologicalInvertedLanes =
false;
6664 const APInt &
Cmp = CCmp->getAPIntValue();
6666 ComparingWithAllZeros &=
Cmp.isZero();
6672 bool TautologicalInvertedLane =
D.ule(Cmp);
6673 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
6678 bool TautologicalLane =
D.isOne() || TautologicalInvertedLane;
6679 HadTautologicalLanes |= TautologicalLane;
6680 AllLanesAreTautological &= TautologicalLane;
6686 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
6689 unsigned K =
D.countr_zero();
6690 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
6694 HadEvenDivisor |= (
K != 0);
6697 AllDivisorsArePowerOfTwo &= D0.
isOne();
6701 unsigned W =
D.getBitWidth();
6703 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
6716 "We are expecting that K is always less than all-ones for ShSVT");
6719 if (TautologicalLane) {
6743 if (AllLanesAreTautological)
6748 if (AllDivisorsArePowerOfTwo)
6753 if (HadTautologicalLanes) {
6768 "Expected matchBinaryPredicate to return one element for "
6779 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
6783 "Expecting that the types on LHS and RHS of comparisons match.");
6793 if (HadEvenDivisor) {
6806 if (!HadTautologicalInvertedLanes)
6812 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
6819 SDValue TautologicalInvertedChannels =
6829 DL, SETCCVT, SETCCVT);
6831 Replacement, NewCC);
6839 TautologicalInvertedChannels);
6852 DAGCombinerInfo &DCI,
6855 if (
SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
6857 assert(Built.
size() <= 7 &&
"Max size prediction failed.");
6859 DCI.AddToWorklist(
N);
6867TargetLowering::prepareSREMEqFold(
EVT SETCCVT,
SDValue REMNode,
6869 DAGCombinerInfo &DCI,
const SDLoc &
DL,
6894 "Only applicable for (in)equality comparisons.");
6910 if (!CompTarget || !CompTarget->
isZero())
6913 bool HadIntMinDivisor =
false;
6914 bool HadOneDivisor =
false;
6915 bool AllDivisorsAreOnes =
true;
6916 bool HadEvenDivisor =
false;
6917 bool NeedToApplyOffset =
false;
6918 bool AllDivisorsArePowerOfTwo =
true;
6933 HadIntMinDivisor |=
D.isMinSignedValue();
6936 HadOneDivisor |=
D.isOne();
6937 AllDivisorsAreOnes &=
D.isOne();
6940 unsigned K =
D.countr_zero();
6941 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
6944 if (!
D.isMinSignedValue()) {
6947 HadEvenDivisor |= (
K != 0);
6952 AllDivisorsArePowerOfTwo &= D0.
isOne();
6956 unsigned W =
D.getBitWidth();
6958 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
6964 if (!
D.isMinSignedValue()) {
6967 NeedToApplyOffset |=
A != 0;
6974 "We are expecting that A is always less than all-ones for SVT");
6976 "We are expecting that K is always less than all-ones for ShSVT");
7014 if (AllDivisorsAreOnes)
7019 if (AllDivisorsArePowerOfTwo)
7022 SDValue PVal, AVal, KVal, QVal;
7024 if (HadOneDivisor) {
7044 QAmts.
size() == 1 &&
7045 "Expected matchUnaryPredicate to return one element for scalable "
7052 assert(isa<ConstantSDNode>(
D) &&
"Expected a constant");
7063 if (NeedToApplyOffset) {
7075 if (HadEvenDivisor) {
7090 if (!HadIntMinDivisor)
7096 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
7131 MaskedIsZero, Fold);
7138 if (!isa<ConstantSDNode>(
Op.getOperand(0))) {
7140 "be a constant integer");
7150 EVT VT =
Op.getValueType();
7173 bool LegalOps,
bool OptForSize,
7175 unsigned Depth)
const {
7177 if (
Op.getOpcode() ==
ISD::FNEG ||
Op.getOpcode() == ISD::VP_FNEG) {
7179 return Op.getOperand(0);
7190 EVT VT =
Op.getValueType();
7191 unsigned Opcode =
Op.getOpcode();
7201 auto RemoveDeadNode = [&](
SDValue N) {
7202 if (
N &&
N.getNode()->use_empty())
7211 std::list<HandleSDNode> Handles;
7222 if (LegalOps && !IsOpLegal)
7225 APFloat V = cast<ConstantFPSDNode>(
Op)->getValueAPF();
7239 return !N.isUndef() && !isa<ConstantFPSDNode>(N);
7247 return N.isUndef() ||
7252 if (LegalOps && !IsOpLegal)
7261 APFloat V = cast<ConstantFPSDNode>(
C)->getValueAPF();
7269 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7280 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7283 Handles.emplace_back(NegX);
7288 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7294 if (NegX && (CostX <= CostY)) {
7298 RemoveDeadNode(NegY);
7307 RemoveDeadNode(NegX);
7314 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7336 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7339 Handles.emplace_back(NegX);
7344 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7350 if (NegX && (CostX <= CostY)) {
7354 RemoveDeadNode(NegY);
7360 if (
C->isExactlyValue(2.0) &&
Op.getOpcode() ==
ISD::FMUL)
7368 RemoveDeadNode(NegX);
7375 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7378 SDValue X =
Op.getOperand(0),
Y =
Op.getOperand(1), Z =
Op.getOperand(2);
7381 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ,
Depth);
7387 Handles.emplace_back(NegZ);
7392 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7395 Handles.emplace_back(NegX);
7400 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7406 if (NegX && (CostX <= CostY)) {
7407 Cost = std::min(CostX, CostZ);
7410 RemoveDeadNode(NegY);
7416 Cost = std::min(CostY, CostZ);
7419 RemoveDeadNode(NegX);
7427 if (
SDValue NegV = getNegatedExpression(
Op.getOperand(0), DAG, LegalOps,
7429 return DAG.
getNode(Opcode,
DL, VT, NegV);
7432 if (
SDValue NegV = getNegatedExpression(
Op.getOperand(0), DAG, LegalOps,
7443 getNegatedExpression(
LHS, DAG, LegalOps, OptForSize, CostLHS,
Depth);
7445 RemoveDeadNode(NegLHS);
7450 Handles.emplace_back(NegLHS);
7455 getNegatedExpression(
RHS, DAG, LegalOps, OptForSize, CostRHS,
Depth);
7463 RemoveDeadNode(NegLHS);
7464 RemoveDeadNode(NegRHS);
7468 Cost = std::min(CostLHS, CostRHS);
7469 return DAG.
getSelect(
DL, VT,
Op.getOperand(0), NegLHS, NegRHS);
7498 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
7511 if ((
Signed && HasSMUL_LOHI) || (!
Signed && HasUMUL_LOHI)) {
7539 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false)) {
7540 Result.push_back(
Lo);
7541 Result.push_back(
Hi);
7544 Result.push_back(Zero);
7545 Result.push_back(Zero);
7556 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
true)) {
7557 Result.push_back(
Lo);
7558 Result.push_back(
Hi);
7563 unsigned ShiftAmount = OuterBitSize - InnerBitSize;
7578 if (!MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false))
7581 Result.push_back(
Lo);
7588 Result.push_back(
Hi);
7601 if (!MakeMUL_LOHI(LL, RH,
Lo,
Hi,
false))
7608 if (!MakeMUL_LOHI(LH, RL,
Lo,
Hi,
false))
7660 bool Ok = expandMUL_LOHI(
N->getOpcode(),
N->getValueType(0),
SDLoc(
N),
7661 N->getOperand(0),
N->getOperand(1), Result, HiLoVT,
7662 DAG, Kind, LL, LH, RL, RH);
7664 assert(Result.size() == 2);
7696 unsigned Opcode =
N->getOpcode();
7697 EVT VT =
N->getValueType(0);
7704 "Unexpected opcode");
7706 auto *CN = dyn_cast<ConstantSDNode>(
N->getOperand(1));
7710 APInt Divisor = CN->getAPIntValue();
7718 if (Divisor.
uge(HalfMaxPlus1))
7736 unsigned TrailingZeros = 0;
7750 if (HalfMaxPlus1.
urem(Divisor).
isOne()) {
7751 assert(!LL == !LH &&
"Expected both input halves or no input halves!");
7753 std::tie(LL, LH) = DAG.
SplitScalar(
N->getOperand(0), dl, HiLoVT, HiLoVT);
7757 if (TrailingZeros) {
7825 std::tie(QuotL, QuotH) = DAG.
SplitScalar(Quotient, dl, HiLoVT, HiLoVT);
7826 Result.push_back(QuotL);
7827 Result.push_back(QuotH);
7833 if (TrailingZeros) {
7839 Result.push_back(RemL);
7855 EVT VT =
Node->getValueType(0);
7865 bool IsFSHL =
Node->getOpcode() == ISD::VP_FSHL;
7868 EVT ShVT = Z.getValueType();
7874 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
7875 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitWidthC, ShAmt, Mask, VL);
7876 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, IsFSHL ? ShAmt : InvShAmt, Mask,
7878 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, IsFSHL ? InvShAmt : ShAmt, Mask,
7886 ShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, Z, BitMask, Mask, VL);
7890 InvShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, NotZ, BitMask, Mask, VL);
7893 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
7894 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitMask, ShAmt, Mask, VL);
7899 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, ShAmt, Mask, VL);
7901 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT, ShY1, InvShAmt, Mask, VL);
7904 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT, ShX1, InvShAmt, Mask, VL);
7905 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, ShAmt, Mask, VL);
7908 return DAG.
getNode(ISD::VP_OR,
DL, VT, ShX, ShY, Mask, VL);
7913 if (Node->isVPOpcode())
7916 EVT VT = Node->getValueType(0);
7926 SDValue Z = Node->getOperand(2);
7929 bool IsFSHL = Node->getOpcode() ==
ISD::FSHL;
7932 EVT ShVT = Z.getValueType();
8002 EVT VT = Node->getValueType(0);
8004 bool IsLeft = Node->getOpcode() ==
ISD::ROTL;
8005 SDValue Op0 = Node->getOperand(0);
8006 SDValue Op1 = Node->getOperand(1);
8017 return DAG.
getNode(RevRot,
DL, VT, Op0, Sub);
8020 if (!AllowVectorOps && VT.
isVector() &&
8038 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8040 HsVal = DAG.
getNode(HsOpc,
DL, VT, Op0, HsAmt);
8046 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8057 assert(Node->getNumOperands() == 3 &&
"Not a double-shift!");
8058 EVT VT = Node->getValueType(0);
8064 SDValue ShOpLo = Node->getOperand(0);
8065 SDValue ShOpHi = Node->getOperand(1);
8066 SDValue ShAmt = Node->getOperand(2);
8109 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
8110 SDValue Src = Node->getOperand(OpNo);
8111 EVT SrcVT = Src.getValueType();
8112 EVT DstVT = Node->getValueType(0);
8116 if (SrcVT != MVT::f32 || DstVT != MVT::i64)
8119 if (Node->isStrictFPOpcode())
8182 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
8183 SDValue Src = Node->getOperand(OpNo);
8185 EVT SrcVT = Src.getValueType();
8186 EVT DstVT = Node->getValueType(0);
8207 if (Node->isStrictFPOpcode()) {
8209 { Node->getOperand(0), Src });
8210 Chain = Result.getValue(1);
8224 if (Node->isStrictFPOpcode()) {
8226 Node->getOperand(0),
true);
8232 bool Strict = Node->isStrictFPOpcode() ||
8251 if (Node->isStrictFPOpcode()) {
8253 { Chain, Src, FltOfs });
8275 Result = DAG.
getSelect(dl, DstVT, Sel, True, False);
8286 if (Node->isStrictFPOpcode())
8289 SDValue Src = Node->getOperand(0);
8290 EVT SrcVT = Src.getValueType();
8291 EVT DstVT = Node->getValueType(0);
8314 llvm::bit_cast<double>(UINT64_C(0x4530000000100000)), dl, DstVT);
8334 unsigned Opcode = Node->getOpcode();
8339 if (Node->getFlags().hasNoNaNs()) {
8341 SDValue Op1 = Node->getOperand(0);
8342 SDValue Op2 = Node->getOperand(1);
8347 Flags.setNoSignedZeros(
true);
8360 EVT VT = Node->getValueType(0);
8364 "Expanding fminnum/fmaxnum for scalable vectors is undefined.");
8367 SDValue Quiet0 = Node->getOperand(0);
8368 SDValue Quiet1 = Node->getOperand(1);
8370 if (!Node->getFlags().hasNoNaNs()) {
8383 return DAG.
getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags());
8389 if ((Node->getFlags().hasNoNaNs() ||
8392 (Node->getFlags().hasNoSignedZeros() ||
8395 unsigned IEEE2018Op =
8398 return DAG.
getNode(IEEE2018Op, dl, VT, Node->getOperand(0),
8399 Node->getOperand(1), Node->getFlags());
8402 if (
SDValue SelCC = createSelectForFMINNUM_FMAXNUM(Node, DAG))
8413 unsigned Opc =
N->getOpcode();
8414 EVT VT =
N->getValueType(0);
8427 bool MinMaxMustRespectOrderedZero =
false;
8431 MinMaxMustRespectOrderedZero =
true;
8445 if (!
N->getFlags().hasNoNaNs() &&
8454 if (!MinMaxMustRespectOrderedZero && !
N->getFlags().hasNoSignedZeros() &&
8480 bool IsOrdered = NanTest ==
fcNone;
8481 bool IsUnordered = NanTest ==
fcNan;
8484 if (!IsOrdered && !IsUnordered)
8485 return std::nullopt;
8487 if (OrderedMask ==
fcZero &&
8493 return std::nullopt;
8500 EVT OperandVT =
Op.getValueType();
8511 if (OperandVT == MVT::ppcf128) {
8514 OperandVT = MVT::f64;
8519 bool IsInverted =
false;
8522 Test = InvertedCheck;
8529 bool IsF80 = (ScalarFloatVT == MVT::f80);
8533 if (Flags.hasNoFPExcept() &&
8538 if (std::optional<bool> IsCmp0 =
8541 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode,
8548 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode);
8583 const unsigned ExplicitIntBitInF80 = 63;
8584 APInt ExpMask = Inf;
8586 ExpMask.
clearBit(ExplicitIntBitInF80);
8600 const auto appendResult = [&](
SDValue PartialRes) {
8610 const auto getIntBitIsSet = [&]() ->
SDValue {
8611 if (!IntBitIsSetV) {
8612 APInt IntBitMask(BitSize, 0);
8613 IntBitMask.
setBit(ExplicitIntBitInF80);
8618 return IntBitIsSetV;
8639 Test &= ~fcPosFinite;
8644 Test &= ~fcNegFinite;
8646 appendResult(PartialRes);
8655 appendResult(ExpIsZero);
8665 else if (PartialCheck ==
fcZero)
8669 appendResult(PartialRes);
8682 appendResult(PartialRes);
8685 if (
unsigned PartialCheck =
Test &
fcInf) {
8688 else if (PartialCheck ==
fcInf)
8695 appendResult(PartialRes);
8698 if (
unsigned PartialCheck =
Test &
fcNan) {
8699 APInt InfWithQnanBit = Inf | QNaNBitMask;
8701 if (PartialCheck ==
fcNan) {
8714 }
else if (PartialCheck ==
fcQNan) {
8726 appendResult(PartialRes);
8731 APInt ExpLSB = ExpMask & ~(ExpMask.
shl(1));
8734 APInt ExpLimit = ExpMask - ExpLSB;
8747 appendResult(PartialRes);
8770 EVT VT = Node->getValueType(0);
8777 if (!(Len <= 128 && Len % 8 == 0))
8836 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
8847 EVT VT = Node->getValueType(0);
8850 SDValue Mask = Node->getOperand(1);
8851 SDValue VL = Node->getOperand(2);
8856 if (!(Len <= 128 && Len % 8 == 0))
8868 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5;
8871 Tmp1 = DAG.
getNode(ISD::VP_AND, dl, VT,
8875 Op = DAG.
getNode(ISD::VP_SUB, dl, VT,
Op, Tmp1, Mask, VL);
8878 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op, Mask33, Mask, VL);
8879 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT,
8883 Op = DAG.
getNode(ISD::VP_ADD, dl, VT, Tmp2, Tmp3, Mask, VL);
8888 Tmp5 = DAG.
getNode(ISD::VP_ADD, dl, VT,
Op, Tmp4, Mask, VL);
8889 Op = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp5, Mask0F, Mask, VL);
8900 V = DAG.
getNode(ISD::VP_MUL, dl, VT,
Op, Mask01, Mask, VL);
8903 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
8905 V = DAG.
getNode(ISD::VP_ADD, dl, VT, V,
8906 DAG.
getNode(ISD::VP_SHL, dl, VT, V, ShiftC, Mask, VL),
8916 EVT VT = Node->getValueType(0);
8955 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
8966 EVT VT = Node->getValueType(0);
8969 SDValue Mask = Node->getOperand(1);
8970 SDValue VL = Node->getOperand(2);
8980 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
8983 DAG.
getNode(ISD::VP_SRL, dl, VT,
Op, Tmp, Mask, VL), Mask,
8988 return DAG.
getNode(ISD::VP_CTPOP, dl, VT,
Op, Mask, VL);
8997 :
APInt(64, 0x0218A392CD3D5DBFULL);
9011 for (
unsigned i = 0; i <
BitWidth; i++) {
9037 EVT VT = Node->getValueType(0);
9071 if (
SDValue V = CTTZTableLookup(Node, DAG, dl, VT,
Op, NumBitsPerElt))
9093 SDValue Mask = Node->getOperand(1);
9094 SDValue VL = Node->getOperand(2);
9096 EVT VT = Node->getValueType(0);
9103 SDValue Tmp = DAG.
getNode(ISD::VP_AND, dl, VT, Not, MinusOne, Mask, VL);
9104 return DAG.
getNode(ISD::VP_CTPOP, dl, VT, Tmp, Mask, VL);
9118 EVT SrcVT = Source.getValueType();
9119 EVT ResVT =
N->getValueType(0);
9128 Source = DAG.
getNode(ISD::VP_SETCC,
DL, SrcVT, Source, AllZero,
9136 DAG.
getNode(ISD::VP_SELECT,
DL, ResVecVT, Source, StepVec,
Splat, EVL);
9137 return DAG.
getNode(ISD::VP_REDUCE_UMIN,
DL, ResVT, ExtEVL,
Select, Mask, EVL);
9141 bool IsNegative)
const {
9143 EVT VT =
N->getValueType(0);
9197 EVT VT =
N->getValueType(0);
9239 EVT VT =
N->getValueType(0);
9243 unsigned Opc =
N->getOpcode();
9252 "Unknown AVG node");
9264 return DAG.
getNode(ShiftOpc, dl, VT, Sum,
9296 return DAG.
getNode(SumOpc, dl, VT, Sign, Shift);
9301 EVT VT =
N->getValueType(0);
9308 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9359 EVT VT =
N->getValueType(0);
9368 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9377 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp1, Tmp2, Mask, EVL);
9387 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9391 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
9392 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
9393 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
9397 Tmp7 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9401 Tmp6 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9402 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
9405 Tmp5 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9406 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
9411 Tmp4 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp4,
9412 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
9415 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp3,
9416 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
9419 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9423 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp7, Mask, EVL);
9424 Tmp6 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp6, Tmp5, Mask, EVL);
9425 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
9426 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
9427 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp6, Mask, EVL);
9428 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
9429 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp4, Mask, EVL);
9435 EVT VT =
N->getValueType(0);
9478 for (
unsigned I = 0, J = Sz-1;
I < Sz; ++
I, --J) {
9495 assert(
N->getOpcode() == ISD::VP_BITREVERSE);
9498 EVT VT =
N->getValueType(0);
9517 Tmp = (Sz > 8 ? DAG.
getNode(ISD::VP_BSWAP, dl, VT,
Op, Mask, EVL) :
Op);
9522 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9528 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9533 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9539 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9544 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9550 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9556std::pair<SDValue, SDValue>
9560 SDValue Chain = LD->getChain();
9561 SDValue BasePTR = LD->getBasePtr();
9562 EVT SrcVT = LD->getMemoryVT();
9563 EVT DstVT = LD->getValueType(0);
9595 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(),
9596 LD->getMemOperand()->getFlags(), LD->getAAInfo());
9599 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9600 unsigned ShiftIntoIdx =
9612 Scalar = DAG.
getNode(ExtendOp, SL, DstEltVT, Scalar);
9619 return std::make_pair(
Value, Load.getValue(1));
9628 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9630 DAG.
getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
9631 LD->getPointerInfo().getWithOffset(
Idx * Stride),
9632 SrcEltVT, LD->getOriginalAlign(),
9633 LD->getMemOperand()->getFlags(), LD->getAAInfo());
9644 return std::make_pair(
Value, NewChain);
9651 SDValue Chain = ST->getChain();
9652 SDValue BasePtr = ST->getBasePtr();
9654 EVT StVT = ST->getMemoryVT();
9680 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9685 unsigned ShiftIntoIdx =
9694 return DAG.
getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
9695 ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
9701 assert(Stride &&
"Zero stride!");
9705 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9714 Chain, SL, Elt,
Ptr, ST->getPointerInfo().getWithOffset(
Idx * Stride),
9715 MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
9724std::pair<SDValue, SDValue>
9727 "unaligned indexed loads not implemented!");
9728 SDValue Chain = LD->getChain();
9730 EVT VT = LD->getValueType(0);
9731 EVT LoadedVT = LD->getMemoryVT();
9741 return scalarizeVectorLoad(LD, DAG);
9747 LD->getMemOperand());
9753 return std::make_pair(Result, newLoad.
getValue(1));
9761 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
9765 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.
getNode())->getIndex();
9770 EVT PtrVT =
Ptr.getValueType();
9771 EVT StackPtrVT = StackPtr.getValueType();
9777 for (
unsigned i = 1; i < NumRegs; i++) {
9780 RegVT, dl, Chain,
Ptr, LD->getPointerInfo().getWithOffset(
Offset),
9781 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
9785 Load.getValue(1), dl, Load, StackPtr,
9796 8 * (LoadedBytes -
Offset));
9799 LD->getPointerInfo().getWithOffset(
Offset), MemVT,
9800 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
9806 Load.getValue(1), dl, Load, StackPtr,
9813 Load = DAG.
getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
9818 return std::make_pair(Load, TF);
9822 "Unaligned load of unsupported type.");
9831 Align Alignment = LD->getOriginalAlign();
9832 unsigned IncrementSize = NumBits / 8;
9843 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
9848 LD->getPointerInfo().getWithOffset(IncrementSize),
9849 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
9852 Hi = DAG.
getExtLoad(HiExtType, dl, VT, Chain,
Ptr, LD->getPointerInfo(),
9853 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
9858 LD->getPointerInfo().getWithOffset(IncrementSize),
9859 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
9871 return std::make_pair(Result, TF);
9877 "unaligned indexed stores not implemented!");
9878 SDValue Chain = ST->getChain();
9882 Align Alignment = ST->getOriginalAlign();
9884 EVT StoreMemVT = ST->getMemoryVT();
9900 Result = DAG.
getStore(Chain, dl, Result,
Ptr, ST->getPointerInfo(),
9901 Alignment, ST->getMemOperand()->getFlags());
9909 EVT PtrVT =
Ptr.getValueType();
9912 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
9916 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
9920 Chain, dl, Val, StackPtr,
9923 EVT StackPtrVT = StackPtr.getValueType();
9931 for (
unsigned i = 1; i < NumRegs; i++) {
9934 RegVT, dl, Store, StackPtr,
9938 ST->getPointerInfo().getWithOffset(
Offset),
9939 ST->getOriginalAlign(),
9940 ST->getMemOperand()->getFlags()));
9960 ST->getPointerInfo().getWithOffset(
Offset), LoadMemVT,
9961 ST->getOriginalAlign(),
9962 ST->getMemOperand()->getFlags(), ST->getAAInfo()));
9969 "Unaligned store of unknown type.");
9973 unsigned IncrementSize = NumBits / 8;
9982 if (
auto *
C = dyn_cast<ConstantSDNode>(
Lo);
C && !
C->isOpaque())
9993 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
9994 ST->getMemOperand()->getFlags());
9999 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
10000 ST->getMemOperand()->getFlags(), ST->getAAInfo());
10011 bool IsCompressedMemory)
const {
10013 EVT AddrVT =
Addr.getValueType();
10014 EVT MaskVT = Mask.getValueType();
10016 "Incompatible types of Data and Mask");
10017 if (IsCompressedMemory) {
10020 "Cannot currently handle compressed memory with scalable vectors");
10026 MaskIntVT = MVT::i32;
10050 "Cannot index a scalable vector within a fixed-width vector");
10054 EVT IdxVT =
Idx.getValueType();
10060 if (
auto *IdxCst = dyn_cast<ConstantSDNode>(
Idx))
10061 if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts)
10075 unsigned MaxIndex = NumSubElts < NElts ? NElts - NumSubElts : 0;
10083 return getVectorSubVecPointer(
10084 DAG, VecPtr, VecVT,
10102 "Converting bits to bytes lost precision");
10104 "Sub-vector must be a vector with matching element type");
10133 std::string NameString = (
"__emutls_v." + GA->
getGlobal()->
getName()).str();
10137 assert(EmuTlsVar &&
"Cannot find EmuTlsVar ");
10139 Entry.Ty = VoidPtrType;
10140 Args.push_back(Entry);
10147 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
10156 "Emulated TLS must have zero offset in GlobalAddressSDNode");
10157 return CallResult.first;
10168 EVT VT =
Op.getOperand(0).getValueType();
10170 if (VT.
bitsLT(MVT::i32)) {
10184 SDValue Op0 = Node->getOperand(0);
10185 SDValue Op1 = Node->getOperand(1);
10188 unsigned Opcode = Node->getOpcode();
10230 {Op0, Op1, DAG.getCondCode(CC)})) {
10237 {Op0, Op1, DAG.getCondCode(CC)})) {
10265 unsigned Opcode = Node->getOpcode();
10268 EVT VT =
LHS.getValueType();
10271 assert(VT ==
RHS.getValueType() &&
"Expected operands to be the same type");
10287 unsigned OverflowOp;
10302 llvm_unreachable(
"Expected method to receive signed or unsigned saturation "
10303 "addition or subtraction node.");
10311 unsigned BitWidth =
LHS.getScalarValueSizeInBits();
10314 SDValue SumDiff = Result.getValue(0);
10315 SDValue Overflow = Result.getValue(1);
10337 return DAG.
getSelect(dl, VT, Overflow, Zero, SumDiff);
10357 if (LHSIsNonNegative || RHSIsNonNegative) {
10359 return DAG.
getSelect(dl, VT, Overflow, SatMax, SumDiff);
10365 if (LHSIsNegative || RHSIsNegative) {
10367 return DAG.
getSelect(dl, VT, Overflow, SatMin, SumDiff);
10377 return DAG.
getSelect(dl, VT, Overflow, Result, SumDiff);
10381 unsigned Opcode = Node->getOpcode();
10384 EVT VT =
LHS.getValueType();
10385 EVT ResVT = Node->getValueType(0);
10402 unsigned Opcode = Node->getOpcode();
10406 EVT VT =
LHS.getValueType();
10411 "Expected a SHLSAT opcode");
10412 assert(VT ==
RHS.getValueType() &&
"Expected operands to be the same type");
10450 if (WideVT == MVT::i16)
10451 LC = RTLIB::MUL_I16;
10452 else if (WideVT == MVT::i32)
10453 LC = RTLIB::MUL_I32;
10454 else if (WideVT == MVT::i64)
10455 LC = RTLIB::MUL_I64;
10456 else if (WideVT == MVT::i128)
10457 LC = RTLIB::MUL_I128;
10466 unsigned HalfBits = Bits >> 1;
10505 if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.
getDataLayout())) {
10510 SDValue Args[] = {LL, LH, RL, RH};
10511 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
10513 SDValue Args[] = {LH, LL, RH, RL};
10514 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
10517 "Ret value is a collection of constituent nodes holding result.");
10520 Lo = Ret.getOperand(0);
10521 Hi = Ret.getOperand(1);
10523 Lo = Ret.getOperand(1);
10524 Hi = Ret.getOperand(0);
10533 EVT VT =
LHS.getValueType();
10534 assert(
RHS.getValueType() == VT &&
"Mismatching operand types");
10553 forceExpandWideMUL(DAG, dl,
Signed, WideVT,
LHS, HiLHS,
RHS, HiRHS,
Lo,
Hi);
10562 "Expected a fixed point multiplication opcode");
10567 EVT VT =
LHS.getValueType();
10568 unsigned Scale = Node->getConstantOperandVal(2);
10584 SDValue Product = Result.getValue(0);
10585 SDValue Overflow = Result.getValue(1);
10596 Result = DAG.
getSelect(dl, VT, ProdNeg, SatMin, SatMax);
10597 return DAG.
getSelect(dl, VT, Overflow, Result, Product);
10601 SDValue Product = Result.getValue(0);
10602 SDValue Overflow = Result.getValue(1);
10606 return DAG.
getSelect(dl, VT, Overflow, SatMax, Product);
10611 "Expected scale to be less than the number of bits if signed or at "
10612 "most the number of bits if unsigned.");
10614 "Expected both operands to be the same type");
10623 Lo = Result.getValue(0);
10624 Hi = Result.getValue(1);
10645 if (Scale == VTSize)
10691 return DAG.
getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
10716 "Expected a fixed point division opcode");
10718 EVT VT =
LHS.getValueType();
10740 if (LHSLead + RHSTrail < Scale + (
unsigned)(Saturating &&
Signed))
10743 unsigned LHSShift = std::min(LHSLead, Scale);
10744 unsigned RHSShift = Scale - LHSShift;
10801 bool IsAdd = Node->getOpcode() ==
ISD::UADDO;
10807 SDValue NodeCarry = DAG.
getNode(OpcCarry, dl, Node->getVTList(),
10808 { LHS, RHS, CarryIn });
10817 EVT ResultType = Node->getValueType(1);
10828 DAG.
getSetCC(dl, SetCCType, Result,
10847 bool IsAdd = Node->getOpcode() ==
ISD::SADDO;
10852 EVT ResultType = Node->getValueType(1);
10878 DAG.
getNode(
ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
10879 ResultType, ResultType);
10885 EVT VT = Node->getValueType(0);
10893 const APInt &
C = RHSC->getAPIntValue();
10895 if (
C.isPowerOf2()) {
10897 bool UseArithShift =
isSigned && !
C.isMinSignedValue();
10900 Overflow = DAG.
getSetCC(dl, SetCCVT,
10902 dl, VT, Result, ShiftAmt),
10915 static const unsigned Ops[2][3] =
10938 forceExpandWideMUL(DAG, dl,
isSigned,
LHS,
RHS, BottomHalf, TopHalf);
10941 Result = BottomHalf;
10948 Overflow = DAG.
getSetCC(dl, SetCCVT, TopHalf,
10953 EVT RType = Node->getValueType(1);
10958 "Unexpected result type for S/UMULO legalization");
10966 EVT VT =
Op.getValueType();
10970 "Expanding reductions for scalable vectors is undefined.");
10981 Op = DAG.
getNode(BaseOpcode, dl, HalfVT,
Lo,
Hi, Node->getFlags());
10993 for (
unsigned i = 1; i < NumElts; i++)
10994 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());
10997 if (EltVT != Node->getValueType(0))
11004 SDValue AccOp = Node->getOperand(0);
11005 SDValue VecOp = Node->getOperand(1);
11013 "Expanding reductions for scalable vectors is undefined.");
11023 for (
unsigned i = 0; i < NumElts; i++)
11024 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);
11031 EVT VT = Node->getValueType(0);
11036 SDValue Dividend = Node->getOperand(0);
11037 SDValue Divisor = Node->getOperand(1);
11040 Result = DAG.
getNode(DivRemOpc, dl, VTs, Dividend, Divisor).
getValue(1);
11045 SDValue Divide = DAG.
getNode(DivOpc, dl, VT, Dividend, Divisor);
11057 SDValue Src = Node->getOperand(0);
11060 EVT SrcVT = Src.getValueType();
11061 EVT DstVT = Node->getValueType(0);
11063 EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
11066 assert(SatWidth <= DstWidth &&
11067 "Expected saturation width smaller than result width");
11071 APInt MinInt, MaxInt;
11082 if (SrcVT == MVT::f16 || SrcVT == MVT::bf16) {
11084 SrcVT = Src.getValueType();
11105 if (AreExactFloatBounds && MinMaxLegal) {
11114 dl, DstVT, Clamped);
11126 return DAG.
getSelect(dl, DstVT, IsNan, ZeroInt, FpToInt);
11165 EVT OperandVT =
Op.getValueType();
11187 AbsWide = DAG.
getBitcast(OperandVT, ClearedSign);
11210 KeepNarrow = DAG.
getNode(
ISD::OR, dl, WideSetCCVT, KeepNarrow, AlreadyOdd);
11219 SDValue Adjust = DAG.
getSelect(dl, ResultIntVT, NarrowIsRd, One, NegativeOne);
11221 Op = DAG.
getSelect(dl, ResultIntVT, KeepNarrow, NarrowBits, Adjusted);
11233 EVT VT = Node->getValueType(0);
11236 if (Node->getConstantOperandVal(1) == 1) {
11239 EVT OperandVT =
Op.getValueType();
11251 EVT I32 =
F32.changeTypeToInteger();
11252 Op = expandRoundInexactToOdd(
F32,
Op, dl, DAG);
11277 EVT I16 = I32.isVector() ? I32.changeVectorElementType(MVT::i16) : MVT::i16;
11287 assert(Node->getValueType(0).isScalableVector() &&
11288 "Fixed length vector types expected to use SHUFFLE_VECTOR!");
11290 EVT VT = Node->getValueType(0);
11291 SDValue V1 = Node->getOperand(0);
11292 SDValue V2 = Node->getOperand(1);
11293 int64_t Imm = cast<ConstantSDNode>(Node->getOperand(2))->getSExtValue();
11312 EVT PtrVT = StackPtr.getValueType();
11314 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
11329 StackPtr = getVectorElementPointer(DAG, StackPtr, VT, Node->getOperand(2));
11331 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr,
11354 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr2,
11361 SDValue EVL,
bool &NeedInvert,
11363 bool IsSignaling)
const {
11365 MVT OpVT =
LHS.getSimpleValueType();
11367 NeedInvert =
false;
11368 assert(!EVL == !Mask &&
"VP Mask and EVL must either both be set or unset");
11369 bool IsNonVP = !EVL;
11384 bool NeedSwap =
false;
11385 InvCC = getSetCCInverse(CCCode, OpVT);
11413 "If SETUE is expanded, SETOEQ or SETUNE must be legal!");
11418 "If SETO is expanded, SETOEQ must be legal!");
11436 NeedInvert = ((
unsigned)CCCode & 0x8U);
11477 SetCC1 = DAG.
getSetCC(dl, VT,
LHS,
RHS, CC1, Chain, IsSignaling);
11478 SetCC2 = DAG.
getSetCC(dl, VT,
LHS,
RHS, CC2, Chain, IsSignaling);
11486 SetCC1 = DAG.
getSetCC(dl, VT,
LHS,
LHS, CC1, Chain, IsSignaling);
11487 SetCC2 = DAG.
getSetCC(dl, VT,
RHS,
RHS, CC2, Chain, IsSignaling);
11497 LHS = DAG.
getNode(Opc, dl, VT, SetCC1, SetCC2);
11501 Opc = Opc ==
ISD::OR ? ISD::VP_OR : ISD::VP_AND;
11502 LHS = DAG.
getNode(Opc, dl, VT, SetCC1, SetCC2, Mask, EVL);
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
block Block Frequency Analysis
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
static bool isUndef(ArrayRef< int > Mask)
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
static bool isNonZeroModBitWidthOrUndef(const MachineRegisterInfo &MRI, Register Reg, unsigned BW)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
unsigned const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const char LLVMTargetMachineRef TM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static bool lowerImmediateIfPossible(TargetLowering::ConstraintPair &P, SDValue Op, SelectionDAG *DAG, const TargetLowering &TLI)
If we have an immediate, see if we can lower it.
static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG)
static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, const APInt &UndefOp0, const APInt &UndefOp1)
Given a vector binary operation and known undefined elements for each input operand,...
static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx, EVT VecVT, const SDLoc &dl, ElementCount SubEC)
static unsigned getConstraintPiority(TargetLowering::ConstraintType CT)
Return a number indicating our preference for chosing a type of constraint over another,...
static std::optional< bool > isFCmpEqualZero(FPClassTest Test, const fltSemantics &Semantics, const MachineFunction &MF)
Returns a true value if if this FPClassTest can be performed with an ordered fcmp to 0,...
static void turnVectorIntoSplatVector(MutableArrayRef< SDValue > Values, std::function< bool(SDValue)> Predicate, SDValue AlternativeReplacement=SDValue())
If all values in Values that don't match the predicate are same 'splat' value, then replace all value...
static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT)
static SDValue foldSetCCWithRotate(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact SDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, SDValue N0, const APInt &C1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue combineShiftToAVG(SDValue Op, TargetLowering::TargetLoweringOpt &TLO, const TargetLowering &TLI, const APInt &DemandedBits, const APInt &DemandedElts, unsigned Depth)
This file describes how to lower LLVM code to machine code.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT, SelectionDAG &DAG)
Scalarize a vector store, bitcasting to TargetVT to determine the scalar type.
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
void clearAllBits()
Set every bit to 0.
APInt reverseBits() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
unsigned countLeadingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
void setAllBits()
Set every bit to 1.
APInt multiplicativeInverse() const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void clearHighBits(unsigned hiBits)
Set top hiBits bits to 0.
int64_t getSExtValue() const
Get sign extended value.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void setBitVal(unsigned BitPosition, bool BitValue)
Set a given bit to a given value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool hasAttributes() const
Return true if the builder has IR-level attributes.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
ConstantFP - Floating Point Values [float, double].
const APInt & getAPIntValue() const
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
AttributeList getAttributes() const
Return the attribute list for this Function.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
std::vector< std::string > ConstraintCodeVector
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
Wrapper class representing physical registers. Should be passed by value.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setAdjustsStack(bool V)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
Function & getFunction()
Return the LLVM function that this machine code represents.
@ EK_GPRel32BlockAddress
EK_GPRel32BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
const GlobalVariable * getNamedGlobal(StringRef Name) const
Return the global variable in the module with the specified name, of arbitrary type.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Class to represent pointers.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
static SDNodeIterator end(const SDNode *N)
static SDNodeIterator begin(const SDNode *N)
Represents one node in the SelectionDAG.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
bool willNotOverflowAdd(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the addition of 2 nodes can never overflow.
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
bool isKnownNeverSNaN(SDValue Op, unsigned Depth=0) const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N) const
Test whether the given value is a constant int or similar node.
SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
bool shouldOptForSize() const
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
static const fltSemantics & EVTToAPFloatSemantics(EVT VT)
Returns an APFloat semantics tag appropriate for the given type.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
std::optional< uint64_t > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
std::optional< uint64_t > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL, bool LegalTypes=true)
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Class to represent struct types.
void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
virtual bool shouldRemoveRedundantExtend(SDValue Op) const
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
const TargetMachine & getTargetMachine() const
virtual bool isCtpopFast(EVT VT) const
Return true if ctpop instruction is fast.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const
Indicates if any padding is guaranteed to go at the most significant bits when storing the type to me...
virtual EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &) const
Returns the target specific optimal type for load and store operations as a result of memset,...
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some oth...
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
virtual bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const
Should we transform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool isLegalRC(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual bool shouldExtendTypeInLibCall(EVT Type) const
Returns true if arguments should be extended in lib calls.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
MVT getSimpleValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal on this target.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
virtual bool hasAndNotCompare(SDValue Y) const
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
virtual bool isCtlzFast() const
Return true if ctlz instruction is fast.
virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, bool IsSigned) const
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonica...
NegatibleCost
Enum that specifies when a float negation is beneficial.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const
Get the CondCode that's to be used to test the result of the comparison libcall against zero.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal or custom on this target.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
MulExpansionKind
Enum that specifies when a multiplication should be expanded.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Build sdiv by power-of-2 with conditional move instructions Ref: "Hacker's Delight" by Henry Warren 1...
virtual ConstraintWeight getMultipleConstraintMatchWeight(AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]MULO.
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL into two nodes.
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand VP_BSWAP nodes.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand CTLZ/CTLZ_ZERO_UNDEF nodes.
SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand BITREVERSE nodes.
SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand CTTZ/CTTZ_ZERO_UNDEF nodes.
virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr, int JTI, SelectionDAG &DAG) const
Expands target specific indirect branch for the case of JumpTable expansion.
SDValue expandABD(SDNode *N, SelectionDAG &DAG) const
Expand ABDS/ABDU nodes.
virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine the known alignment for the pointer value R.
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]SHLSAT.
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const
Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all bits from only some vector eleme...
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::S(ADD|SUB)O.
SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand VP_BITREVERSE nodes.
SDValue expandABS(SDNode *N, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_* into an explicit calculation.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
SDValue expandVPCTTZElements(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_UNDEF nodes.
virtual const char * getTargetNodeName(unsigned Opcode) const
This method returns the name of a target specific DAG node.
bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand float to UINT conversion.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded vector elements, returning true on success...
bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const
Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
virtual const char * LowerXConstraint(EVT ConstraintVT) const
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand CTPOP nodes.
SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand BSWAP nodes.
SDValue expandFMINIMUM_FMAXIMUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimum/fmaximum into multiple comparison with selects.
SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op, unsigned NumBitsPerElt) const
Expand CTTZ via Table Lookup.
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, ...
bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, SDValue LL=SDValue(), SDValue LH=SDValue()) const
Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit urem by constant and other arit...
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool isPositionIndependent() const
std::pair< StringRef, TargetLowering::ConstraintType > ConstraintPair
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const
Given an OpInfo with list of constraints codes as strings, return a sorted Vector of pairs of constra...
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand float(f32) to SINT(i64) conversion.
virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations.
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed, EVT WideVT, const SDValue LL, const SDValue LH, const SDValue RL, const SDValue RH, SDValue &Lo, SDValue &Hi) const
forceExpandWideMUL - Unconditionally expand a MUL into either a libcall or brute force via a wide mul...
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const
Return true if it is profitable to combine an XOR of a logical shift to create a logical shift of NOT...
TargetLowering(const TargetLowering &)=delete
virtual bool shouldSimplifyDemandedVectorElts(SDValue Op, const TargetLoweringOpt &TLO) const
Return true if the target supports simplifying demanded vector elements by converting them to undefs.
bool isConstFalseVal(SDValue N) const
Return if the N is a constant or constant vector equal to the false value from getBooleanContents().
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will ...
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, const SDLoc &dl) const
Try to simplify a setcc built with the specified operands and cc.
SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const
Expand funnel shift.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, SDValue Mask, SDValue EVL, bool &NeedInvert, const SDLoc &dl, SDValue &Chain, bool IsSignaling=false) const
Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC on the current target.
bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const
Return if N is a True value when extended to VT.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &DemandedBits, TargetLoweringOpt &TLO) const
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
bool isConstTrueVal(SDValue N) const
Return if the N is a constant or constant vector equal to the true value from getBooleanContents().
SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTPOP nodes.
SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, SDValue LHS, SDValue RHS, unsigned Scale, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]DIVFIX[SAT].
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const
Try to convert the fminnum/fmaxnum to a compare/select sequence.
SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const
Expand rotations.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]CMP.
void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target...
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT].
SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][MIN|MAX].
virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis, Register R, KnownBits &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::U(ADD|SUB)O.
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SREM lowering for power-of-2 denominators.
bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand UINT(i64) to double(f64) conversion.
bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS, SDValue RHS, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, respectively,...
SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const
Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
iterator_range< regclass_iterator > regclasses() const
virtual StringRef getRegAsmName(MCRegister Reg) const
Return the assembly name for Reg.
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
const fltSemantics & getFltSemantics() const
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
StringRef getName() const
Return a constant reference to the value's name.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantSDNode predicate.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
FPClassTest invertFPClassTestIfSimpler(FPClassTest Test)
Evaluates if the specified FP class test is better performed as the inverse (i.e.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
auto find_if_not(R &&Range, UnaryPredicate P)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
constexpr unsigned BitWidth
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
APFloat neg(APFloat X)
Returns the negated value of the argument.
unsigned Log2(Align A)
Returns the log2 of the alignment.
uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the largest uint64_t less than or equal to Value and is Skew mod Align.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
opStatus
IEEE-754R 7: Default exception handling.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
int MatchingInput
MatchingInput - If this is not -1, this is an output constraint where an input constraint is required...
ConstraintCodeVector Codes
Code - The constraint code, either the register name (in braces) or the constraint letter/number.
SubConstraintInfoVector multipleAlternatives
multipleAlternatives - If there are multiple alternative constraints, this array will contain them.
bool isIndirect
isIndirect - True if this operand is an indirect operand.
bool hasMatchingInput() const
hasMatchingInput - Return true if this is an output constraint that has a matching input constraint.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
bool isUnknown() const
Returns true if we don't know any bits.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
static std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
static std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
static std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
static std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasNoUnsignedWrap() const
bool hasNoSignedWrap() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Magic data for optimising signed division by a constant.
unsigned ShiftAmount
shift amount
static SignedDivisionByConstantInfo get(const APInt &D)
Calculate the magic numbers required to implement a signed integer division by a constant as a sequence of multiplies, adds and shifts.
This contains information for each constraint that we are lowering.
MVT ConstraintVT
The ValueType for the operand value.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
std::string ConstraintCode
This contains the actual string for the code, like "m".
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand number.
unsigned getMatchedOperand() const
If this is an input matching constraint, this method returns the output operand it matches.
bool isMatchingInputConstraint() const
Return true of this is an input operand that is a matching constraint like "4".
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)
bool isBeforeLegalizeOps() const
void AddToWorklist(SDNode *N)
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setIsPostTypeLegalization(bool Value=true)
ArrayRef< EVT > OpsVTBeforeSoften
bool IsPostTypeLegalization
MakeLibCallOptions & setSExt(bool Value=true)
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT, bool Value=true)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetLowering to its clients that want to combine.
bool CombineTo(SDValue O, SDValue N)
bool LegalOperations() const
Magic data for optimising unsigned division by a constant.
unsigned PreShift
pre-shift amount
static UnsignedDivisionByConstantInfo get(const APInt &D, unsigned LeadingZeros=0, bool AllowEvenDivisorOptimization=true)
Calculate the magic numbers required to implement an unsigned integer division by a constant as a sequence of multiplies, adds and shifts.
unsigned PostShift
post-shift amount