41#define DEBUG_TYPE "gisel-known-bits"
49 "Analysis for ComputingKnownBits",
false,
true)
52 : MF(MF),
MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
57 switch (
MI->getOpcode()) {
58 case TargetOpcode::COPY:
60 case TargetOpcode::G_ASSERT_ALIGN: {
62 return Align(
MI->getOperand(2).getImm());
64 case TargetOpcode::G_FRAME_INDEX: {
65 int FrameIdx =
MI->getOperand(1).getIndex();
66 return MF.getFrameInfo().getObjectAlign(FrameIdx);
68 case TargetOpcode::G_INTRINSIC:
69 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
70 case TargetOpcode::G_INTRINSIC_CONVERGENT:
71 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
73 return TL.computeKnownAlignForTargetInstr(*
this, R, MRI,
Depth + 1);
78 assert(
MI.getNumExplicitDefs() == 1 &&
79 "expected single return generic instruction");
84 const LLT Ty = MRI.getType(R);
94 const APInt &DemandedElts,
102 LLT Ty = MRI.getType(R);
103 unsigned BitWidth = Ty.getScalarSizeInBits();
115[[maybe_unused]]
static void
118 <<
"] Computed for: " <<
MI <<
"[" <<
Depth <<
"] Known: 0x"
129 const APInt &DemandedElts,
160 const APInt &DemandedElts,
163 unsigned Opcode =
MI.getOpcode();
164 LLT DstTy = MRI.getType(R);
178 "DemandedElt width should equal the fixed vector number of elements");
181 "DemandedElt width should be 1 for scalars or scalable vectors");
206 TL.computeKnownBitsForTargetInstr(*
this, R, Known, DemandedElts, MRI,
209 case TargetOpcode::G_BUILD_VECTOR: {
214 if (!DemandedElts[
I])
228 case TargetOpcode::G_SPLAT_VECTOR: {
236 case TargetOpcode::COPY:
237 case TargetOpcode::G_PHI:
238 case TargetOpcode::PHI: {
244 assert(
MI.getOperand(0).getSubReg() == 0 &&
"Is this code in SSA?");
247 for (
unsigned Idx = 1; Idx <
MI.getNumOperands(); Idx += 2) {
250 LLT SrcTy = MRI.getType(SrcReg);
258 if (SrcReg.
isVirtual() && Src.getSubReg() == 0 &&
268 Depth + (Opcode != TargetOpcode::COPY));
283 case TargetOpcode::G_CONSTANT: {
287 case TargetOpcode::G_FRAME_INDEX: {
288 int FrameIdx =
MI.getOperand(1).getIndex();
289 TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF);
292 case TargetOpcode::G_SUB: {
300 case TargetOpcode::G_XOR: {
309 case TargetOpcode::G_PTR_ADD: {
313 LLT Ty = MRI.getType(
MI.getOperand(1).getReg());
314 if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
318 case TargetOpcode::G_ADD: {
326 case TargetOpcode::G_AND: {
336 case TargetOpcode::G_OR: {
346 case TargetOpcode::G_MUL: {
354 case TargetOpcode::G_UMULH: {
362 case TargetOpcode::G_SMULH: {
370 case TargetOpcode::G_SELECT: {
371 computeKnownBitsMin(
MI.getOperand(2).getReg(),
MI.getOperand(3).getReg(),
372 Known, DemandedElts,
Depth + 1);
375 case TargetOpcode::G_SMIN: {
385 case TargetOpcode::G_SMAX: {
395 case TargetOpcode::G_UMIN: {
404 case TargetOpcode::G_UMAX: {
413 case TargetOpcode::G_FCMP:
414 case TargetOpcode::G_ICMP: {
417 if (TL.getBooleanContents(DstTy.
isVector(),
418 Opcode == TargetOpcode::G_FCMP) ==
424 case TargetOpcode::G_SEXT: {
432 case TargetOpcode::G_ASSERT_SEXT:
433 case TargetOpcode::G_SEXT_INREG: {
436 Known = Known.
sextInReg(
MI.getOperand(2).getImm());
439 case TargetOpcode::G_ANYEXT: {
445 case TargetOpcode::G_LOAD: {
453 case TargetOpcode::G_SEXTLOAD:
454 case TargetOpcode::G_ZEXTLOAD: {
461 Known = Opcode == TargetOpcode::G_SEXTLOAD
466 case TargetOpcode::G_ASHR: {
475 case TargetOpcode::G_LSHR: {
484 case TargetOpcode::G_SHL: {
493 case TargetOpcode::G_INTTOPTR:
494 case TargetOpcode::G_PTRTOINT:
499 case TargetOpcode::G_ZEXT:
500 case TargetOpcode::G_TRUNC: {
506 case TargetOpcode::G_ASSERT_ZEXT: {
510 unsigned SrcBitWidth =
MI.getOperand(2).getImm();
511 assert(SrcBitWidth &&
"SrcBitWidth can't be zero");
513 Known.
Zero |= (~InMask);
514 Known.
One &= (~Known.Zero);
517 case TargetOpcode::G_ASSERT_ALIGN: {
518 int64_t LogOfAlign =
Log2_64(
MI.getOperand(2).getImm());
527 case TargetOpcode::G_MERGE_VALUES: {
528 unsigned NumOps =
MI.getNumOperands();
529 unsigned OpSize = MRI.getType(
MI.getOperand(1).getReg()).getSizeInBits();
531 for (
unsigned I = 0;
I !=
NumOps - 1; ++
I) {
534 DemandedElts,
Depth + 1);
539 case TargetOpcode::G_UNMERGE_VALUES: {
540 unsigned NumOps =
MI.getNumOperands();
542 LLT SrcTy = MRI.getType(SrcReg);
544 if (SrcTy.isVector() && SrcTy.getScalarType() != DstTy.
getScalarType())
549 for (; DstIdx !=
NumOps - 1 &&
MI.getOperand(DstIdx).
getReg() != R;
553 APInt SubDemandedElts = DemandedElts;
554 if (SrcTy.isVector()) {
557 DemandedElts.
zext(SrcTy.getNumElements()).
shl(DstIdx * DstLanes);
563 if (SrcTy.isVector())
564 Known = std::move(SrcOpKnown);
569 case TargetOpcode::G_BSWAP: {
575 case TargetOpcode::G_BITREVERSE: {
581 case TargetOpcode::G_CTPOP: {
593 case TargetOpcode::G_UBFX: {
594 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
604 case TargetOpcode::G_SBFX: {
605 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
622 case TargetOpcode::G_UADDO:
623 case TargetOpcode::G_UADDE:
624 case TargetOpcode::G_SADDO:
625 case TargetOpcode::G_SADDE: {
626 if (
MI.getOperand(1).getReg() == R) {
629 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
636 assert(
MI.getOperand(0).getReg() == R &&
637 "We only compute knownbits for the sum here.");
640 if (Opcode == TargetOpcode::G_UADDE || Opcode == TargetOpcode::G_SADDE) {
644 Carry = Carry.
trunc(1);
656 case TargetOpcode::G_USUBO:
657 case TargetOpcode::G_USUBE:
658 case TargetOpcode::G_SSUBO:
659 case TargetOpcode::G_SSUBE:
660 case TargetOpcode::G_UMULO:
661 case TargetOpcode::G_SMULO: {
662 if (
MI.getOperand(1).getReg() == R) {
665 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
672 case TargetOpcode::G_CTLZ:
673 case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
683 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
690 LLT VecVT = MRI.getType(InVec);
708 if (ConstEltNo && ConstEltNo->ult(NumSrcElts))
715 case TargetOpcode::G_SHUFFLE_VECTOR: {
716 APInt DemandedLHS, DemandedRHS;
719 unsigned NumElts = MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
721 DemandedElts, DemandedLHS, DemandedRHS))
742 case TargetOpcode::G_CONCAT_VECTORS: {
743 if (MRI.getType(
MI.getOperand(0).getReg()).isScalableVector())
748 unsigned NumSubVectorElts =
749 MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
753 DemandedElts.
extractBits(NumSubVectorElts,
I * NumSubVectorElts);
765 case TargetOpcode::G_ABS: {
779 Ty = Ty.getScalarType();
788 LLT Ty = MRI.getType(R);
791 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known,
Depth);
794void GISelValueTracking::computeKnownFPClassForFPTrunc(
802 KnownFPClass KnownSrc;
803 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
816void GISelValueTracking::computeKnownFPClass(
Register R,
817 const APInt &DemandedElts,
821 assert(Known.
isUnknown() &&
"should not be called with known information");
831 MachineInstr &
MI = *MRI.getVRegDef(R);
832 unsigned Opcode =
MI.getOpcode();
833 LLT DstTy = MRI.getType(R);
841 switch (Cst->getKind()) {
843 auto APF = Cst->getScalarValue();
845 Known.
SignBit = APF.isNegative();
850 bool SignBitAllZero =
true;
851 bool SignBitAllOne =
true;
853 for (
auto C : *Cst) {
856 SignBitAllZero =
false;
858 SignBitAllOne =
false;
861 if (SignBitAllOne != SignBitAllZero)
877 KnownNotFromFlags |=
fcNan;
879 KnownNotFromFlags |=
fcInf;
883 InterestedClasses &= ~KnownNotFromFlags;
886 [=, &Known] { Known.
knownNot(KnownNotFromFlags); });
892 const MachineFunction *MF =
MI.getMF();
896 TL.computeKnownFPClassForTargetInstr(*
this, R, Known, DemandedElts, MRI,
899 case TargetOpcode::G_FNEG: {
901 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
Depth + 1);
905 case TargetOpcode::G_SELECT: {
928 bool LookThroughFAbsFNeg = CmpLHS !=
LHS && CmpLHS !=
RHS;
929 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
935 MaskIfTrue = TestedMask;
936 MaskIfFalse = ~TestedMask;
939 if (TestedValue ==
LHS) {
941 FilterLHS = MaskIfTrue;
942 }
else if (TestedValue ==
RHS) {
944 FilterRHS = MaskIfFalse;
948 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses & FilterLHS, Known,
952 computeKnownFPClass(
RHS, DemandedElts, InterestedClasses & FilterRHS,
959 case TargetOpcode::G_FCOPYSIGN: {
960 Register Magnitude =
MI.getOperand(1).getReg();
963 KnownFPClass KnownSign;
965 computeKnownFPClass(Magnitude, DemandedElts, InterestedClasses, Known,
967 computeKnownFPClass(Sign, DemandedElts, InterestedClasses, KnownSign,
972 case TargetOpcode::G_FMA:
973 case TargetOpcode::G_STRICT_FMA:
974 case TargetOpcode::G_FMAD: {
989 KnownFPClass KnownAddend;
990 computeKnownFPClass(
C, DemandedElts, InterestedClasses, KnownAddend,
997 case TargetOpcode::G_FSQRT:
998 case TargetOpcode::G_STRICT_FSQRT: {
999 KnownFPClass KnownSrc;
1001 if (InterestedClasses &
fcNan)
1006 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1021 case TargetOpcode::G_FABS: {
1026 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
1032 case TargetOpcode::G_FSIN:
1033 case TargetOpcode::G_FCOS:
1034 case TargetOpcode::G_FSINCOS: {
1037 KnownFPClass KnownSrc;
1039 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1047 case TargetOpcode::G_FMAXNUM:
1048 case TargetOpcode::G_FMINNUM:
1049 case TargetOpcode::G_FMINNUM_IEEE:
1050 case TargetOpcode::G_FMAXIMUM:
1051 case TargetOpcode::G_FMINIMUM:
1052 case TargetOpcode::G_FMAXNUM_IEEE:
1053 case TargetOpcode::G_FMAXIMUMNUM:
1054 case TargetOpcode::G_FMINIMUMNUM: {
1057 KnownFPClass KnownLHS, KnownRHS;
1059 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses, KnownLHS,
1061 computeKnownFPClass(
RHS, DemandedElts, InterestedClasses, KnownRHS,
1065 Known = KnownLHS | KnownRHS;
1068 if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||
1069 Opcode == TargetOpcode::G_FMAXNUM ||
1070 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1071 Opcode == TargetOpcode::G_FMAXIMUMNUM))
1074 if (Opcode == TargetOpcode::G_FMAXNUM ||
1075 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1076 Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
1084 }
else if (Opcode == TargetOpcode::G_FMAXIMUM) {
1090 }
else if (Opcode == TargetOpcode::G_FMINNUM ||
1091 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1092 Opcode == TargetOpcode::G_FMINNUM_IEEE) {
1100 }
else if (Opcode == TargetOpcode::G_FMINIMUM) {
1132 }
else if ((Opcode == TargetOpcode::G_FMAXIMUM ||
1133 Opcode == TargetOpcode::G_FMINIMUM) ||
1134 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1135 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1136 Opcode == TargetOpcode::G_FMAXNUM_IEEE ||
1137 Opcode == TargetOpcode::G_FMINNUM_IEEE ||
1143 if ((Opcode == TargetOpcode::G_FMAXIMUM ||
1144 Opcode == TargetOpcode::G_FMAXNUM ||
1145 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1146 Opcode == TargetOpcode::G_FMAXNUM_IEEE) &&
1149 else if ((Opcode == TargetOpcode::G_FMINIMUM ||
1150 Opcode == TargetOpcode::G_FMINNUM ||
1151 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1152 Opcode == TargetOpcode::G_FMINNUM_IEEE) &&
1159 case TargetOpcode::G_FCANONICALIZE: {
1161 KnownFPClass KnownSrc;
1162 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1184 DenormalMode DenormMode = MF->getDenormalMode(FPType);
1203 case TargetOpcode::G_VECREDUCE_FMAX:
1204 case TargetOpcode::G_VECREDUCE_FMIN:
1205 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1206 case TargetOpcode::G_VECREDUCE_FMINIMUM: {
1212 computeKnownFPClass(Val,
MI.getFlags(), InterestedClasses,
Depth + 1);
1218 case TargetOpcode::G_TRUNC:
1219 case TargetOpcode::G_FFLOOR:
1220 case TargetOpcode::G_FCEIL:
1221 case TargetOpcode::G_FRINT:
1222 case TargetOpcode::G_FNEARBYINT:
1223 case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
1224 case TargetOpcode::G_INTRINSIC_ROUND: {
1226 KnownFPClass KnownSrc;
1232 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1249 case TargetOpcode::G_FEXP:
1250 case TargetOpcode::G_FEXP2:
1251 case TargetOpcode::G_FEXP10: {
1257 KnownFPClass KnownSrc;
1258 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1267 case TargetOpcode::G_FLOG:
1268 case TargetOpcode::G_FLOG2:
1269 case TargetOpcode::G_FLOG10: {
1284 KnownFPClass KnownSrc;
1285 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1295 DenormalMode
Mode = MF->getDenormalMode(FltSem);
1302 case TargetOpcode::G_FPOWI: {
1307 LLT ExpTy = MRI.getType(Exp);
1309 Exp, ExpTy.
isVector() ? DemandedElts : APInt(1, 1),
Depth + 1);
1311 if (ExponentKnownBits.
Zero[0]) {
1325 KnownFPClass KnownSrc;
1326 computeKnownFPClass(Val, DemandedElts,
fcNegative, KnownSrc,
Depth + 1);
1331 case TargetOpcode::G_FLDEXP:
1332 case TargetOpcode::G_STRICT_FLDEXP: {
1334 KnownFPClass KnownSrc;
1335 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1352 if ((InterestedClasses & ExpInfoMask) ==
fcNone)
1361 case TargetOpcode::G_INTRINSIC_ROUNDEVEN: {
1362 computeKnownFPClassForFPTrunc(
MI, DemandedElts, InterestedClasses, Known,
1366 case TargetOpcode::G_FADD:
1367 case TargetOpcode::G_STRICT_FADD:
1368 case TargetOpcode::G_FSUB:
1369 case TargetOpcode::G_STRICT_FSUB: {
1372 KnownFPClass KnownLHS, KnownRHS;
1374 (Opcode == TargetOpcode::G_FADD ||
1375 Opcode == TargetOpcode::G_STRICT_FADD) &&
1377 bool WantNaN = (InterestedClasses &
fcNan) !=
fcNone;
1380 if (!WantNaN && !WantNegative && !WantNegZero)
1386 if (InterestedClasses &
fcNan)
1387 InterestedSrcs |=
fcInf;
1388 computeKnownFPClass(
RHS, DemandedElts, InterestedSrcs, KnownRHS,
Depth + 1);
1393 (Opcode == TargetOpcode::G_FSUB ||
1394 Opcode == TargetOpcode::G_STRICT_FSUB)) {
1398 computeKnownFPClass(
LHS, DemandedElts, InterestedSrcs, KnownLHS,
1406 if (Opcode == TargetOpcode::G_FADD ||
1407 Opcode == TargetOpcode::G_STRICT_FADD) {
1434 case TargetOpcode::G_FMUL:
1435 case TargetOpcode::G_STRICT_FMUL: {
1448 KnownFPClass KnownLHS, KnownRHS;
1449 computeKnownFPClass(
RHS, DemandedElts, NeedForNan, KnownRHS,
Depth + 1);
1453 computeKnownFPClass(
LHS, DemandedElts, NeedForNan, KnownLHS,
Depth + 1);
1480 case TargetOpcode::G_FDIV:
1481 case TargetOpcode::G_FREM: {
1487 if (Opcode == TargetOpcode::G_FDIV) {
1498 const bool WantNan = (InterestedClasses &
fcNan) !=
fcNone;
1500 const bool WantPositive = Opcode == TargetOpcode::G_FREM &&
1502 if (!WantNan && !WantNegative && !WantPositive)
1505 KnownFPClass KnownLHS, KnownRHS;
1508 KnownRHS,
Depth + 1);
1510 bool KnowSomethingUseful =
1513 if (KnowSomethingUseful || WantPositive) {
1518 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses & InterestedLHS,
1519 KnownLHS,
Depth + 1);
1522 if (Opcode == TargetOpcode::G_FDIV) {
1563 case TargetOpcode::G_FPEXT: {
1567 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known,
Depth + 1);
1571 LLT SrcTy = MRI.getType(Src).getScalarType();
1588 case TargetOpcode::G_FPTRUNC: {
1589 computeKnownFPClassForFPTrunc(
MI, DemandedElts, InterestedClasses, Known,
1593 case TargetOpcode::G_SITOFP:
1594 case TargetOpcode::G_UITOFP: {
1603 if (Opcode == TargetOpcode::G_UITOFP)
1607 LLT Ty = MRI.getType(Val);
1609 if (InterestedClasses &
fcInf) {
1614 if (Opcode == TargetOpcode::G_SITOFP)
1628 case TargetOpcode::G_BUILD_VECTOR:
1629 case TargetOpcode::G_CONCAT_VECTORS: {
1636 for (
unsigned Idx = 0; Idx <
Merge.getNumSources(); ++Idx) {
1638 bool NeedsElt = DemandedElts[Idx];
1644 computeKnownFPClass(Src, Known, InterestedClasses,
Depth + 1);
1647 KnownFPClass Known2;
1648 computeKnownFPClass(Src, Known2, InterestedClasses,
Depth + 1);
1660 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1670 LLT VecTy = MRI.getType(Vec);
1675 if (CIdx && CIdx->ult(NumElts))
1677 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
1683 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1689 LLT VecTy = MRI.getType(Vec);
1697 APInt DemandedVecElts = DemandedElts;
1698 bool NeedsElt =
true;
1700 if (CIdx && CIdx->ult(NumElts)) {
1701 DemandedVecElts.
clearBit(CIdx->getZExtValue());
1702 NeedsElt = DemandedElts[CIdx->getZExtValue()];
1707 computeKnownFPClass(Elt, Known, InterestedClasses,
Depth + 1);
1716 if (!DemandedVecElts.
isZero()) {
1717 KnownFPClass Known2;
1718 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
1725 case TargetOpcode::G_SHUFFLE_VECTOR: {
1729 APInt DemandedLHS, DemandedRHS;
1731 assert(DemandedElts == APInt(1, 1));
1732 DemandedLHS = DemandedRHS = DemandedElts;
1735 DemandedElts, DemandedLHS,
1742 if (!!DemandedLHS) {
1744 computeKnownFPClass(
LHS, DemandedLHS, InterestedClasses, Known,
1754 if (!!DemandedRHS) {
1755 KnownFPClass Known2;
1757 computeKnownFPClass(
RHS, DemandedRHS, InterestedClasses, Known2,
1763 case TargetOpcode::COPY: {
1766 if (!Src.isVirtual())
1769 computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known,
Depth + 1);
1780 computeKnownFPClass(R, DemandedElts, InterestedClasses, KnownClasses,
Depth);
1781 return KnownClasses;
1787 computeKnownFPClass(R, Known, InterestedClasses,
Depth);
1795 InterestedClasses &=
~fcNan;
1797 InterestedClasses &=
~fcInf;
1800 computeKnownFPClass(R, DemandedElts, InterestedClasses,
Depth);
1803 Result.KnownFPClasses &=
~fcNan;
1805 Result.KnownFPClasses &=
~fcInf;
1811 LLT Ty = MRI.getType(R);
1812 APInt DemandedElts =
1814 return computeKnownFPClass(R, DemandedElts, Flags, InterestedClasses,
Depth);
1818unsigned GISelValueTracking::computeNumSignBitsMin(
Register Src0,
Register Src1,
1819 const APInt &DemandedElts,
1823 if (Src1SignBits == 1)
1840 case TargetOpcode::G_SEXTLOAD:
1843 case TargetOpcode::G_ZEXTLOAD:
1856 const APInt &DemandedElts,
1859 unsigned Opcode =
MI.getOpcode();
1861 if (Opcode == TargetOpcode::G_CONSTANT)
1862 return MI.getOperand(1).getCImm()->getValue().getNumSignBits();
1870 LLT DstTy = MRI.getType(R);
1880 unsigned FirstAnswer = 1;
1882 case TargetOpcode::COPY: {
1884 if (Src.getReg().isVirtual() && Src.getSubReg() == 0 &&
1885 MRI.getType(Src.getReg()).isValid()) {
1892 case TargetOpcode::G_SEXT: {
1894 LLT SrcTy = MRI.getType(Src);
1898 case TargetOpcode::G_ASSERT_SEXT:
1899 case TargetOpcode::G_SEXT_INREG: {
1902 unsigned SrcBits =
MI.getOperand(2).getImm();
1903 unsigned InRegBits = TyBits - SrcBits + 1;
1907 case TargetOpcode::G_LOAD: {
1914 case TargetOpcode::G_SEXTLOAD: {
1929 case TargetOpcode::G_ZEXTLOAD: {
1944 case TargetOpcode::G_AND:
1945 case TargetOpcode::G_OR:
1946 case TargetOpcode::G_XOR: {
1948 unsigned Src1NumSignBits =
1950 if (Src1NumSignBits != 1) {
1952 unsigned Src2NumSignBits =
1954 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits);
1958 case TargetOpcode::G_ASHR: {
1963 FirstAnswer = std::min<uint64_t>(FirstAnswer + *
C, TyBits);
1966 case TargetOpcode::G_SHL: {
1969 if (std::optional<ConstantRange> ShAmtRange =
1971 uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
1972 uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
1982 if (ExtOpc == TargetOpcode::G_SEXT || ExtOpc == TargetOpcode::G_ZEXT ||
1983 ExtOpc == TargetOpcode::G_ANYEXT) {
1984 LLT ExtTy = MRI.getType(Src1);
1986 LLT ExtendeeTy = MRI.getType(Extendee);
1990 if (SizeDiff <= MinShAmt) {
1994 return Tmp - MaxShAmt;
2000 return Tmp - MaxShAmt;
2004 case TargetOpcode::G_TRUNC: {
2006 LLT SrcTy = MRI.getType(Src);
2010 unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
2012 if (NumSrcSignBits > (NumSrcBits - DstTyBits))
2013 return NumSrcSignBits - (NumSrcBits - DstTyBits);
2016 case TargetOpcode::G_SELECT: {
2017 return computeNumSignBitsMin(
MI.getOperand(2).getReg(),
2018 MI.getOperand(3).getReg(), DemandedElts,
2021 case TargetOpcode::G_SMIN:
2022 case TargetOpcode::G_SMAX:
2023 case TargetOpcode::G_UMIN:
2024 case TargetOpcode::G_UMAX:
2026 return computeNumSignBitsMin(
MI.getOperand(1).getReg(),
2027 MI.getOperand(2).getReg(), DemandedElts,
2029 case TargetOpcode::G_SADDO:
2030 case TargetOpcode::G_SADDE:
2031 case TargetOpcode::G_UADDO:
2032 case TargetOpcode::G_UADDE:
2033 case TargetOpcode::G_SSUBO:
2034 case TargetOpcode::G_SSUBE:
2035 case TargetOpcode::G_USUBO:
2036 case TargetOpcode::G_USUBE:
2037 case TargetOpcode::G_SMULO:
2038 case TargetOpcode::G_UMULO: {
2042 if (
MI.getOperand(1).getReg() == R) {
2043 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
2050 case TargetOpcode::G_SUB: {
2052 unsigned Src2NumSignBits =
2054 if (Src2NumSignBits == 1)
2064 if ((Known2.
Zero | 1).isAllOnes())
2071 FirstAnswer = Src2NumSignBits;
2078 unsigned Src1NumSignBits =
2080 if (Src1NumSignBits == 1)
2085 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
2088 case TargetOpcode::G_ADD: {
2090 unsigned Src2NumSignBits =
2092 if (Src2NumSignBits <= 2)
2096 unsigned Src1NumSignBits =
2098 if (Src1NumSignBits == 1)
2107 if ((Known1.
Zero | 1).isAllOnes())
2113 FirstAnswer = Src1NumSignBits;
2122 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
2125 case TargetOpcode::G_FCMP:
2126 case TargetOpcode::G_ICMP: {
2127 bool IsFP = Opcode == TargetOpcode::G_FCMP;
2130 auto BC = TL.getBooleanContents(DstTy.
isVector(), IsFP);
2137 case TargetOpcode::G_BUILD_VECTOR: {
2139 FirstAnswer = TyBits;
2140 APInt SingleDemandedElt(1, 1);
2142 if (!DemandedElts[
I])
2147 FirstAnswer = std::min(FirstAnswer, Tmp2);
2150 if (FirstAnswer == 1)
2155 case TargetOpcode::G_CONCAT_VECTORS: {
2156 if (MRI.getType(
MI.getOperand(0).getReg()).isScalableVector())
2158 FirstAnswer = TyBits;
2161 unsigned NumSubVectorElts =
2162 MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
2165 DemandedElts.
extractBits(NumSubVectorElts,
I * NumSubVectorElts);
2170 FirstAnswer = std::min(FirstAnswer, Tmp2);
2173 if (FirstAnswer == 1)
2178 case TargetOpcode::G_SHUFFLE_VECTOR: {
2181 APInt DemandedLHS, DemandedRHS;
2183 unsigned NumElts = MRI.getType(Src1).getNumElements();
2185 DemandedElts, DemandedLHS, DemandedRHS))
2191 if (FirstAnswer == 1)
2193 if (!!DemandedRHS) {
2196 FirstAnswer = std::min(FirstAnswer, Tmp2);
2200 case TargetOpcode::G_SPLAT_VECTOR: {
2204 unsigned NumSrcBits = MRI.getType(Src).getSizeInBits();
2205 if (NumSrcSignBits > (NumSrcBits - TyBits))
2206 return NumSrcSignBits - (NumSrcBits - TyBits);
2209 case TargetOpcode::G_INTRINSIC:
2210 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2211 case TargetOpcode::G_INTRINSIC_CONVERGENT:
2212 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
2215 TL.computeNumSignBitsForTargetInstr(*
this, R, DemandedElts, MRI,
Depth);
2217 FirstAnswer = std::max(FirstAnswer, NumBits);
2237 Mask <<= Mask.getBitWidth() - TyBits;
2238 return std::max(FirstAnswer, Mask.countl_one());
2242 LLT Ty = MRI.getType(R);
2243 APInt DemandedElts =
2252 unsigned Opcode =
MI.getOpcode();
2254 LLT Ty = MRI.getType(R);
2255 unsigned BitWidth = Ty.getScalarSizeInBits();
2257 if (Opcode == TargetOpcode::G_CONSTANT) {
2258 const APInt &ShAmt =
MI.getOperand(1).getCImm()->getValue();
2260 return std::nullopt;
2264 if (Opcode == TargetOpcode::G_BUILD_VECTOR) {
2265 const APInt *MinAmt =
nullptr, *MaxAmt =
nullptr;
2266 for (
unsigned I = 0, E =
MI.getNumOperands() - 1;
I != E; ++
I) {
2267 if (!DemandedElts[
I])
2270 if (
Op->getOpcode() != TargetOpcode::G_CONSTANT) {
2271 MinAmt = MaxAmt =
nullptr;
2275 const APInt &ShAmt =
Op->getOperand(1).getCImm()->getValue();
2277 return std::nullopt;
2278 if (!MinAmt || MinAmt->
ugt(ShAmt))
2280 if (!MaxAmt || MaxAmt->ult(ShAmt))
2283 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
2284 "Failed to find matching min/max shift amounts");
2285 if (MinAmt && MaxAmt)
2295 return std::nullopt;
2300 if (std::optional<ConstantRange> AmtRange =
2302 return AmtRange->getUnsignedMin().getZExtValue();
2303 return std::nullopt;
2321 Info = std::make_unique<GISelValueTracking>(MF, MaxDepth);
2346 if (!MO.isReg() || MO.getReg().isPhysical())
2349 if (!
MRI.getType(Reg).isValid())
2351 KnownBits Known = VTA.getKnownBits(Reg);
2352 unsigned SignedBits = VTA.computeNumSignBits(Reg);
2353 OS <<
" " << MO <<
" KnownBits:" << Known <<
" SignBits:" << SignedBits
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Utilities for dealing with flags related to floating point properties and mode controls.
static void dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth)
static unsigned computeNumSignBitsFromRangeMetadata(const GAnyLoad *Ld, unsigned TyBits)
Compute the known number of sign bits with attached range metadata in the memory operand.
static bool outputDenormalIsIEEEOrPosZero(const MachineFunction &MF, LLT Ty)
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Implement a low-level type suitable for MachineInstr level instruction selection.
Contains matchers for matching SSA Machine Instructions.
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file describes how to lower LLVM code to machine code.
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
static LLVM_ABI bool isRepresentableAsNormalIn(const fltSemantics &Src, const fltSemantics &Dst)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
void setAllBits()
Set every bit to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
This class represents a range of values.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
Represents any generic load, including sign/zero extending variants.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
static LLVM_ABI std::optional< GFConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
To use KnownBitsInfo analysis in a pass, KnownBitsInfo &Info = getAnalysis<GISelValueTrackingInfoAnal...
GISelValueTracking & get(MachineFunction &MF)
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
GISelValueTracking Result
LLVM_ABI Result run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
unsigned getMaxDepth() const
KnownBits getKnownBits(Register R)
Align computeKnownAlignment(Register R, unsigned Depth=0)
std::optional< ConstantRange > getValidShiftAmountRange(Register R, const APInt &DemandedElts, unsigned Depth)
If a G_SHL/G_ASHR/G_LSHR node with shift operand R has shift amounts that are all less than the eleme...
bool maskedValueIsZero(Register Val, const APInt &Mask)
std::optional< uint64_t > getValidMinimumShiftAmount(Register R, const APInt &DemandedElts, unsigned Depth=0)
If a G_SHL/G_ASHR/G_LSHR node with shift operand R has shift amounts that are all less than the eleme...
bool signBitIsZero(Register Op)
const DataLayout & getDataLayout() const
unsigned computeNumSignBits(Register R, const APInt &DemandedElts, unsigned Depth=0)
APInt getKnownOnes(Register R)
KnownBits getKnownBits(MachineInstr &MI)
APInt getKnownZeroes(Register R)
void computeKnownBitsImpl(Register R, KnownBits &Known, const APInt &DemandedElts, unsigned Depth=0)
Register getCondReg() const
Register getFalseReg() const
Register getTrueReg() const
Register getSrc2Reg() const
Register getSrc1Reg() const
ArrayRef< int > getMask() const
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
constexpr LLT getScalarType() const
TypeSize getValue() const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
LLT getMemoryType() const
Return the memory type of the memory reference.
const MDNode * getRanges() const
Return the range tag for the memory reference.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
Register getReg() const
getReg - Returns the register number.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
operand_type_match m_Reg()
operand_type_match m_Pred()
bind_ty< FPClassTest > m_FPClassTest(FPClassTest &T)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
ClassifyOp_match< LHS, Test, TargetOpcode::G_IS_FPCLASS > m_GIsFPClass(const LHS &L, const Test &T)
Matches the register and immediate used in a fpclass test G_IS_FPCLASS val, 96.
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_FCMP > m_GFCmp(const Pred &P, const LHS &L, const RHS &R)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
LLVM_ABI const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
scope_exit(Callable) -> scope_exit< Callable >
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, FPClassTest RHSClass, bool LookThroughSrc=true)
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operands, returning false if the mask isn't valid.
constexpr unsigned MaxAnalysisRecursionDepth
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
DWARFExpression::Operation Op
std::string toString(const APInt &I, unsigned Radix, bool Signed, bool formatAsCLiteral=false, bool UpperCase=true, bool InsertSeparators=false)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
static uint32_t extractBits(uint64_t Val, uint32_t Hi, uint32_t Lo)
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
constexpr bool outputsAreZero() const
Return true if output denormals should be flushed to 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environment.
static constexpr DenormalMode getIEEE()
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
bool isAllOnes() const
Returns true if value is all one bits.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater than -0.0.
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
void copysign(const KnownFPClass &Sign)
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's know this can never be interpreted as a zero.
bool isKnownNeverPosZero() const
Return true if it's known this can never be a literal positive zero.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -0.0.
void signBitMustBeOne()
Assume the sign bit is one.
void signBitMustBeZero()
Assume the sign bit is zero.
LLVM_ABI bool isKnownNeverLogicalPosZero(DenormalMode Mode) const
Return true if it's know this can never be interpreted as a positive zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
LLVM_ABI bool isKnownNeverLogicalNegZero(DenormalMode Mode) const
Return true if it's know this can never be interpreted as a negative zero.