42#define DEBUG_TYPE "gisel-known-bits"
50 "Analysis for ComputingKnownBits",
false,
true)
53 : MF(MF), MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
58 switch (
MI->getOpcode()) {
59 case TargetOpcode::COPY:
61 case TargetOpcode::G_ASSERT_ALIGN: {
63 return Align(
MI->getOperand(2).getImm());
65 case TargetOpcode::G_FRAME_INDEX: {
66 int FrameIdx =
MI->getOperand(1).getIndex();
67 return MF.getFrameInfo().getObjectAlign(FrameIdx);
69 case TargetOpcode::G_INTRINSIC:
70 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
71 case TargetOpcode::G_INTRINSIC_CONVERGENT:
72 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
74 return TL.computeKnownAlignForTargetInstr(*
this, R, MRI,
Depth + 1);
79 assert(
MI.getNumExplicitDefs() == 1 &&
80 "expected single return generic instruction");
85 const LLT Ty = MRI.getType(R);
95 const APInt &DemandedElts,
103 LLT Ty = MRI.getType(R);
104 unsigned BitWidth = Ty.getScalarSizeInBits();
116[[maybe_unused]]
static void
119 <<
"] Computed for: " <<
MI <<
"[" <<
Depth <<
"] Known: 0x"
130 const APInt &DemandedElts,
161 const APInt &DemandedElts,
164 unsigned Opcode =
MI.getOpcode();
165 LLT DstTy = MRI.getType(R);
179 "DemandedElt width should equal the fixed vector number of elements");
182 "DemandedElt width should be 1 for scalars or scalable vectors");
207 TL.computeKnownBitsForTargetInstr(*
this, R, Known, DemandedElts, MRI,
210 case TargetOpcode::G_BUILD_VECTOR: {
215 if (!DemandedElts[
I])
229 case TargetOpcode::G_SPLAT_VECTOR: {
237 case TargetOpcode::COPY:
238 case TargetOpcode::G_PHI:
239 case TargetOpcode::PHI: {
245 assert(
MI.getOperand(0).getSubReg() == 0 &&
"Is this code in SSA?");
248 for (
unsigned Idx = 1; Idx <
MI.getNumOperands(); Idx += 2) {
251 LLT SrcTy = MRI.getType(SrcReg);
259 if (SrcReg.
isVirtual() && Src.getSubReg() == 0 &&
261 APInt NowDemandedElts;
262 if (!SrcTy.isFixedVector()) {
263 NowDemandedElts =
APInt(1, 1);
266 NowDemandedElts = DemandedElts;
273 Depth + (Opcode != TargetOpcode::COPY));
288 case TargetOpcode::G_STEP_VECTOR: {
289 APInt Step =
MI.getOperand(1).getCImm()->getValue();
297 const APInt MinNumElts =
303 .
umul_ov(MinNumElts, Overflow);
306 const APInt MaxValue = (MaxNumElts - 1).
umul_ov(Step, Overflow);
312 case TargetOpcode::G_CONSTANT: {
316 case TargetOpcode::G_FRAME_INDEX: {
317 int FrameIdx =
MI.getOperand(1).getIndex();
318 TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF);
321 case TargetOpcode::G_SUB: {
330 case TargetOpcode::G_XOR: {
339 case TargetOpcode::G_PTR_ADD: {
343 LLT Ty = MRI.getType(
MI.getOperand(1).getReg());
344 if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
348 case TargetOpcode::G_ADD: {
356 case TargetOpcode::G_AND: {
366 case TargetOpcode::G_OR: {
376 case TargetOpcode::G_MUL: {
384 case TargetOpcode::G_UMULH: {
392 case TargetOpcode::G_SMULH: {
400 case TargetOpcode::G_ABDU: {
408 case TargetOpcode::G_ABDS: {
417 if (SignBits1 == 1) {
426 case TargetOpcode::G_UDIV: {
435 case TargetOpcode::G_SDIV: {
444 case TargetOpcode::G_SELECT: {
445 computeKnownBitsMin(
MI.getOperand(2).getReg(),
MI.getOperand(3).getReg(),
446 Known, DemandedElts,
Depth + 1);
449 case TargetOpcode::G_SMIN: {
459 case TargetOpcode::G_SMAX: {
469 case TargetOpcode::G_UMIN: {
478 case TargetOpcode::G_UMAX: {
487 case TargetOpcode::G_FCMP:
488 case TargetOpcode::G_ICMP: {
491 if (TL.getBooleanContents(DstTy.
isVector(),
492 Opcode == TargetOpcode::G_FCMP) ==
498 case TargetOpcode::G_SEXT: {
506 case TargetOpcode::G_ASSERT_SEXT:
507 case TargetOpcode::G_SEXT_INREG: {
510 Known = Known.
sextInReg(
MI.getOperand(2).getImm());
513 case TargetOpcode::G_ANYEXT: {
519 case TargetOpcode::G_LOAD: {
527 case TargetOpcode::G_SEXTLOAD:
528 case TargetOpcode::G_ZEXTLOAD: {
535 Known = Opcode == TargetOpcode::G_SEXTLOAD
540 case TargetOpcode::G_ASHR: {
549 case TargetOpcode::G_LSHR: {
558 case TargetOpcode::G_SHL: {
567 case TargetOpcode::G_ROTL:
568 case TargetOpcode::G_ROTR: {
569 MachineInstr *AmtOpMI = MRI.getVRegDef(
MI.getOperand(2).getReg());
577 unsigned Amt = MaybeAmtOp->urem(
BitWidth);
580 if (Opcode == TargetOpcode::G_ROTL)
587 case TargetOpcode::G_INTTOPTR:
588 case TargetOpcode::G_PTRTOINT:
593 case TargetOpcode::G_ZEXT:
594 case TargetOpcode::G_TRUNC: {
600 case TargetOpcode::G_ASSERT_ZEXT: {
604 unsigned SrcBitWidth =
MI.getOperand(2).getImm();
605 assert(SrcBitWidth &&
"SrcBitWidth can't be zero");
607 Known.
Zero |= (~InMask);
608 Known.
One &= (~Known.Zero);
611 case TargetOpcode::G_ASSERT_ALIGN: {
612 int64_t LogOfAlign =
Log2_64(
MI.getOperand(2).getImm());
621 case TargetOpcode::G_MERGE_VALUES: {
622 unsigned NumOps =
MI.getNumOperands();
623 unsigned OpSize = MRI.getType(
MI.getOperand(1).getReg()).getSizeInBits();
625 for (
unsigned I = 0;
I !=
NumOps - 1; ++
I) {
628 DemandedElts,
Depth + 1);
633 case TargetOpcode::G_UNMERGE_VALUES: {
634 unsigned NumOps =
MI.getNumOperands();
636 LLT SrcTy = MRI.getType(SrcReg);
638 if (SrcTy.isVector() && SrcTy.getScalarType() != DstTy.
getScalarType())
643 for (; DstIdx !=
NumOps - 1 &&
MI.getOperand(DstIdx).
getReg() != R;
647 APInt SubDemandedElts = DemandedElts;
648 if (SrcTy.isVector()) {
651 DemandedElts.
zext(SrcTy.getNumElements()).
shl(DstIdx * DstLanes);
657 if (SrcTy.isVector())
658 Known = std::move(SrcOpKnown);
663 case TargetOpcode::G_BSWAP: {
669 case TargetOpcode::G_BITREVERSE: {
675 case TargetOpcode::G_CTPOP: {
687 case TargetOpcode::G_UBFX: {
688 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
698 case TargetOpcode::G_SBFX: {
699 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
716 case TargetOpcode::G_UADDO:
717 case TargetOpcode::G_UADDE:
718 case TargetOpcode::G_SADDO:
719 case TargetOpcode::G_SADDE: {
720 if (
MI.getOperand(1).getReg() == R) {
723 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
730 assert(
MI.getOperand(0).getReg() == R &&
731 "We only compute knownbits for the sum here.");
734 if (Opcode == TargetOpcode::G_UADDE || Opcode == TargetOpcode::G_SADDE) {
738 Carry = Carry.
trunc(1);
750 case TargetOpcode::G_USUBO:
751 case TargetOpcode::G_USUBE:
752 case TargetOpcode::G_SSUBO:
753 case TargetOpcode::G_SSUBE:
754 case TargetOpcode::G_UMULO:
755 case TargetOpcode::G_SMULO: {
756 if (
MI.getOperand(1).getReg() == R) {
759 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
766 case TargetOpcode::G_CTTZ:
767 case TargetOpcode::G_CTTZ_ZERO_UNDEF: {
777 case TargetOpcode::G_CTLZ:
778 case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
788 case TargetOpcode::G_CTLS: {
792 unsigned MaxUpperRedundantSignBits = MRI.getType(Reg).getScalarSizeInBits();
797 Known =
Range.toKnownBits();
800 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
807 LLT VecVT = MRI.getType(InVec);
825 if (ConstEltNo && ConstEltNo->ult(NumSrcElts))
832 case TargetOpcode::G_SHUFFLE_VECTOR: {
833 APInt DemandedLHS, DemandedRHS;
836 unsigned NumElts = MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
838 DemandedElts, DemandedLHS, DemandedRHS))
859 case TargetOpcode::G_CONCAT_VECTORS: {
860 if (MRI.getType(
MI.getOperand(0).getReg()).isScalableVector())
865 unsigned NumSubVectorElts =
866 MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
870 DemandedElts.
extractBits(NumSubVectorElts,
I * NumSubVectorElts);
882 case TargetOpcode::G_ABS: {
896 Ty = Ty.getScalarType();
905 LLT Ty = MRI.getType(R);
908 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known,
Depth);
911void GISelValueTracking::computeKnownFPClassForFPTrunc(
919 KnownFPClass KnownSrc;
920 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
933void GISelValueTracking::computeKnownFPClass(
Register R,
934 const APInt &DemandedElts,
938 assert(Known.
isUnknown() &&
"should not be called with known information");
948 MachineInstr &
MI = *MRI.getVRegDef(R);
949 unsigned Opcode =
MI.getOpcode();
950 LLT DstTy = MRI.getType(R);
958 switch (Cst->getKind()) {
960 auto APF = Cst->getScalarValue();
962 Known.
SignBit = APF.isNegative();
967 bool SignBitAllZero =
true;
968 bool SignBitAllOne =
true;
970 for (
auto C : *Cst) {
973 SignBitAllZero =
false;
975 SignBitAllOne =
false;
978 if (SignBitAllOne != SignBitAllZero)
994 KnownNotFromFlags |=
fcNan;
996 KnownNotFromFlags |=
fcInf;
1000 InterestedClasses &= ~KnownNotFromFlags;
1003 [=, &Known] { Known.
knownNot(KnownNotFromFlags); });
1009 const MachineFunction *MF =
MI.getMF();
1013 TL.computeKnownFPClassForTargetInstr(*
this, R, Known, DemandedElts, MRI,
1016 case TargetOpcode::G_FNEG: {
1018 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
Depth + 1);
1022 case TargetOpcode::G_SELECT: {
1045 bool LookThroughFAbsFNeg = CmpLHS !=
LHS && CmpLHS !=
RHS;
1046 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
1052 MaskIfTrue = TestedMask;
1053 MaskIfFalse = ~TestedMask;
1056 if (TestedValue ==
LHS) {
1058 FilterLHS = MaskIfTrue;
1059 }
else if (TestedValue ==
RHS) {
1061 FilterRHS = MaskIfFalse;
1064 KnownFPClass Known2;
1065 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses & FilterLHS, Known,
1069 computeKnownFPClass(
RHS, DemandedElts, InterestedClasses & FilterRHS,
1076 case TargetOpcode::G_FCOPYSIGN: {
1077 Register Magnitude =
MI.getOperand(1).getReg();
1080 KnownFPClass KnownSign;
1082 computeKnownFPClass(Magnitude, DemandedElts, InterestedClasses, Known,
1084 computeKnownFPClass(Sign, DemandedElts, InterestedClasses, KnownSign,
1089 case TargetOpcode::G_FMA:
1090 case TargetOpcode::G_STRICT_FMA:
1091 case TargetOpcode::G_FMAD: {
1106 KnownFPClass KnownAddend;
1107 computeKnownFPClass(
C, DemandedElts, InterestedClasses, KnownAddend,
1114 case TargetOpcode::G_FSQRT:
1115 case TargetOpcode::G_STRICT_FSQRT: {
1116 KnownFPClass KnownSrc;
1118 if (InterestedClasses &
fcNan)
1123 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1138 case TargetOpcode::G_FABS: {
1143 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
1149 case TargetOpcode::G_FSIN:
1150 case TargetOpcode::G_FCOS:
1151 case TargetOpcode::G_FSINCOS: {
1154 KnownFPClass KnownSrc;
1156 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1164 case TargetOpcode::G_FMAXNUM:
1165 case TargetOpcode::G_FMINNUM:
1166 case TargetOpcode::G_FMINNUM_IEEE:
1167 case TargetOpcode::G_FMAXIMUM:
1168 case TargetOpcode::G_FMINIMUM:
1169 case TargetOpcode::G_FMAXNUM_IEEE:
1170 case TargetOpcode::G_FMAXIMUMNUM:
1171 case TargetOpcode::G_FMINIMUMNUM: {
1174 KnownFPClass KnownLHS, KnownRHS;
1176 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses, KnownLHS,
1178 computeKnownFPClass(
RHS, DemandedElts, InterestedClasses, KnownRHS,
1182 Known = KnownLHS | KnownRHS;
1185 if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||
1186 Opcode == TargetOpcode::G_FMAXNUM ||
1187 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1188 Opcode == TargetOpcode::G_FMAXIMUMNUM))
1191 if (Opcode == TargetOpcode::G_FMAXNUM ||
1192 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1193 Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
1201 }
else if (Opcode == TargetOpcode::G_FMAXIMUM) {
1207 }
else if (Opcode == TargetOpcode::G_FMINNUM ||
1208 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1209 Opcode == TargetOpcode::G_FMINNUM_IEEE) {
1217 }
else if (Opcode == TargetOpcode::G_FMINIMUM) {
1249 }
else if ((Opcode == TargetOpcode::G_FMAXIMUM ||
1250 Opcode == TargetOpcode::G_FMINIMUM) ||
1251 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1252 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1253 Opcode == TargetOpcode::G_FMAXNUM_IEEE ||
1254 Opcode == TargetOpcode::G_FMINNUM_IEEE ||
1260 if ((Opcode == TargetOpcode::G_FMAXIMUM ||
1261 Opcode == TargetOpcode::G_FMAXNUM ||
1262 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1263 Opcode == TargetOpcode::G_FMAXNUM_IEEE) &&
1266 else if ((Opcode == TargetOpcode::G_FMINIMUM ||
1267 Opcode == TargetOpcode::G_FMINNUM ||
1268 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1269 Opcode == TargetOpcode::G_FMINNUM_IEEE) &&
1276 case TargetOpcode::G_FCANONICALIZE: {
1278 KnownFPClass KnownSrc;
1279 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1301 DenormalMode DenormMode = MF->getDenormalMode(FPType);
1320 case TargetOpcode::G_VECREDUCE_FMAX:
1321 case TargetOpcode::G_VECREDUCE_FMIN:
1322 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1323 case TargetOpcode::G_VECREDUCE_FMINIMUM: {
1329 computeKnownFPClass(Val,
MI.getFlags(), InterestedClasses,
Depth + 1);
1335 case TargetOpcode::G_TRUNC:
1336 case TargetOpcode::G_FFLOOR:
1337 case TargetOpcode::G_FCEIL:
1338 case TargetOpcode::G_FRINT:
1339 case TargetOpcode::G_FNEARBYINT:
1340 case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
1341 case TargetOpcode::G_INTRINSIC_ROUND: {
1343 KnownFPClass KnownSrc;
1349 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1366 case TargetOpcode::G_FEXP:
1367 case TargetOpcode::G_FEXP2:
1368 case TargetOpcode::G_FEXP10: {
1374 KnownFPClass KnownSrc;
1375 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1384 case TargetOpcode::G_FLOG:
1385 case TargetOpcode::G_FLOG2:
1386 case TargetOpcode::G_FLOG10: {
1401 KnownFPClass KnownSrc;
1402 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1412 DenormalMode
Mode = MF->getDenormalMode(FltSem);
1419 case TargetOpcode::G_FPOWI: {
1424 LLT ExpTy = MRI.getType(Exp);
1426 Exp, ExpTy.
isVector() ? DemandedElts : APInt(1, 1),
Depth + 1);
1428 if (ExponentKnownBits.
Zero[0]) {
1442 KnownFPClass KnownSrc;
1443 computeKnownFPClass(Val, DemandedElts,
fcNegative, KnownSrc,
Depth + 1);
1448 case TargetOpcode::G_FLDEXP:
1449 case TargetOpcode::G_STRICT_FLDEXP: {
1451 KnownFPClass KnownSrc;
1452 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1469 if ((InterestedClasses & ExpInfoMask) ==
fcNone)
1478 case TargetOpcode::G_INTRINSIC_ROUNDEVEN: {
1479 computeKnownFPClassForFPTrunc(
MI, DemandedElts, InterestedClasses, Known,
1483 case TargetOpcode::G_FADD:
1484 case TargetOpcode::G_STRICT_FADD:
1485 case TargetOpcode::G_FSUB:
1486 case TargetOpcode::G_STRICT_FSUB: {
1489 KnownFPClass KnownLHS, KnownRHS;
1491 (Opcode == TargetOpcode::G_FADD ||
1492 Opcode == TargetOpcode::G_STRICT_FADD) &&
1494 bool WantNaN = (InterestedClasses &
fcNan) !=
fcNone;
1497 if (!WantNaN && !WantNegative && !WantNegZero)
1503 if (InterestedClasses &
fcNan)
1504 InterestedSrcs |=
fcInf;
1505 computeKnownFPClass(
RHS, DemandedElts, InterestedSrcs, KnownRHS,
Depth + 1);
1510 (Opcode == TargetOpcode::G_FSUB ||
1511 Opcode == TargetOpcode::G_STRICT_FSUB)) {
1515 computeKnownFPClass(
LHS, DemandedElts, InterestedSrcs, KnownLHS,
1523 if (Opcode == TargetOpcode::G_FADD ||
1524 Opcode == TargetOpcode::G_STRICT_FADD) {
1551 case TargetOpcode::G_FMUL:
1552 case TargetOpcode::G_STRICT_FMUL: {
1565 KnownFPClass KnownLHS, KnownRHS;
1566 computeKnownFPClass(
RHS, DemandedElts, NeedForNan, KnownRHS,
Depth + 1);
1570 computeKnownFPClass(
LHS, DemandedElts, NeedForNan, KnownLHS,
Depth + 1);
1597 case TargetOpcode::G_FDIV:
1598 case TargetOpcode::G_FREM: {
1604 if (Opcode == TargetOpcode::G_FDIV) {
1615 const bool WantNan = (InterestedClasses &
fcNan) !=
fcNone;
1617 const bool WantPositive = Opcode == TargetOpcode::G_FREM &&
1619 if (!WantNan && !WantNegative && !WantPositive)
1622 KnownFPClass KnownLHS, KnownRHS;
1625 KnownRHS,
Depth + 1);
1627 bool KnowSomethingUseful =
1630 if (KnowSomethingUseful || WantPositive) {
1635 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses & InterestedLHS,
1636 KnownLHS,
Depth + 1);
1639 if (Opcode == TargetOpcode::G_FDIV) {
1680 case TargetOpcode::G_FPEXT: {
1684 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known,
Depth + 1);
1688 LLT SrcTy = MRI.getType(Src).getScalarType();
1705 case TargetOpcode::G_FPTRUNC: {
1706 computeKnownFPClassForFPTrunc(
MI, DemandedElts, InterestedClasses, Known,
1710 case TargetOpcode::G_SITOFP:
1711 case TargetOpcode::G_UITOFP: {
1720 if (Opcode == TargetOpcode::G_UITOFP)
1724 LLT Ty = MRI.getType(Val);
1726 if (InterestedClasses &
fcInf) {
1731 if (Opcode == TargetOpcode::G_SITOFP)
1745 case TargetOpcode::G_BUILD_VECTOR:
1746 case TargetOpcode::G_CONCAT_VECTORS: {
1753 for (
unsigned Idx = 0; Idx <
Merge.getNumSources(); ++Idx) {
1755 bool NeedsElt = DemandedElts[Idx];
1761 computeKnownFPClass(Src, Known, InterestedClasses,
Depth + 1);
1764 KnownFPClass Known2;
1765 computeKnownFPClass(Src, Known2, InterestedClasses,
Depth + 1);
1777 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1787 LLT VecTy = MRI.getType(Vec);
1792 if (CIdx && CIdx->ult(NumElts))
1794 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
1800 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1806 LLT VecTy = MRI.getType(Vec);
1814 APInt DemandedVecElts = DemandedElts;
1815 bool NeedsElt =
true;
1817 if (CIdx && CIdx->ult(NumElts)) {
1818 DemandedVecElts.
clearBit(CIdx->getZExtValue());
1819 NeedsElt = DemandedElts[CIdx->getZExtValue()];
1824 computeKnownFPClass(Elt, Known, InterestedClasses,
Depth + 1);
1833 if (!DemandedVecElts.
isZero()) {
1834 KnownFPClass Known2;
1835 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
1842 case TargetOpcode::G_SHUFFLE_VECTOR: {
1846 APInt DemandedLHS, DemandedRHS;
1848 assert(DemandedElts == APInt(1, 1));
1849 DemandedLHS = DemandedRHS = DemandedElts;
1852 DemandedElts, DemandedLHS,
1859 if (!!DemandedLHS) {
1861 computeKnownFPClass(
LHS, DemandedLHS, InterestedClasses, Known,
1871 if (!!DemandedRHS) {
1872 KnownFPClass Known2;
1874 computeKnownFPClass(
RHS, DemandedRHS, InterestedClasses, Known2,
1880 case TargetOpcode::COPY: {
1883 if (!Src.isVirtual())
1886 computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known,
Depth + 1);
1897 computeKnownFPClass(R, DemandedElts, InterestedClasses, KnownClasses,
Depth);
1898 return KnownClasses;
1904 computeKnownFPClass(R, Known, InterestedClasses,
Depth);
1912 InterestedClasses &=
~fcNan;
1914 InterestedClasses &=
~fcInf;
1917 computeKnownFPClass(R, DemandedElts, InterestedClasses,
Depth);
1920 Result.KnownFPClasses &=
~fcNan;
1922 Result.KnownFPClasses &=
~fcInf;
1928 LLT Ty = MRI.getType(R);
1929 APInt DemandedElts =
1931 return computeKnownFPClass(R, DemandedElts, Flags, InterestedClasses,
Depth);
1935unsigned GISelValueTracking::computeNumSignBitsMin(
Register Src0,
Register Src1,
1936 const APInt &DemandedElts,
1940 if (Src1SignBits == 1)
1957 case TargetOpcode::G_SEXTLOAD:
1960 case TargetOpcode::G_ZEXTLOAD:
1973 const APInt &DemandedElts,
1976 unsigned Opcode =
MI.getOpcode();
1978 if (Opcode == TargetOpcode::G_CONSTANT)
1979 return MI.getOperand(1).getCImm()->getValue().getNumSignBits();
1987 LLT DstTy = MRI.getType(R);
1997 unsigned FirstAnswer = 1;
1999 case TargetOpcode::COPY: {
2001 if (Src.getReg().isVirtual() && Src.getSubReg() == 0 &&
2002 MRI.getType(Src.getReg()).isValid()) {
2009 case TargetOpcode::G_SEXT: {
2011 LLT SrcTy = MRI.getType(Src);
2015 case TargetOpcode::G_ASSERT_SEXT:
2016 case TargetOpcode::G_SEXT_INREG: {
2019 unsigned SrcBits =
MI.getOperand(2).getImm();
2020 unsigned InRegBits = TyBits - SrcBits + 1;
2024 case TargetOpcode::G_LOAD: {
2031 case TargetOpcode::G_SEXTLOAD: {
2046 case TargetOpcode::G_ZEXTLOAD: {
2061 case TargetOpcode::G_AND:
2062 case TargetOpcode::G_OR:
2063 case TargetOpcode::G_XOR: {
2065 unsigned Src1NumSignBits =
2067 if (Src1NumSignBits != 1) {
2069 unsigned Src2NumSignBits =
2071 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits);
2075 case TargetOpcode::G_ASHR: {
2080 FirstAnswer = std::min<uint64_t>(FirstAnswer + *
C, TyBits);
2083 case TargetOpcode::G_SHL: {
2086 if (std::optional<ConstantRange> ShAmtRange =
2088 uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
2089 uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
2099 if (ExtOpc == TargetOpcode::G_SEXT || ExtOpc == TargetOpcode::G_ZEXT ||
2100 ExtOpc == TargetOpcode::G_ANYEXT) {
2101 LLT ExtTy = MRI.getType(Src1);
2103 LLT ExtendeeTy = MRI.getType(Extendee);
2107 if (SizeDiff <= MinShAmt) {
2111 return Tmp - MaxShAmt;
2117 return Tmp - MaxShAmt;
2121 case TargetOpcode::G_TRUNC: {
2123 LLT SrcTy = MRI.getType(Src);
2127 unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
2129 if (NumSrcSignBits > (NumSrcBits - DstTyBits))
2130 return NumSrcSignBits - (NumSrcBits - DstTyBits);
2133 case TargetOpcode::G_SELECT: {
2134 return computeNumSignBitsMin(
MI.getOperand(2).getReg(),
2135 MI.getOperand(3).getReg(), DemandedElts,
2138 case TargetOpcode::G_SMIN:
2139 case TargetOpcode::G_SMAX:
2140 case TargetOpcode::G_UMIN:
2141 case TargetOpcode::G_UMAX:
2143 return computeNumSignBitsMin(
MI.getOperand(1).getReg(),
2144 MI.getOperand(2).getReg(), DemandedElts,
2146 case TargetOpcode::G_SADDO:
2147 case TargetOpcode::G_SADDE:
2148 case TargetOpcode::G_UADDO:
2149 case TargetOpcode::G_UADDE:
2150 case TargetOpcode::G_SSUBO:
2151 case TargetOpcode::G_SSUBE:
2152 case TargetOpcode::G_USUBO:
2153 case TargetOpcode::G_USUBE:
2154 case TargetOpcode::G_SMULO:
2155 case TargetOpcode::G_UMULO: {
2159 if (
MI.getOperand(1).getReg() == R) {
2160 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
2167 case TargetOpcode::G_SUB: {
2169 unsigned Src2NumSignBits =
2171 if (Src2NumSignBits == 1)
2181 if ((Known2.
Zero | 1).isAllOnes())
2188 FirstAnswer = Src2NumSignBits;
2195 unsigned Src1NumSignBits =
2197 if (Src1NumSignBits == 1)
2202 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
2205 case TargetOpcode::G_ADD: {
2207 unsigned Src2NumSignBits =
2209 if (Src2NumSignBits <= 2)
2213 unsigned Src1NumSignBits =
2215 if (Src1NumSignBits == 1)
2224 if ((Known1.
Zero | 1).isAllOnes())
2230 FirstAnswer = Src1NumSignBits;
2239 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
2242 case TargetOpcode::G_FCMP:
2243 case TargetOpcode::G_ICMP: {
2244 bool IsFP = Opcode == TargetOpcode::G_FCMP;
2247 auto BC = TL.getBooleanContents(DstTy.
isVector(), IsFP);
2254 case TargetOpcode::G_BUILD_VECTOR: {
2256 FirstAnswer = TyBits;
2257 APInt SingleDemandedElt(1, 1);
2259 if (!DemandedElts[
I])
2264 FirstAnswer = std::min(FirstAnswer, Tmp2);
2267 if (FirstAnswer == 1)
2272 case TargetOpcode::G_CONCAT_VECTORS: {
2273 if (MRI.getType(
MI.getOperand(0).getReg()).isScalableVector())
2275 FirstAnswer = TyBits;
2278 unsigned NumSubVectorElts =
2279 MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
2282 DemandedElts.
extractBits(NumSubVectorElts,
I * NumSubVectorElts);
2287 FirstAnswer = std::min(FirstAnswer, Tmp2);
2290 if (FirstAnswer == 1)
2295 case TargetOpcode::G_SHUFFLE_VECTOR: {
2298 APInt DemandedLHS, DemandedRHS;
2300 unsigned NumElts = MRI.getType(Src1).getNumElements();
2302 DemandedElts, DemandedLHS, DemandedRHS))
2308 if (FirstAnswer == 1)
2310 if (!!DemandedRHS) {
2313 FirstAnswer = std::min(FirstAnswer, Tmp2);
2317 case TargetOpcode::G_SPLAT_VECTOR: {
2321 unsigned NumSrcBits = MRI.getType(Src).getSizeInBits();
2322 if (NumSrcSignBits > (NumSrcBits - TyBits))
2323 return NumSrcSignBits - (NumSrcBits - TyBits);
2326 case TargetOpcode::G_INTRINSIC:
2327 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2328 case TargetOpcode::G_INTRINSIC_CONVERGENT:
2329 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
2332 TL.computeNumSignBitsForTargetInstr(*
this, R, DemandedElts, MRI,
Depth);
2334 FirstAnswer = std::max(FirstAnswer, NumBits);
2354 Mask <<= Mask.getBitWidth() - TyBits;
2355 return std::max(FirstAnswer, Mask.countl_one());
2359 LLT Ty = MRI.getType(R);
2360 APInt DemandedElts =
2369 unsigned Opcode =
MI.getOpcode();
2371 LLT Ty = MRI.getType(R);
2372 unsigned BitWidth = Ty.getScalarSizeInBits();
2374 if (Opcode == TargetOpcode::G_CONSTANT) {
2375 const APInt &ShAmt =
MI.getOperand(1).getCImm()->getValue();
2377 return std::nullopt;
2381 if (Opcode == TargetOpcode::G_BUILD_VECTOR) {
2382 const APInt *MinAmt =
nullptr, *MaxAmt =
nullptr;
2383 for (
unsigned I = 0, E =
MI.getNumOperands() - 1;
I != E; ++
I) {
2384 if (!DemandedElts[
I])
2387 if (
Op->getOpcode() != TargetOpcode::G_CONSTANT) {
2388 MinAmt = MaxAmt =
nullptr;
2392 const APInt &ShAmt =
Op->getOperand(1).getCImm()->getValue();
2394 return std::nullopt;
2395 if (!MinAmt || MinAmt->
ugt(ShAmt))
2397 if (!MaxAmt || MaxAmt->ult(ShAmt))
2400 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
2401 "Failed to find matching min/max shift amounts");
2402 if (MinAmt && MaxAmt)
2412 return std::nullopt;
2417 if (std::optional<ConstantRange> AmtRange =
2419 return AmtRange->getUnsignedMin().getZExtValue();
2420 return std::nullopt;
2438 Info = std::make_unique<GISelValueTracking>(MF, MaxDepth);
2463 if (!MO.isReg() || MO.getReg().isPhysical())
2466 if (!MRI.getType(Reg).isValid())
2468 KnownBits Known = VTA.getKnownBits(Reg);
2469 unsigned SignedBits = VTA.computeNumSignBits(Reg);
2470 OS <<
" " << MO <<
" KnownBits:" << Known <<
" SignBits:" << SignedBits
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Utilities for dealing with flags related to floating point properties and mode controls.
static void dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth)
static unsigned computeNumSignBitsFromRangeMetadata(const GAnyLoad *Ld, unsigned TyBits)
Compute the known number of sign bits with attached range metadata in the memory operand.
static bool outputDenormalIsIEEEOrPosZero(const MachineFunction &MF, LLT Ty)
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Implement a low-level type suitable for MachineInstr level instruction selection.
Contains matchers for matching SSA Machine Instructions.
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file describes how to lower LLVM code to machine code.
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
static LLVM_ABI bool isRepresentableAsNormalIn(const fltSemantics &Src, const fltSemantics &Dst)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
LLVM_ABI APInt rotr(unsigned rotateAmt) const
Rotate right by rotateAmt.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
unsigned countl_zero() const
The APInt version of std::countl_zero.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
void setAllBits()
Set every bit to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
This class represents a range of values.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
Represents any generic load, including sign/zero extending variants.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
static LLVM_ABI std::optional< GFConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
To use KnownBitsInfo analysis in a pass, KnownBitsInfo &Info = getAnalysis<GISelValueTrackingInfoAnal...
GISelValueTracking & get(MachineFunction &MF)
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
GISelValueTracking Result
LLVM_ABI Result run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
unsigned getMaxDepth() const
KnownBits getKnownBits(Register R)
Align computeKnownAlignment(Register R, unsigned Depth=0)
std::optional< ConstantRange > getValidShiftAmountRange(Register R, const APInt &DemandedElts, unsigned Depth)
If a G_SHL/G_ASHR/G_LSHR node with shift operand R has shift amounts that are all less than the eleme...
bool maskedValueIsZero(Register Val, const APInt &Mask)
std::optional< uint64_t > getValidMinimumShiftAmount(Register R, const APInt &DemandedElts, unsigned Depth=0)
If a G_SHL/G_ASHR/G_LSHR node with shift operand R has shift amounts that are all less than the eleme...
bool signBitIsZero(Register Op)
const DataLayout & getDataLayout() const
unsigned computeNumSignBits(Register R, const APInt &DemandedElts, unsigned Depth=0)
const MachineFunction & getMachineFunction() const
APInt getKnownOnes(Register R)
APInt getKnownZeroes(Register R)
void computeKnownBitsImpl(Register R, KnownBits &Known, const APInt &DemandedElts, unsigned Depth=0)
Register getCondReg() const
Register getFalseReg() const
Register getTrueReg() const
Register getSrc2Reg() const
Register getSrc1Reg() const
ArrayRef< int > getMask() const
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
LLT getScalarType() const
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr ElementCount getElementCount() const
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
TypeSize getValue() const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
LLT getMemoryType() const
Return the memory type of the memory reference.
const MDNode * getRanges() const
Return the range tag for the memory reference.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
Register getReg() const
getReg - Returns the register number.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
operand_type_match m_Reg()
operand_type_match m_Pred()
bind_ty< FPClassTest > m_FPClassTest(FPClassTest &T)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
ClassifyOp_match< LHS, Test, TargetOpcode::G_IS_FPCLASS > m_GIsFPClass(const LHS &L, const Test &T)
Matches the register and immediate used in an fpclass test, e.g. G_IS_FPCLASS %val, 96.
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_FCMP > m_GFCmp(const Pred &P, const LHS &L, const RHS &R)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
LLVM_ABI const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
scope_exit(Callable) -> scope_exit< Callable >
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
LLVM_ABI std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, FPClassTest RHSClass, bool LookThroughSrc=true)
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operands.
constexpr unsigned MaxAnalysisRecursionDepth
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
DWARFExpression::Operation Op
std::string toString(const APInt &I, unsigned Radix, bool Signed, bool formatAsCLiteral=false, bool UpperCase=true, bool InsertSeparators=false)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
static uint32_t extractBits(uint64_t Val, uint32_t Hi, uint32_t Lo)
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
constexpr bool outputsAreZero() const
Return true if output denormals should be flushed to 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating-point environment.
static constexpr DenormalMode getIEEE()
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false, bool SelfAdd=false)
Compute knownbits resulting from addition of LHS and RHS.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
static LLVM_ABI KnownBits abdu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for abdu(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits abds(KnownBits LHS, KnownBits RHS)
Compute known bits for abds(LHS, RHS).
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
bool isAllOnes() const
Returns true if value is all one bits.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater than +0.0.
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
void copysign(const KnownFPClass &Sign)
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
bool isKnownNeverPosZero() const
Return true if it's known this can never be a literal positive zero.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set, or false if the sign bit is definitely unset.
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -0.0.
void signBitMustBeOne()
Assume the sign bit is one.
void signBitMustBeZero()
Assume the sign bit is zero.
LLVM_ABI bool isKnownNeverLogicalPosZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a positive zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
LLVM_ABI bool isKnownNeverLogicalNegZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a negative zero.