41#define DEBUG_TYPE "gisel-known-bits" 
   49                "Analysis for ComputingKnownBits", 
false, 
true)
 
   52    : MF(MF), 
MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
 
 
   57  switch (
MI->getOpcode()) {
 
   58  case TargetOpcode::COPY:
 
   60  case TargetOpcode::G_ASSERT_ALIGN: {
 
   62    return Align(
MI->getOperand(2).getImm());
 
   64  case TargetOpcode::G_FRAME_INDEX: {
 
   65    int FrameIdx = 
MI->getOperand(1).getIndex();
 
   66    return MF.getFrameInfo().getObjectAlign(FrameIdx);
 
   68  case TargetOpcode::G_INTRINSIC:
 
   69  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
 
   70  case TargetOpcode::G_INTRINSIC_CONVERGENT:
 
   71  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
 
   73    return TL.computeKnownAlignForTargetInstr(*
this, R, MRI, 
Depth + 1);
 
 
   78  assert(
MI.getNumExplicitDefs() == 1 &&
 
   79         "expected single return generic instruction");
 
 
   84  const LLT Ty = MRI.getType(R);
 
 
   94                                           const APInt &DemandedElts,
 
 
  102  LLT Ty = MRI.getType(R);
 
  103  unsigned BitWidth = Ty.getScalarSizeInBits();
 
 
  115[[maybe_unused]] 
static void 
  118         << 
"] Computed for: " << 
MI << 
"[" << 
Depth << 
"] Known: 0x" 
 
  129                                             const APInt &DemandedElts,
 
  160                                              const APInt &DemandedElts,
 
  163  unsigned Opcode = 
MI.getOpcode();
 
  164  LLT DstTy = MRI.getType(R);
 
  178        "DemandedElt width should equal the fixed vector number of elements");
 
  181           "DemandedElt width should be 1 for scalars or scalable vectors");
 
  206    TL.computeKnownBitsForTargetInstr(*
this, R, Known, DemandedElts, MRI,
 
  209  case TargetOpcode::G_BUILD_VECTOR: {
 
  214      if (!DemandedElts[
I])
 
  228  case TargetOpcode::G_SPLAT_VECTOR: {
 
  236  case TargetOpcode::COPY:
 
  237  case TargetOpcode::G_PHI:
 
  238  case TargetOpcode::PHI: {
 
  244    assert(
MI.getOperand(0).getSubReg() == 0 && 
"Is this code in SSA?");
 
  247    for (
unsigned Idx = 1; Idx < 
MI.getNumOperands(); Idx += 2) {
 
  257      if (SrcReg.
isVirtual() && Src.getSubReg() == 0  &&
 
  258          MRI.getType(SrcReg).isValid()) {
 
  261                             Depth + (Opcode != TargetOpcode::COPY));
 
  276  case TargetOpcode::G_CONSTANT: {
 
  280  case TargetOpcode::G_FRAME_INDEX: {
 
  281    int FrameIdx = 
MI.getOperand(1).getIndex();
 
  282    TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF);
 
  285  case TargetOpcode::G_SUB: {
 
  293  case TargetOpcode::G_XOR: {
 
  302  case TargetOpcode::G_PTR_ADD: {
 
  306    LLT Ty = MRI.getType(
MI.getOperand(1).getReg());
 
  307    if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
 
  311  case TargetOpcode::G_ADD: {
 
  319  case TargetOpcode::G_AND: {
 
  329  case TargetOpcode::G_OR: {
 
  339  case TargetOpcode::G_MUL: {
 
  347  case TargetOpcode::G_UMULH: {
 
  355  case TargetOpcode::G_SMULH: {
 
  363  case TargetOpcode::G_SELECT: {
 
  364    computeKnownBitsMin(
MI.getOperand(2).getReg(), 
MI.getOperand(3).getReg(),
 
  365                        Known, DemandedElts, 
Depth + 1);
 
  368  case TargetOpcode::G_SMIN: {
 
  378  case TargetOpcode::G_SMAX: {
 
  388  case TargetOpcode::G_UMIN: {
 
  397  case TargetOpcode::G_UMAX: {
 
  406  case TargetOpcode::G_FCMP:
 
  407  case TargetOpcode::G_ICMP: {
 
  410    if (TL.getBooleanContents(DstTy.
isVector(),
 
  411                              Opcode == TargetOpcode::G_FCMP) ==
 
  417  case TargetOpcode::G_SEXT: {
 
  425  case TargetOpcode::G_ASSERT_SEXT:
 
  426  case TargetOpcode::G_SEXT_INREG: {
 
  429    Known = Known.
sextInReg(
MI.getOperand(2).getImm());
 
  432  case TargetOpcode::G_ANYEXT: {
 
  438  case TargetOpcode::G_LOAD: {
 
  446  case TargetOpcode::G_SEXTLOAD:
 
  447  case TargetOpcode::G_ZEXTLOAD: {
 
  454    Known = Opcode == TargetOpcode::G_SEXTLOAD
 
  459  case TargetOpcode::G_ASHR: {
 
  468  case TargetOpcode::G_LSHR: {
 
  477  case TargetOpcode::G_SHL: {
 
  486  case TargetOpcode::G_INTTOPTR:
 
  487  case TargetOpcode::G_PTRTOINT:
 
  492  case TargetOpcode::G_ZEXT:
 
  493  case TargetOpcode::G_TRUNC: {
 
  499  case TargetOpcode::G_ASSERT_ZEXT: {
 
  503    unsigned SrcBitWidth = 
MI.getOperand(2).getImm();
 
  504    assert(SrcBitWidth && 
"SrcBitWidth can't be zero");
 
  506    Known.
Zero |= (~InMask);
 
  507    Known.
One &= (~Known.Zero);
 
  510  case TargetOpcode::G_ASSERT_ALIGN: {
 
  511    int64_t LogOfAlign = 
Log2_64(
MI.getOperand(2).getImm());
 
  520  case TargetOpcode::G_MERGE_VALUES: {
 
  521    unsigned NumOps = 
MI.getNumOperands();
 
  522    unsigned OpSize = MRI.getType(
MI.getOperand(1).getReg()).getSizeInBits();
 
  524    for (
unsigned I = 0; 
I != 
NumOps - 1; ++
I) {
 
  527                           DemandedElts, 
Depth + 1);
 
  532  case TargetOpcode::G_UNMERGE_VALUES: {
 
  533    unsigned NumOps = 
MI.getNumOperands();
 
  535    LLT SrcTy = MRI.getType(SrcReg);
 
  537    if (SrcTy.isVector() && SrcTy.getScalarType() != DstTy.
getScalarType())
 
  542    for (; DstIdx != 
NumOps - 1 && 
MI.getOperand(DstIdx).
getReg() != R;
 
  546    APInt SubDemandedElts = DemandedElts;
 
  547    if (SrcTy.isVector()) {
 
  550          DemandedElts.
zext(SrcTy.getNumElements()).
shl(DstIdx * DstLanes);
 
  556    if (SrcTy.isVector())
 
  557      Known = std::move(SrcOpKnown);
 
  562  case TargetOpcode::G_BSWAP: {
 
  568  case TargetOpcode::G_BITREVERSE: {
 
  574  case TargetOpcode::G_CTPOP: {
 
  586  case TargetOpcode::G_UBFX: {
 
  587    KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
 
  597  case TargetOpcode::G_SBFX: {
 
  598    KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
 
  615  case TargetOpcode::G_UADDO:
 
  616  case TargetOpcode::G_UADDE:
 
  617  case TargetOpcode::G_SADDO:
 
  618  case TargetOpcode::G_SADDE:
 
  619  case TargetOpcode::G_USUBO:
 
  620  case TargetOpcode::G_USUBE:
 
  621  case TargetOpcode::G_SSUBO:
 
  622  case TargetOpcode::G_SSUBE:
 
  623  case TargetOpcode::G_UMULO:
 
  624  case TargetOpcode::G_SMULO: {
 
  625    if (
MI.getOperand(1).getReg() == R) {
 
  628      if (TL.getBooleanContents(DstTy.
isVector(), 
false) ==
 
  635  case TargetOpcode::G_CTLZ:
 
  636  case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
 
  646  case TargetOpcode::G_SHUFFLE_VECTOR: {
 
  647    APInt DemandedLHS, DemandedRHS;
 
  650    unsigned NumElts = MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
 
  652                                DemandedElts, DemandedLHS, DemandedRHS))
 
  673  case TargetOpcode::G_CONCAT_VECTORS: {
 
  674    if (MRI.getType(
MI.getOperand(0).getReg()).isScalableVector())
 
  679    unsigned NumSubVectorElts =
 
  680        MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
 
  684          DemandedElts.
extractBits(NumSubVectorElts, 
I * NumSubVectorElts);
 
  696  case TargetOpcode::G_ABS: {
 
 
  710  Ty = Ty.getScalarType();
 
 
  719  LLT Ty = MRI.getType(R);
 
  722  computeKnownFPClass(R, DemandedElts, InterestedClasses, Known, 
Depth);
 
  725void GISelValueTracking::computeKnownFPClassForFPTrunc(
 
  733  KnownFPClass KnownSrc;
 
  734  computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
 
  747void GISelValueTracking::computeKnownFPClass(
Register R,
 
  748                                             const APInt &DemandedElts,
 
  752  assert(Known.
isUnknown() && 
"should not be called with known information");
 
  762  MachineInstr &
MI = *MRI.getVRegDef(R);
 
  763  unsigned Opcode = 
MI.getOpcode();
 
  764  LLT DstTy = MRI.getType(R);
 
  772    switch (Cst->getKind()) {
 
  774      auto APF = Cst->getScalarValue();
 
  776      Known.
SignBit = APF.isNegative();
 
  781      bool SignBitAllZero = 
true;
 
  782      bool SignBitAllOne = 
true;
 
  784      for (
auto C : *Cst) {
 
  787          SignBitAllZero = 
false;
 
  789          SignBitAllOne = 
false;
 
  792      if (SignBitAllOne != SignBitAllZero)
 
  808    KnownNotFromFlags |= 
fcNan;
 
  810    KnownNotFromFlags |= 
fcInf;
 
  814  InterestedClasses &= ~KnownNotFromFlags;
 
  816  auto ClearClassesFromFlags =
 
  823  const MachineFunction *MF = 
MI.getMF();
 
  827    TL.computeKnownFPClassForTargetInstr(*
this, R, Known, DemandedElts, MRI,
 
  830  case TargetOpcode::G_FNEG: {
 
  832    computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known, 
Depth + 1);
 
  836  case TargetOpcode::G_SELECT: {
 
  859      bool LookThroughFAbsFNeg = CmpLHS != 
LHS && CmpLHS != 
RHS;
 
  860      std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
 
  866      MaskIfTrue = TestedMask;
 
  867      MaskIfFalse = ~TestedMask;
 
  870    if (TestedValue == 
LHS) {
 
  872      FilterLHS = MaskIfTrue;
 
  873    } 
else if (TestedValue == 
RHS) { 
 
  875      FilterRHS = MaskIfFalse;
 
  879    computeKnownFPClass(
LHS, DemandedElts, InterestedClasses & FilterLHS, Known,
 
  883    computeKnownFPClass(
RHS, DemandedElts, InterestedClasses & FilterRHS,
 
  890  case TargetOpcode::G_FCOPYSIGN: {
 
  891    Register Magnitude = 
MI.getOperand(1).getReg();
 
  894    KnownFPClass KnownSign;
 
  896    computeKnownFPClass(Magnitude, DemandedElts, InterestedClasses, Known,
 
  898    computeKnownFPClass(Sign, DemandedElts, InterestedClasses, KnownSign,
 
  903  case TargetOpcode::G_FMA:
 
  904  case TargetOpcode::G_STRICT_FMA:
 
  905  case TargetOpcode::G_FMAD: {
 
  920    KnownFPClass KnownAddend;
 
  921    computeKnownFPClass(
C, DemandedElts, InterestedClasses, KnownAddend,
 
  928  case TargetOpcode::G_FSQRT:
 
  929  case TargetOpcode::G_STRICT_FSQRT: {
 
  930    KnownFPClass KnownSrc;
 
  932    if (InterestedClasses & 
fcNan)
 
  937    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, 
Depth + 1);
 
  952  case TargetOpcode::G_FABS: {
 
  957      computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
 
  963  case TargetOpcode::G_FSIN:
 
  964  case TargetOpcode::G_FCOS:
 
  965  case TargetOpcode::G_FSINCOS: {
 
  968    KnownFPClass KnownSrc;
 
  970    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
 
  978  case TargetOpcode::G_FMAXNUM:
 
  979  case TargetOpcode::G_FMINNUM:
 
  980  case TargetOpcode::G_FMINNUM_IEEE:
 
  981  case TargetOpcode::G_FMAXIMUM:
 
  982  case TargetOpcode::G_FMINIMUM:
 
  983  case TargetOpcode::G_FMAXNUM_IEEE:
 
  984  case TargetOpcode::G_FMAXIMUMNUM:
 
  985  case TargetOpcode::G_FMINIMUMNUM: {
 
  988    KnownFPClass KnownLHS, KnownRHS;
 
  990    computeKnownFPClass(
LHS, DemandedElts, InterestedClasses, KnownLHS,
 
  992    computeKnownFPClass(
RHS, DemandedElts, InterestedClasses, KnownRHS,
 
  996    Known = KnownLHS | KnownRHS;
 
  999    if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||
 
 1000                     Opcode == TargetOpcode::G_FMAXNUM ||
 
 1001                     Opcode == TargetOpcode::G_FMINIMUMNUM ||
 
 1002                     Opcode == TargetOpcode::G_FMAXIMUMNUM))
 
 1005    if (Opcode == TargetOpcode::G_FMAXNUM ||
 
 1006        Opcode == TargetOpcode::G_FMAXIMUMNUM ||
 
 1007        Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
 
 1015    } 
else if (Opcode == TargetOpcode::G_FMAXIMUM) {
 
 1021    } 
else if (Opcode == TargetOpcode::G_FMINNUM ||
 
 1022               Opcode == TargetOpcode::G_FMINIMUMNUM ||
 
 1023               Opcode == TargetOpcode::G_FMINNUM_IEEE) {
 
 1031    } 
else if (Opcode == TargetOpcode::G_FMINIMUM) {
 
 1063      } 
else if ((Opcode == TargetOpcode::G_FMAXIMUM ||
 
 1064                  Opcode == TargetOpcode::G_FMINIMUM) ||
 
 1065                 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
 
 1066                 Opcode == TargetOpcode::G_FMINIMUMNUM ||
 
 1067                 Opcode == TargetOpcode::G_FMAXNUM_IEEE ||
 
 1068                 Opcode == TargetOpcode::G_FMINNUM_IEEE ||
 
 1074        if ((Opcode == TargetOpcode::G_FMAXIMUM ||
 
 1075             Opcode == TargetOpcode::G_FMAXNUM ||
 
 1076             Opcode == TargetOpcode::G_FMAXIMUMNUM ||
 
 1077             Opcode == TargetOpcode::G_FMAXNUM_IEEE) &&
 
 1080        else if ((Opcode == TargetOpcode::G_FMINIMUM ||
 
 1081                  Opcode == TargetOpcode::G_FMINNUM ||
 
 1082                  Opcode == TargetOpcode::G_FMINIMUMNUM ||
 
 1083                  Opcode == TargetOpcode::G_FMINNUM_IEEE) &&
 
 1090  case TargetOpcode::G_FCANONICALIZE: {
 
 1092    KnownFPClass KnownSrc;
 
 1093    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
 
 1115    DenormalMode DenormMode = MF->getDenormalMode(FPType);
 
 1134  case TargetOpcode::G_VECREDUCE_FMAX:
 
 1135  case TargetOpcode::G_VECREDUCE_FMIN:
 
 1136  case TargetOpcode::G_VECREDUCE_FMAXIMUM:
 
 1137  case TargetOpcode::G_VECREDUCE_FMINIMUM: {
 
 1143        computeKnownFPClass(Val, 
MI.getFlags(), InterestedClasses, 
Depth + 1);
 
 1149  case TargetOpcode::G_TRUNC:
 
 1150  case TargetOpcode::G_FFLOOR:
 
 1151  case TargetOpcode::G_FCEIL:
 
 1152  case TargetOpcode::G_FRINT:
 
 1153  case TargetOpcode::G_FNEARBYINT:
 
 1154  case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
 
 1155  case TargetOpcode::G_INTRINSIC_ROUND: {
 
 1157    KnownFPClass KnownSrc;
 
 1163    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, 
Depth + 1);
 
 1180  case TargetOpcode::G_FEXP:
 
 1181  case TargetOpcode::G_FEXP2:
 
 1182  case TargetOpcode::G_FEXP10: {
 
 1188    KnownFPClass KnownSrc;
 
 1189    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
 
 1198  case TargetOpcode::G_FLOG:
 
 1199  case TargetOpcode::G_FLOG2:
 
 1200  case TargetOpcode::G_FLOG10: {
 
 1215    KnownFPClass KnownSrc;
 
 1216    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, 
Depth + 1);
 
 1226    DenormalMode 
Mode = MF->getDenormalMode(FltSem);
 
 1233  case TargetOpcode::G_FPOWI: {
 
 1238    LLT ExpTy = MRI.getType(Exp);
 
 1240        Exp, ExpTy.
isVector() ? DemandedElts : APInt(1, 1), 
Depth + 1);
 
 1242    if (ExponentKnownBits.
Zero[0]) { 
 
 1256    KnownFPClass KnownSrc;
 
 1257    computeKnownFPClass(Val, DemandedElts, 
fcNegative, KnownSrc, 
Depth + 1);
 
 1262  case TargetOpcode::G_FLDEXP:
 
 1263  case TargetOpcode::G_STRICT_FLDEXP: {
 
 1265    KnownFPClass KnownSrc;
 
 1266    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
 
 1283    if ((InterestedClasses & ExpInfoMask) == 
fcNone)
 
 1292  case TargetOpcode::G_INTRINSIC_ROUNDEVEN: {
 
 1293    computeKnownFPClassForFPTrunc(
MI, DemandedElts, InterestedClasses, Known,
 
 1297  case TargetOpcode::G_FADD:
 
 1298  case TargetOpcode::G_STRICT_FADD:
 
 1299  case TargetOpcode::G_FSUB:
 
 1300  case TargetOpcode::G_STRICT_FSUB: {
 
 1303    KnownFPClass KnownLHS, KnownRHS;
 
 1305        (Opcode == TargetOpcode::G_FADD ||
 
 1306         Opcode == TargetOpcode::G_STRICT_FADD) &&
 
 1308    bool WantNaN = (InterestedClasses & 
fcNan) != 
fcNone;
 
 1311    if (!WantNaN && !WantNegative && !WantNegZero)
 
 1317    if (InterestedClasses & 
fcNan)
 
 1318      InterestedSrcs |= 
fcInf;
 
 1319    computeKnownFPClass(
RHS, DemandedElts, InterestedSrcs, KnownRHS, 
Depth + 1);
 
 1324        (Opcode == TargetOpcode::G_FSUB ||
 
 1325         Opcode == TargetOpcode::G_STRICT_FSUB)) {
 
 1329      computeKnownFPClass(
LHS, DemandedElts, InterestedSrcs, KnownLHS,
 
 1337      if (Opcode == Instruction::FAdd) {
 
 1364  case TargetOpcode::G_FMUL:
 
 1365  case TargetOpcode::G_STRICT_FMUL: {
 
 1378    KnownFPClass KnownLHS, KnownRHS;
 
 1379    computeKnownFPClass(
RHS, DemandedElts, NeedForNan, KnownRHS, 
Depth + 1);
 
 1383    computeKnownFPClass(
LHS, DemandedElts, NeedForNan, KnownLHS, 
Depth + 1);
 
 1410  case TargetOpcode::G_FDIV:
 
 1411  case TargetOpcode::G_FREM: {
 
 1417      if (Opcode == TargetOpcode::G_FDIV) {
 
 1428    const bool WantNan = (InterestedClasses & 
fcNan) != 
fcNone;
 
 1430    const bool WantPositive = Opcode == TargetOpcode::G_FREM &&
 
 1432    if (!WantNan && !WantNegative && !WantPositive)
 
 1435    KnownFPClass KnownLHS, KnownRHS;
 
 1438                        KnownRHS, 
Depth + 1);
 
 1440    bool KnowSomethingUseful =
 
 1443    if (KnowSomethingUseful || WantPositive) {
 
 1448      computeKnownFPClass(
LHS, DemandedElts, InterestedClasses & InterestedLHS,
 
 1449                          KnownLHS, 
Depth + 1);
 
 1452    if (Opcode == Instruction::FDiv) {
 
 1493  case TargetOpcode::G_FPEXT: {
 
 1497    computeKnownFPClass(R, DemandedElts, InterestedClasses, Known, 
Depth + 1);
 
 1501    LLT SrcTy = MRI.getType(Src).getScalarType();
 
 1518  case TargetOpcode::G_FPTRUNC: {
 
 1519    computeKnownFPClassForFPTrunc(
MI, DemandedElts, InterestedClasses, Known,
 
 1523  case TargetOpcode::G_SITOFP:
 
 1524  case TargetOpcode::G_UITOFP: {
 
 1533    if (Opcode == TargetOpcode::G_UITOFP)
 
 1537    LLT Ty = MRI.getType(Val);
 
 1539    if (InterestedClasses & 
fcInf) {
 
 1544      if (Opcode == TargetOpcode::G_SITOFP)
 
 1558  case TargetOpcode::G_BUILD_VECTOR:
 
 1559  case TargetOpcode::G_CONCAT_VECTORS: {
 
 1566    for (
unsigned Idx = 0; Idx < 
Merge.getNumSources(); ++Idx) {
 
 1568      bool NeedsElt = DemandedElts[Idx];
 
 1574          computeKnownFPClass(Src, Known, InterestedClasses, 
Depth + 1);
 
 1577          KnownFPClass Known2;
 
 1578          computeKnownFPClass(Src, Known2, InterestedClasses, 
Depth + 1);
 
 1590  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
 
 1600    LLT VecTy = MRI.getType(Vec);
 
 1605      if (CIdx && CIdx->ult(NumElts))
 
 1607      return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
 
 1613  case TargetOpcode::G_INSERT_VECTOR_ELT: {
 
 1619    LLT VecTy = MRI.getType(Vec);
 
 1627    APInt DemandedVecElts = DemandedElts;
 
 1628    bool NeedsElt = 
true;
 
 1630    if (CIdx && CIdx->ult(NumElts)) {
 
 1631      DemandedVecElts.
clearBit(CIdx->getZExtValue());
 
 1632      NeedsElt = DemandedElts[CIdx->getZExtValue()];
 
 1637      computeKnownFPClass(Elt, Known, InterestedClasses, 
Depth + 1);
 
 1646    if (!DemandedVecElts.
isZero()) {
 
 1647      KnownFPClass Known2;
 
 1648      computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
 
 1655  case TargetOpcode::G_SHUFFLE_VECTOR: {
 
 1659    APInt DemandedLHS, DemandedRHS;
 
 1661      assert(DemandedElts == APInt(1, 1));
 
 1662      DemandedLHS = DemandedRHS = DemandedElts;
 
 1665                                        DemandedElts, DemandedLHS,
 
 1672    if (!!DemandedLHS) {
 
 1674      computeKnownFPClass(
LHS, DemandedLHS, InterestedClasses, Known,
 
 1684    if (!!DemandedRHS) {
 
 1685      KnownFPClass Known2;
 
 1687      computeKnownFPClass(
RHS, DemandedRHS, InterestedClasses, Known2,
 
 1693  case TargetOpcode::COPY: {
 
 1696    if (!Src.isVirtual())
 
 1699    computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, 
Depth + 1);
 
 1710  computeKnownFPClass(R, DemandedElts, InterestedClasses, KnownClasses, 
Depth);
 
 1711  return KnownClasses;
 
 
 1717  computeKnownFPClass(R, Known, InterestedClasses, 
Depth);
 
 
 1725    InterestedClasses &= 
~fcNan;
 
 1727    InterestedClasses &= 
~fcInf;
 
 1730      computeKnownFPClass(R, DemandedElts, InterestedClasses, 
Depth);
 
 1733    Result.KnownFPClasses &= 
~fcNan;
 
 1735    Result.KnownFPClasses &= 
~fcInf;
 
 
 1741  LLT Ty = MRI.getType(R);
 
 1742  APInt DemandedElts =
 
 1744  return computeKnownFPClass(R, DemandedElts, Flags, InterestedClasses, 
Depth);
 
 
 1748unsigned GISelValueTracking::computeNumSignBitsMin(
Register Src0, 
Register Src1,
 
 1749                                                   const APInt &DemandedElts,
 
 1753  if (Src1SignBits == 1)
 
 1770    case TargetOpcode::G_SEXTLOAD:
 
 1773    case TargetOpcode::G_ZEXTLOAD:
 
 
 1786                                                const APInt &DemandedElts,
 
 1789  unsigned Opcode = 
MI.getOpcode();
 
 1791  if (Opcode == TargetOpcode::G_CONSTANT)
 
 1792    return MI.getOperand(1).getCImm()->getValue().getNumSignBits();
 
 1800  LLT DstTy = MRI.getType(R);
 
 1810  unsigned FirstAnswer = 1;
 
 1812  case TargetOpcode::COPY: {
 
 1814    if (Src.getReg().isVirtual() && Src.getSubReg() == 0 &&
 
 1815        MRI.getType(Src.getReg()).isValid()) {
 
 1822  case TargetOpcode::G_SEXT: {
 
 1824    LLT SrcTy = MRI.getType(Src);
 
 1828  case TargetOpcode::G_ASSERT_SEXT:
 
 1829  case TargetOpcode::G_SEXT_INREG: {
 
 1832    unsigned SrcBits = 
MI.getOperand(2).getImm();
 
 1833    unsigned InRegBits = TyBits - SrcBits + 1;
 
 1837  case TargetOpcode::G_LOAD: {
 
 1844  case TargetOpcode::G_SEXTLOAD: {
 
 1859  case TargetOpcode::G_ZEXTLOAD: {
 
 1874  case TargetOpcode::G_AND:
 
 1875  case TargetOpcode::G_OR:
 
 1876  case TargetOpcode::G_XOR: {
 
 1878    unsigned Src1NumSignBits =
 
 1880    if (Src1NumSignBits != 1) {
 
 1882      unsigned Src2NumSignBits =
 
 1884      FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits);
 
 1888  case TargetOpcode::G_ASHR: {
 
 1893      FirstAnswer = std::min<uint64_t>(FirstAnswer + *
C, TyBits);
 
 1896  case TargetOpcode::G_SHL: {
 
 1899    if (std::optional<ConstantRange> ShAmtRange =
 
 1901      uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
 
 1902      uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
 
 1912      if (ExtOpc == TargetOpcode::G_SEXT || ExtOpc == TargetOpcode::G_ZEXT ||
 
 1913          ExtOpc == TargetOpcode::G_ANYEXT) {
 
 1914        LLT ExtTy = MRI.getType(Src1);
 
 1916        LLT ExtendeeTy = MRI.getType(Extendee);
 
 1920        if (SizeDiff <= MinShAmt) {
 
 1924            return Tmp - MaxShAmt;
 
 1930        return Tmp - MaxShAmt;
 
 1934  case TargetOpcode::G_TRUNC: {
 
 1936    LLT SrcTy = MRI.getType(Src);
 
 1940    unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
 
 1942    if (NumSrcSignBits > (NumSrcBits - DstTyBits))
 
 1943      return NumSrcSignBits - (NumSrcBits - DstTyBits);
 
 1946  case TargetOpcode::G_SELECT: {
 
 1947    return computeNumSignBitsMin(
MI.getOperand(2).getReg(),
 
 1948                                 MI.getOperand(3).getReg(), DemandedElts,
 
 1951  case TargetOpcode::G_SMIN:
 
 1952  case TargetOpcode::G_SMAX:
 
 1953  case TargetOpcode::G_UMIN:
 
 1954  case TargetOpcode::G_UMAX:
 
 1956    return computeNumSignBitsMin(
MI.getOperand(1).getReg(),
 
 1957                                 MI.getOperand(2).getReg(), DemandedElts,
 
 1959  case TargetOpcode::G_SADDO:
 
 1960  case TargetOpcode::G_SADDE:
 
 1961  case TargetOpcode::G_UADDO:
 
 1962  case TargetOpcode::G_UADDE:
 
 1963  case TargetOpcode::G_SSUBO:
 
 1964  case TargetOpcode::G_SSUBE:
 
 1965  case TargetOpcode::G_USUBO:
 
 1966  case TargetOpcode::G_USUBE:
 
 1967  case TargetOpcode::G_SMULO:
 
 1968  case TargetOpcode::G_UMULO: {
 
 1972    if (
MI.getOperand(1).getReg() == R) {
 
 1973      if (TL.getBooleanContents(DstTy.
isVector(), 
false) ==
 
 1980  case TargetOpcode::G_SUB: {
 
 1982    unsigned Src2NumSignBits =
 
 1984    if (Src2NumSignBits == 1)
 
 1994      if ((Known2.
Zero | 1).isAllOnes())
 
 2001        FirstAnswer = Src2NumSignBits;
 
 2008    unsigned Src1NumSignBits =
 
 2010    if (Src1NumSignBits == 1)
 
 2015    FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
 
 2018  case TargetOpcode::G_ADD: {
 
 2020    unsigned Src2NumSignBits =
 
 2022    if (Src2NumSignBits <= 2)
 
 2026    unsigned Src1NumSignBits =
 
 2028    if (Src1NumSignBits == 1)
 
 2037      if ((Known1.
Zero | 1).isAllOnes())
 
 2043        FirstAnswer = Src1NumSignBits;
 
 2052    FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
 
 2055  case TargetOpcode::G_FCMP:
 
 2056  case TargetOpcode::G_ICMP: {
 
 2057    bool IsFP = Opcode == TargetOpcode::G_FCMP;
 
 2060    auto BC = TL.getBooleanContents(DstTy.
isVector(), IsFP);
 
 2067  case TargetOpcode::G_BUILD_VECTOR: {
 
 2069    FirstAnswer = TyBits;
 
 2070    APInt SingleDemandedElt(1, 1);
 
 2072      if (!DemandedElts[
I])
 
 2077      FirstAnswer = std::min(FirstAnswer, Tmp2);
 
 2080      if (FirstAnswer == 1)
 
 2085  case TargetOpcode::G_CONCAT_VECTORS: {
 
 2086    if (MRI.getType(
MI.getOperand(0).getReg()).isScalableVector())
 
 2088    FirstAnswer = TyBits;
 
 2091    unsigned NumSubVectorElts =
 
 2092        MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
 
 2095          DemandedElts.
extractBits(NumSubVectorElts, 
I * NumSubVectorElts);
 
 2100      FirstAnswer = std::min(FirstAnswer, Tmp2);
 
 2103      if (FirstAnswer == 1)
 
 2108  case TargetOpcode::G_SHUFFLE_VECTOR: {
 
 2111    APInt DemandedLHS, DemandedRHS;
 
 2113    unsigned NumElts = MRI.getType(Src1).getNumElements();
 
 2115                                DemandedElts, DemandedLHS, DemandedRHS))
 
 2121    if (FirstAnswer == 1)
 
 2123    if (!!DemandedRHS) {
 
 2126      FirstAnswer = std::min(FirstAnswer, Tmp2);
 
 2130  case TargetOpcode::G_SPLAT_VECTOR: {
 
 2134    unsigned NumSrcBits = MRI.getType(Src).getSizeInBits();
 
 2135    if (NumSrcSignBits > (NumSrcBits - TyBits))
 
 2136      return NumSrcSignBits - (NumSrcBits - TyBits);
 
 2139  case TargetOpcode::G_INTRINSIC:
 
 2140  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
 
 2141  case TargetOpcode::G_INTRINSIC_CONVERGENT:
 
 2142  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
 
 2145        TL.computeNumSignBitsForTargetInstr(*
this, R, DemandedElts, MRI, 
Depth);
 
 2147      FirstAnswer = std::max(FirstAnswer, NumBits);
 
 2167  Mask <<= Mask.getBitWidth() - TyBits;
 
 2168  return std::max(FirstAnswer, Mask.countl_one());
 
 
 2172  LLT Ty = MRI.getType(R);
 
 2173  APInt DemandedElts =
 
 
 2182  unsigned Opcode = 
MI.getOpcode();
 
 2184  LLT Ty = MRI.getType(R);
 
 2185  unsigned BitWidth = Ty.getScalarSizeInBits();
 
 2187  if (Opcode == TargetOpcode::G_CONSTANT) {
 
 2188    const APInt &ShAmt = 
MI.getOperand(1).getCImm()->getValue();
 
 2190      return std::nullopt;
 
 2194  if (Opcode == TargetOpcode::G_BUILD_VECTOR) {
 
 2195    const APInt *MinAmt = 
nullptr, *MaxAmt = 
nullptr;
 
 2196    for (
unsigned I = 0, E = 
MI.getNumOperands() - 1; 
I != E; ++
I) {
 
 2197      if (!DemandedElts[
I])
 
 2200      if (
Op->getOpcode() != TargetOpcode::G_CONSTANT) {
 
 2201        MinAmt = MaxAmt = 
nullptr;
 
 2205      const APInt &ShAmt = 
Op->getOperand(1).getCImm()->getValue();
 
 2207        return std::nullopt;
 
 2208      if (!MinAmt || MinAmt->
ugt(ShAmt))
 
 2210      if (!MaxAmt || MaxAmt->ult(ShAmt))
 
 2213    assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
 
 2214           "Failed to find matching min/max shift amounts");
 
 2215    if (MinAmt && MaxAmt)
 
 2225  return std::nullopt;
 
 
 2230  if (std::optional<ConstantRange> AmtRange =
 
 2232    return AmtRange->getUnsignedMin().getZExtValue();
 
 2233  return std::nullopt;
 
 
 2251    Info = std::make_unique<GISelValueTracking>(MF, MaxDepth);
 
 
 2276        if (!MO.isReg() || MO.getReg().isPhysical())
 
 2279        if (!
MRI.getType(Reg).isValid())
 
 2281        KnownBits Known = VTA.getKnownBits(Reg);
 
 2282        unsigned SignedBits = VTA.computeNumSignBits(Reg);
 
 2283        OS << 
"  " << MO << 
" KnownBits:" << Known << 
" SignBits:" << SignedBits
 
 
unsigned const MachineRegisterInfo * MRI
 
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
 
This file declares a class to represent arbitrary precision floating point values and provide a varie...
 
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
 
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
 
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
 
Utilities for dealing with flags related to floating point properties and mode controls.
 
static void dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth)
 
static unsigned computeNumSignBitsFromRangeMetadata(const GAnyLoad *Ld, unsigned TyBits)
Compute the known number of sign bits with attached range metadata in the memory operand.
 
Provides analysis for querying information about KnownBits during GISel passes.
 
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
 
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
 
Implement a low-level type suitable for MachineInstr level instruction selection.
 
Contains matchers for matching SSA Machine Instructions.
 
Promote Memory to Register
 
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
 
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
 
const SmallVectorImpl< MachineOperand > & Cond
 
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
 
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
 
This file describes how to lower LLVM code to machine code.
 
static bool outputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty)
 
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
 
static LLVM_ABI bool isRepresentableAsNormalIn(const fltSemantics &Src, const fltSemantics &Dst)
 
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
 
Class for arbitrary precision integers.
 
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
 
void clearBit(unsigned BitPosition)
Set a given bit to 0.
 
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
 
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
 
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
 
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
 
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
 
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
 
unsigned getBitWidth() const
Return the number of bits in the APInt.
 
bool ult(const APInt &RHS) const
Unsigned less than comparison.
 
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
 
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
 
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
 
void setAllBits()
Set every bit to 1.
 
APInt shl(unsigned shiftAmt) const
Left-shift function.
 
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
 
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
 
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
 
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
 
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
 
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
 
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
 
Represent the analysis usage information of a pass.
 
void setPreservesAll()
Set by analyses that do not transform their input at all.
 
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
 
This class represents a range of values.
 
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
 
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type. The returned range will correspond to the possible range of values if the source range had been zero extended to BitWidth.
 
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
 
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type. The returned range will correspond to the possible range of values if the source range had been sign extended to BitWidth.
 
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
 
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
 
Represents any generic load, including sign/zero extending variants.
 
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
 
static LLVM_ABI std::optional< GFConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
 
To use KnownBitsInfo analysis in a pass, KnownBitsInfo &Info = getAnalysis<GISelValueTrackingInfoAnalysis>(); (class name truncated in extraction — verify against the header).
 
GISelValueTracking & get(MachineFunction &MF)
 
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformation or analysis.
 
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
 
GISelValueTracking Result
 
LLVM_ABI Result run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
 
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
 
unsigned getMaxDepth() const
 
KnownBits getKnownBits(Register R)
 
Align computeKnownAlignment(Register R, unsigned Depth=0)
 
std::optional< ConstantRange > getValidShiftAmountRange(Register R, const APInt &DemandedElts, unsigned Depth)
If a G_SHL/G_ASHR/G_LSHR node with shift operand R has shift amounts that are all less than the element size of R, return the valid range of shift amounts.
 
bool maskedValueIsZero(Register Val, const APInt &Mask)
 
std::optional< uint64_t > getValidMinimumShiftAmount(Register R, const APInt &DemandedElts, unsigned Depth=0)
If a G_SHL/G_ASHR/G_LSHR node with shift operand R has shift amounts that are all less than the element size of R, return the minimum possible shift amount.
 
bool signBitIsZero(Register Op)
 
const DataLayout & getDataLayout() const
 
unsigned computeNumSignBits(Register R, const APInt &DemandedElts, unsigned Depth=0)
 
APInt getKnownOnes(Register R)
 
KnownBits getKnownBits(MachineInstr &MI)
 
APInt getKnownZeroes(Register R)
 
void computeKnownBitsImpl(Register R, KnownBits &Known, const APInt &DemandedElts, unsigned Depth=0)
 
Register getCondReg() const
 
Register getFalseReg() const
 
Register getTrueReg() const
 
Register getSrc2Reg() const
 
Register getSrc1Reg() const
 
ArrayRef< int > getMask() const
 
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
 
constexpr unsigned getScalarSizeInBits() const
 
constexpr bool isValid() const
 
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
 
constexpr bool isVector() const
 
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
 
constexpr LLT getScalarType() const
 
TypeSize getValue() const
 
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
 
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
 
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
 
Function & getFunction()
Return the LLVM function that this machine code represents.
 
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
 
Representation of each machine instruction.
 
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
 
const MachineOperand & getOperand(unsigned i) const
 
A description of a memory reference used in the backend.
 
LLT getMemoryType() const
Return the memory type of the memory reference.
 
const MDNode * getRanges() const
Return the range tag for the memory reference.
 
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
 
MachineOperand class - Representation of each machine instruction operand.
 
Register getReg() const
getReg - Returns the register number.
 
A set of analyses that are preserved following a run of a transformation pass.
 
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
 
Wrapper class representing virtual and physical registers.
 
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
 
@ ZeroOrOneBooleanContent
 
@ ZeroOrNegativeOneBooleanContent
 
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
 
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
 
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
 
@ C
The default llvm calling convention, compatible with C.
 
operand_type_match m_Reg()
 
operand_type_match m_Pred()
 
bind_ty< FPClassTest > m_FPClassTest(FPClassTest &T)
 
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
 
ClassifyOp_match< LHS, Test, TargetOpcode::G_IS_FPCLASS > m_GIsFPClass(const LHS &L, const Test &T)
Matches the register and immediate used in a fpclass test G_IS_FPCLASS val, 96.
 
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_FCMP > m_GFCmp(const Pred &P, const LHS &L, const RHS &R)
 
This is an optimization pass for GlobalISel generic memory operations.
 
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
 
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
 
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
 
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), such that A is the 0-based index of the item in the sequence, and B, C, ..., are the values from the original input ranges.
 
LLVM_ABI const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
 
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
 
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
 
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
 
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
 
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
 
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, FPClassTest RHSClass, bool LookThroughSrc=true)
 
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operands, returning true if successful.
 
constexpr unsigned MaxAnalysisRecursionDepth
 
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
 
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
 
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
 
DWARFExpression::Operation Op
 
std::string toString(const APInt &I, unsigned Radix, bool Signed, bool formatAsCLiteral=false, bool UpperCase=true, bool InsertSeparators=false)
 
constexpr unsigned BitWidth
 
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
 
static uint32_t extractBits(uint64_t Val, uint32_t Hi, uint32_t Lo)
 
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
 
This struct is a compact representation of a valid (non-zero power of two) alignment.
 
A special type used by analysis passes to provide an address that identifies that particular analysis...
 
Represent subnormal handling kind for floating point instruction inputs and outputs.
 
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
 
constexpr bool outputsAreZero() const
Return true if output denormals should be flushed to 0.
 
@ PositiveZero
Denormals are flushed to positive zero.
 
@ IEEE
IEEE-754 denormal numbers preserved.
 
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
 
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environment.
 
static constexpr DenormalMode getIEEE()
 
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
 
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
 
LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
 
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
 
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
 
bool isNonNegative() const
Returns true if this value is known to be non-negative.
 
bool isZero() const
Returns true if value is all zero.
 
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
 
bool isUnknown() const
Returns true if we don't know any bits.
 
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
 
KnownBits byteSwap() const
 
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
 
KnownBits reverseBits() const
 
unsigned getBitWidth() const
Get the bit width of this value.
 
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
 
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
 
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
 
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
 
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
 
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
 
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
 
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
 
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
 
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
 
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
 
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
 
bool isNegative() const
Returns true if this value is known to be negative.
 
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
 
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
 
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
 
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
 
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
 
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
 
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
 
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
 
bool isAllOnes() const
Returns true if value is all one bits.
 
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
 
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
 
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater than zero.
 
static constexpr FPClassTest OrderedGreaterThanZeroMask
 
static constexpr FPClassTest OrderedLessThanZeroMask
 
void knownNot(FPClassTest RuleOut)
 
void copysign(const KnownFPClass &Sign)
 
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
 
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
 
bool isKnownNeverPosZero() const
Return true if it's known this can never be a literal positive zero.
 
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set, or false if the sign bit is definitely unset.
 
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
 
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
 
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
 
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
 
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -0.0.
 
void signBitMustBeOne()
Assume the sign bit is one.
 
void signBitMustBeZero()
Assume the sign bit is zero.
 
LLVM_ABI bool isKnownNeverLogicalPosZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a positive zero.
 
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
 
LLVM_ABI bool isKnownNeverLogicalNegZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a negative zero.