57#include "llvm/IR/IntrinsicsAArch64.h"
58#include "llvm/IR/IntrinsicsAMDGPU.h"
59#include "llvm/IR/IntrinsicsRISCV.h"
60#include "llvm/IR/IntrinsicsX86.h"
97 return DL.getPointerTypeSizeInBits(Ty);
109 CxtI = dyn_cast<Instruction>(V);
123 CxtI = dyn_cast<Instruction>(V1);
127 CxtI = dyn_cast<Instruction>(V2);
135 const APInt &DemandedElts,
137 if (isa<ScalableVectorType>(Shuf->
getType())) {
139 DemandedLHS = DemandedRHS = DemandedElts;
146 DemandedElts, DemandedLHS, DemandedRHS);
158 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
186 V, DemandedElts,
Depth,
255 "LHS and RHS should have the same type");
257 "LHS and RHS should be integers");
268 return !
I->user_empty() &&
all_of(
I->users(), [](
const User *U) {
269 return match(U, m_ICmp(m_Value(), m_Zero()));
274 return !
I->user_empty() &&
all_of(
I->users(), [](
const User *U) {
276 return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
281 bool OrZero,
unsigned Depth,
284 return ::isKnownToBeAPowerOfTwo(
299 if (
auto *CI = dyn_cast<ConstantInt>(V))
300 return CI->getValue().isStrictlyPositive();
323 if (V1 == V2 || V1->
getType() != V2->getType())
325 auto *FVTy = dyn_cast<FixedVectorType>(V1->
getType());
328 return ::isKnownNonEqual(
329 V1, V2, DemandedElts, 0,
337 return Mask.isSubsetOf(Known.
Zero);
345 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
355 return ::ComputeNumSignBits(
364 return V->getType()->getScalarSizeInBits() - SignBits + 1;
369 const APInt &DemandedElts,
376 if (KnownOut.
isUnknown() && !NSW && !NUW)
384 bool NUW,
const APInt &DemandedElts,
401 bool isKnownNegativeOp0 = Known2.
isNegative();
404 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
416 (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
418 (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.
isNonZero());
422 bool SelfMultiply = Op0 == Op1;
442 unsigned NumRanges = Ranges.getNumOperands() / 2;
448 for (
unsigned i = 0; i < NumRanges; ++i) {
450 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
452 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
456 unsigned CommonPrefixBits =
460 Known.
One &= UnsignedMax & Mask;
461 Known.
Zero &= ~UnsignedMax & Mask;
476 while (!WorkSet.
empty()) {
478 if (!Visited.
insert(V).second)
483 return EphValues.count(U);
488 if (V ==
I || (isa<Instruction>(V) &&
490 !cast<Instruction>(V)->isTerminator())) {
492 if (
const User *U = dyn_cast<User>(V))
504 return CI->isAssumeLikeIntrinsic();
512 bool AllowEphemerals) {
530 if (!AllowEphemerals && Inv == CxtI)
566 if (Pred == ICmpInst::ICMP_UGT)
570 if (Pred == ICmpInst::ICMP_NE)
581 auto *VC = dyn_cast<ConstantDataVector>(
RHS);
585 for (
unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem;
588 Pred, VC->getElementAsAPInt(ElemIdx));
607 "Got assumption for the wrong function!");
610 if (!V->getType()->isPointerTy())
613 *
I,
I->bundle_op_info_begin()[Elem.Index])) {
615 (RK.AttrKind == Attribute::NonNull ||
616 (RK.AttrKind == Attribute::Dereferenceable &&
618 V->getType()->getPointerAddressSpace()))) &&
650 case ICmpInst::ICMP_EQ:
653 case ICmpInst::ICMP_SGE:
654 case ICmpInst::ICMP_SGT:
657 case ICmpInst::ICMP_SLT:
675 case ICmpInst::ICMP_EQ:
685 Known.
Zero |= ~*
C & *Mask;
691 Known.
One |= *
C & ~*Mask;
712 Known.
Zero |= RHSKnown.
Zero << ShAmt;
713 Known.
One |= RHSKnown.
One << ShAmt;
716 case ICmpInst::ICMP_NE: {
732 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
738 (*
C + (Pred == ICmpInst::ICMP_UGT)).countLeadingOnes());
740 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
746 (*
C - (Pred == ICmpInst::ICMP_ULT)).countLeadingZeros());
758 Invert ? Cmp->getInversePredicate() : Cmp->getPredicate();
791 if (
auto *Cmp = dyn_cast<ICmpInst>(
Cond))
836 "Got assumption for the wrong function!");
839 if (!V->getType()->isPointerTy())
842 *
I,
I->bundle_op_info_begin()[Elem.Index])) {
846 if (RK.WasOn == V && RK.AttrKind == Attribute::Alignment &&
858 Value *Arg =
I->getArgOperand(0);
878 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
914 Known = KF(Known2, Known, ShAmtNonZero);
925 Value *
X =
nullptr, *
Y =
nullptr;
927 switch (
I->getOpcode()) {
928 case Instruction::And:
929 KnownOut = KnownLHS & KnownRHS;
939 KnownOut = KnownLHS.
blsi();
941 KnownOut = KnownRHS.
blsi();
944 case Instruction::Or:
945 KnownOut = KnownLHS | KnownRHS;
947 case Instruction::Xor:
948 KnownOut = KnownLHS ^ KnownRHS;
958 const KnownBits &XBits =
I->getOperand(0) ==
X ? KnownLHS : KnownRHS;
959 KnownOut = XBits.
blsmsk();
972 if (!KnownOut.
Zero[0] && !KnownOut.
One[0] &&
993 APInt DemandedEltsLHS, DemandedEltsRHS;
995 DemandedElts, DemandedEltsLHS,
998 const auto ComputeForSingleOpFunc =
1000 return KnownBitsFunc(
1005 if (DemandedEltsRHS.
isZero())
1006 return ComputeForSingleOpFunc(
I->getOperand(0), DemandedEltsLHS);
1007 if (DemandedEltsLHS.
isZero())
1008 return ComputeForSingleOpFunc(
I->getOperand(1), DemandedEltsRHS);
1010 return ComputeForSingleOpFunc(
I->getOperand(0), DemandedEltsLHS)
1011 .intersectWith(ComputeForSingleOpFunc(
I->getOperand(1), DemandedEltsRHS));
1020 auto *FVTy = dyn_cast<FixedVectorType>(
I->getType());
1021 APInt DemandedElts =
1029 Attribute Attr =
F->getFnAttribute(Attribute::VScaleRange);
1037 return ConstantRange::getEmpty(
BitWidth);
1087 "Input should be a Select!");
1097 const Value *LHS2 =
nullptr, *RHS2 =
nullptr;
1109 return CLow->
sle(*CHigh);
1114 const APInt *&CHigh) {
1115 assert((
II->getIntrinsicID() == Intrinsic::smin ||
1116 II->getIntrinsicID() == Intrinsic::smax) &&
1117 "Must be smin/smax");
1120 auto *InnerII = dyn_cast<IntrinsicInst>(
II->getArgOperand(0));
1121 if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
1126 if (
II->getIntrinsicID() == Intrinsic::smin)
1128 return CLow->
sle(*CHigh);
1133 const APInt *CLow, *CHigh;
1140 const APInt &DemandedElts,
1146 switch (
I->getOpcode()) {
1148 case Instruction::Load:
1153 case Instruction::And:
1159 case Instruction::Or:
1165 case Instruction::Xor:
1171 case Instruction::Mul: {
1175 DemandedElts, Known, Known2,
Depth, Q);
1178 case Instruction::UDiv: {
1185 case Instruction::SDiv: {
1192 case Instruction::Select: {
1193 auto ComputeForArm = [&](
Value *Arm,
bool Invert) {
1201 ComputeForArm(
I->getOperand(1),
false)
1205 case Instruction::FPTrunc:
1206 case Instruction::FPExt:
1207 case Instruction::FPToUI:
1208 case Instruction::FPToSI:
1209 case Instruction::SIToFP:
1210 case Instruction::UIToFP:
1212 case Instruction::PtrToInt:
1213 case Instruction::IntToPtr:
1216 case Instruction::ZExt:
1217 case Instruction::Trunc: {
1218 Type *SrcTy =
I->getOperand(0)->getType();
1220 unsigned SrcBitWidth;
1228 assert(SrcBitWidth &&
"SrcBitWidth can't be zero");
1231 if (
auto *Inst = dyn_cast<PossiblyNonNegInst>(
I);
1232 Inst && Inst->hasNonNeg() && !Known.
isNegative())
1237 case Instruction::BitCast: {
1238 Type *SrcTy =
I->getOperand(0)->getType();
1242 !
I->getType()->isVectorTy()) {
1250 V->getType()->isFPOrFPVectorTy()) {
1251 Type *FPType = V->getType()->getScalarType();
1264 if (FPClasses &
fcInf)
1276 if (Result.SignBit) {
1277 if (*Result.SignBit)
1287 auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
1288 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
1289 !
I->getType()->isIntOrIntVectorTy() ||
1290 isa<ScalableVectorType>(
I->getType()))
1295 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
1312 unsigned SubScale =
BitWidth / SubBitWidth;
1314 for (
unsigned i = 0; i != NumElts; ++i) {
1315 if (DemandedElts[i])
1316 SubDemandedElts.
setBit(i * SubScale);
1320 for (
unsigned i = 0; i != SubScale; ++i) {
1324 Known.
insertBits(KnownSrc, ShiftElt * SubBitWidth);
1329 case Instruction::SExt: {
1331 unsigned SrcBitWidth =
I->getOperand(0)->getType()->getScalarSizeInBits();
1333 Known = Known.
trunc(SrcBitWidth);
1340 case Instruction::Shl: {
1344 bool ShAmtNonZero) {
1345 return KnownBits::shl(KnownVal, KnownAmt, NUW, NSW, ShAmtNonZero);
1355 case Instruction::LShr: {
1356 bool Exact = Q.
IIQ.
isExact(cast<BinaryOperator>(
I));
1358 bool ShAmtNonZero) {
1369 case Instruction::AShr: {
1370 bool Exact = Q.
IIQ.
isExact(cast<BinaryOperator>(
I));
1372 bool ShAmtNonZero) {
1379 case Instruction::Sub: {
1383 DemandedElts, Known, Known2,
Depth, Q);
1386 case Instruction::Add: {
1390 DemandedElts, Known, Known2,
Depth, Q);
1393 case Instruction::SRem:
1399 case Instruction::URem:
1404 case Instruction::Alloca:
1407 case Instruction::GetElementPtr: {
1416 for (
unsigned i = 1, e =
I->getNumOperands(); i != e; ++i, ++GTI) {
1421 Value *Index =
I->getOperand(i);
1424 Constant *CIndex = dyn_cast<Constant>(Index);
1432 "Access to structure field must be known at compile time");
1437 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1440 AccConstIndices +=
Offset;
1451 unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1465 APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1466 IndexConst *= ScalingFactor;
1490 case Instruction::PHI: {
1493 Value *R =
nullptr, *L =
nullptr;
1506 case Instruction::LShr:
1507 case Instruction::AShr:
1508 case Instruction::Shl:
1509 case Instruction::UDiv:
1516 case Instruction::URem: {
1529 case Instruction::Shl:
1533 case Instruction::LShr:
1534 case Instruction::UDiv:
1535 case Instruction::URem:
1540 case Instruction::AShr:
1552 case Instruction::Add:
1553 case Instruction::Sub:
1554 case Instruction::And:
1555 case Instruction::Or:
1556 case Instruction::Mul: {
1563 unsigned OpNum =
P->getOperand(0) == R ? 0 : 1;
1564 Instruction *RInst =
P->getIncomingBlock(OpNum)->getTerminator();
1565 Instruction *LInst =
P->getIncomingBlock(1 - OpNum)->getTerminator();
1580 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1594 case Instruction::Add: {
1604 case Instruction::Sub: {
1615 case Instruction::Mul:
1632 if (
P->getNumIncomingValues() == 0)
1639 if (isa_and_nonnull<UndefValue>(
P->hasConstantValue()))
1644 for (
unsigned u = 0, e =
P->getNumIncomingValues(); u < e; ++u) {
1645 Value *IncValue =
P->getIncomingValue(u);
1647 if (IncValue ==
P)
continue;
1651 if (
auto *SI = dyn_cast<SelectInst>(IncValue)) {
1652 if (SI->getTrueValue() ==
P || SI->getFalseValue() ==
P)
1653 IncValue = SI->getTrueValue() ==
P ? SI->getFalseValue()
1654 : SI->getTrueValue();
1662 RecQ.
CxtI =
P->getIncomingBlock(u)->getTerminator();
1684 if ((TrueSucc ==
P->getParent()) != (FalseSucc ==
P->getParent())) {
1686 if (FalseSucc ==
P->getParent())
1700 Known2 = KnownUnion;
1714 case Instruction::Call:
1715 case Instruction::Invoke: {
1723 const auto *CB = cast<CallBase>(
I);
1725 if (std::optional<ConstantRange>
Range = CB->getRange())
1728 if (
const Value *RV = CB->getReturnedArgOperand()) {
1729 if (RV->getType() ==
I->getType()) {
1741 switch (
II->getIntrinsicID()) {
1744 case Intrinsic::abs: {
1746 bool IntMinIsPoison =
match(
II->getArgOperand(1),
m_One());
1747 Known = Known2.
abs(IntMinIsPoison);
1750 case Intrinsic::bitreverse:
1755 case Intrinsic::bswap:
1760 case Intrinsic::ctlz: {
1766 PossibleLZ = std::min(PossibleLZ,
BitWidth - 1);
1771 case Intrinsic::cttz: {
1777 PossibleTZ = std::min(PossibleTZ,
BitWidth - 1);
1782 case Intrinsic::ctpop: {
1793 case Intrinsic::fshr:
1794 case Intrinsic::fshl: {
1801 if (
II->getIntrinsicID() == Intrinsic::fshr)
1814 case Intrinsic::uadd_sat:
1819 case Intrinsic::usub_sat:
1824 case Intrinsic::sadd_sat:
1829 case Intrinsic::ssub_sat:
1835 case Intrinsic::vector_reverse:
1841 case Intrinsic::vector_reduce_and:
1842 case Intrinsic::vector_reduce_or:
1843 case Intrinsic::vector_reduce_umax:
1844 case Intrinsic::vector_reduce_umin:
1845 case Intrinsic::vector_reduce_smax:
1846 case Intrinsic::vector_reduce_smin:
1849 case Intrinsic::vector_reduce_xor: {
1854 auto *VecTy = cast<VectorType>(
I->getOperand(0)->getType());
1856 bool EvenCnt = VecTy->getElementCount().isKnownEven();
1860 if (VecTy->isScalableTy() || EvenCnt)
1864 case Intrinsic::umin:
1869 case Intrinsic::umax:
1874 case Intrinsic::smin:
1880 case Intrinsic::smax:
1886 case Intrinsic::ptrmask: {
1889 const Value *Mask =
I->getOperand(1);
1890 Known2 =
KnownBits(Mask->getType()->getScalarSizeInBits());
1896 case Intrinsic::x86_sse2_pmulh_w:
1897 case Intrinsic::x86_avx2_pmulh_w:
1898 case Intrinsic::x86_avx512_pmulh_w_512:
1903 case Intrinsic::x86_sse2_pmulhu_w:
1904 case Intrinsic::x86_avx2_pmulhu_w:
1905 case Intrinsic::x86_avx512_pmulhu_w_512:
1910 case Intrinsic::x86_sse42_crc32_64_64:
1913 case Intrinsic::x86_ssse3_phadd_d_128:
1914 case Intrinsic::x86_ssse3_phadd_w_128:
1915 case Intrinsic::x86_avx2_phadd_d:
1916 case Intrinsic::x86_avx2_phadd_w: {
1918 I, DemandedElts,
Depth, Q,
1924 case Intrinsic::x86_ssse3_phadd_sw_128:
1925 case Intrinsic::x86_avx2_phadd_sw: {
1930 case Intrinsic::x86_ssse3_phsub_d_128:
1931 case Intrinsic::x86_ssse3_phsub_w_128:
1932 case Intrinsic::x86_avx2_phsub_d:
1933 case Intrinsic::x86_avx2_phsub_w: {
1935 I, DemandedElts,
Depth, Q,
1941 case Intrinsic::x86_ssse3_phsub_sw_128:
1942 case Intrinsic::x86_avx2_phsub_sw: {
1947 case Intrinsic::riscv_vsetvli:
1948 case Intrinsic::riscv_vsetvlimax: {
1949 bool HasAVL =
II->getIntrinsicID() == Intrinsic::riscv_vsetvli;
1952 cast<ConstantInt>(
II->getArgOperand(HasAVL))->getZExtValue());
1954 cast<ConstantInt>(
II->getArgOperand(1 + HasAVL))->getZExtValue());
1961 if (
auto *CI = dyn_cast<ConstantInt>(
II->getArgOperand(0)))
1962 MaxVL = std::min(MaxVL, CI->getZExtValue());
1964 unsigned KnownZeroFirstBit =
Log2_32(MaxVL) + 1;
1969 case Intrinsic::vscale: {
1970 if (!
II->getParent() || !
II->getFunction())
1980 case Instruction::ShuffleVector: {
1981 auto *Shuf = dyn_cast<ShuffleVectorInst>(
I);
1989 APInt DemandedLHS, DemandedRHS;
1996 if (!!DemandedLHS) {
1997 const Value *
LHS = Shuf->getOperand(0);
2003 if (!!DemandedRHS) {
2004 const Value *
RHS = Shuf->getOperand(1);
2010 case Instruction::InsertElement: {
2011 if (isa<ScalableVectorType>(
I->getType())) {
2015 const Value *Vec =
I->getOperand(0);
2016 const Value *Elt =
I->getOperand(1);
2017 auto *CIdx = dyn_cast<ConstantInt>(
I->getOperand(2));
2019 APInt DemandedVecElts = DemandedElts;
2020 bool NeedsElt =
true;
2022 if (CIdx && CIdx->getValue().ult(NumElts)) {
2023 DemandedVecElts.
clearBit(CIdx->getZExtValue());
2024 NeedsElt = DemandedElts[CIdx->getZExtValue()];
2036 if (!DemandedVecElts.
isZero()) {
2042 case Instruction::ExtractElement: {
2045 const Value *Vec =
I->getOperand(0);
2047 auto *CIdx = dyn_cast<ConstantInt>(
Idx);
2048 if (isa<ScalableVectorType>(Vec->
getType())) {
2053 unsigned NumElts = cast<FixedVectorType>(Vec->
getType())->getNumElements();
2055 if (CIdx && CIdx->getValue().ult(NumElts))
2060 case Instruction::ExtractValue:
2065 switch (
II->getIntrinsicID()) {
2067 case Intrinsic::uadd_with_overflow:
2068 case Intrinsic::sadd_with_overflow:
2070 true,
II->getArgOperand(0),
II->getArgOperand(1),
false,
2071 false, DemandedElts, Known, Known2,
Depth, Q);
2073 case Intrinsic::usub_with_overflow:
2074 case Intrinsic::ssub_with_overflow:
2076 false,
II->getArgOperand(0),
II->getArgOperand(1),
false,
2077 false, DemandedElts, Known, Known2,
Depth, Q);
2079 case Intrinsic::umul_with_overflow:
2080 case Intrinsic::smul_with_overflow:
2082 false, DemandedElts, Known, Known2,
Depth, Q);
2088 case Instruction::Freeze:
2132 if (!DemandedElts) {
2138 assert(V &&
"No Value?");
2142 Type *Ty = V->getType();
2146 "Not integer or pointer type!");
2148 if (
auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2150 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
2151 "DemandedElt width should equal the fixed vector number of elements");
2154 "DemandedElt width should be 1 for scalars or scalable vectors");
2160 "V and Known should have same BitWidth");
2163 "V and Known should have same BitWidth");
2174 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
2181 assert(!isa<ScalableVectorType>(V->getType()));
2185 for (
unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
2186 if (!DemandedElts[i])
2188 APInt Elt = CDV->getElementAsAPInt(i);
2197 if (
const auto *CV = dyn_cast<ConstantVector>(V)) {
2198 assert(!isa<ScalableVectorType>(V->getType()));
2202 for (
unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
2203 if (!DemandedElts[i])
2206 if (isa<PoisonValue>(Element))
2208 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
2213 const APInt &Elt = ElementCI->getValue();
2226 if (isa<UndefValue>(V))
2231 assert(!isa<ConstantData>(V) &&
"Unhandled constant data!");
2233 if (
const auto *
A = dyn_cast<Argument>(V))
2234 if (std::optional<ConstantRange>
Range =
A->getRange())
2243 if (
const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2244 if (!GA->isInterposable())
2249 if (
const Operator *
I = dyn_cast<Operator>(V))
2251 else if (
const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2252 if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange())
2253 Known = CR->toKnownBits();
2257 if (isa<PointerType>(V->getType())) {
2258 Align Alignment = V->getPointerAlignment(Q.
DL);
2274 Value *Start =
nullptr, *Step =
nullptr;
2280 if (U.get() == Start) {
2296 case Instruction::Mul:
2301 case Instruction::SDiv:
2307 case Instruction::UDiv:
2313 case Instruction::Shl:
2315 case Instruction::AShr:
2319 case Instruction::LShr:
2337 Pred = ICmpInst::getInversePredicate(Pred);
2339 if (OrZero && Pred == ICmpInst::ICMP_ULT && *RHSC == 2)
2342 return Pred == ICmpInst::ICMP_EQ && *RHSC == 1;
2353 if (isa<Constant>(V))
2357 if (OrZero && V->getType()->getScalarSizeInBits() == 1)
2392 auto *
I = dyn_cast<Instruction>(V);
2399 return F->hasFnAttribute(Attribute::VScaleRange);
2416 switch (
I->getOpcode()) {
2417 case Instruction::ZExt:
2419 case Instruction::Trunc:
2421 case Instruction::Shl:
2425 case Instruction::LShr:
2426 if (OrZero || Q.
IIQ.
isExact(cast<BinaryOperator>(
I)))
2429 case Instruction::UDiv:
2433 case Instruction::Mul:
2437 case Instruction::And:
2448 case Instruction::Add: {
2454 if (
match(
I->getOperand(0),
2458 if (
match(
I->getOperand(1),
2463 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2472 if ((~(LHSBits.
Zero & RHSBits.
Zero)).isPowerOf2())
2485 case Instruction::Select:
2488 case Instruction::PHI: {
2492 auto *PN = cast<PHINode>(
I);
2509 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2510 return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ);
2513 case Instruction::Invoke:
2514 case Instruction::Call: {
2515 if (
auto *
II = dyn_cast<IntrinsicInst>(
I)) {
2516 switch (
II->getIntrinsicID()) {
2517 case Intrinsic::umax:
2518 case Intrinsic::smax:
2519 case Intrinsic::umin:
2520 case Intrinsic::smin:
2525 case Intrinsic::bitreverse:
2526 case Intrinsic::bswap:
2528 case Intrinsic::fshr:
2529 case Intrinsic::fshl:
2531 if (
II->getArgOperand(0) ==
II->getArgOperand(1))
2555 F =
I->getFunction();
2559 if (!
GEP->hasNoUnsignedWrap() &&
2560 !(
GEP->isInBounds() &&
2565 assert(
GEP->getType()->isPointerTy() &&
"We only support plain pointer GEP");
2576 GTI != GTE; ++GTI) {
2578 if (
StructType *STy = GTI.getStructTypeOrNull()) {
2579 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2583 if (ElementOffset > 0)
2589 if (GTI.getSequentialElementStride(Q.
DL).isZero())
2594 if (
ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2618 assert(!isa<Constant>(V) &&
"Called for constant?");
2623 unsigned NumUsesExplored = 0;
2624 for (
const auto *U : V->users()) {
2632 if (
const auto *CB = dyn_cast<CallBase>(U))
2633 if (
auto *CalledFunc = CB->getCalledFunction())
2634 for (
const Argument &Arg : CalledFunc->args())
2635 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2636 Arg.hasNonNullAttr(
false) &&
2644 V->getType()->getPointerAddressSpace()) &&
2662 NonNullIfTrue =
true;
2664 NonNullIfTrue =
false;
2670 for (
const auto *CmpU : U->users()) {
2672 if (Visited.
insert(CmpU).second)
2675 while (!WorkList.
empty()) {
2684 for (
const auto *CurrU : Curr->users())
2685 if (Visited.
insert(CurrU).second)
2690 if (
const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2691 assert(BI->isConditional() &&
"uses a comparison!");
2694 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2698 }
else if (NonNullIfTrue &&
isGuard(Curr) &&
2699 DT->
dominates(cast<Instruction>(Curr), CtxI)) {
2713 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2715 for (
unsigned i = 0; i < NumRanges; ++i) {
2717 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2719 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2731 Value *Start =
nullptr, *Step =
nullptr;
2732 const APInt *StartC, *StepC;
2738 case Instruction::Add:
2744 case Instruction::Mul:
2747 case Instruction::Shl:
2749 case Instruction::AShr:
2750 case Instruction::LShr:
2766 Value *
Y,
bool NSW,
bool NUW) {
2819 if (
auto *
C = dyn_cast<Constant>(
X))
2823 return ::isKnownNonEqual(
X,
Y, DemandedElts,
Depth, Q);
2828 Value *
Y,
bool NSW,
bool NUW) {
2857 auto ShiftOp = [&](
const APInt &Lhs,
const APInt &Rhs) {
2858 switch (
I->getOpcode()) {
2859 case Instruction::Shl:
2860 return Lhs.
shl(Rhs);
2861 case Instruction::LShr:
2862 return Lhs.
lshr(Rhs);
2863 case Instruction::AShr:
2864 return Lhs.
ashr(Rhs);
2870 auto InvShiftOp = [&](
const APInt &Lhs,
const APInt &Rhs) {
2871 switch (
I->getOpcode()) {
2872 case Instruction::Shl:
2873 return Lhs.
lshr(Rhs);
2874 case Instruction::LShr:
2875 case Instruction::AShr:
2876 return Lhs.
shl(Rhs);
2889 if (MaxShift.
uge(NumBits))
2892 if (!ShiftOp(KnownVal.
One, MaxShift).isZero())
2897 if (InvShiftOp(KnownVal.
Zero, NumBits - MaxShift)
2906 const APInt &DemandedElts,
2909 switch (
I->getOpcode()) {
2910 case Instruction::Alloca:
2912 return I->getType()->getPointerAddressSpace() == 0;
2913 case Instruction::GetElementPtr:
2914 if (
I->getType()->isPointerTy())
2917 case Instruction::BitCast: {
2945 Type *FromTy =
I->getOperand(0)->getType();
2950 case Instruction::IntToPtr:
2954 if (!isa<ScalableVectorType>(
I->getType()) &&
2959 case Instruction::PtrToInt:
2962 if (!isa<ScalableVectorType>(
I->getType()) &&
2967 case Instruction::Trunc:
2969 if (
auto *TI = dyn_cast<TruncInst>(
I))
2970 if (TI->hasNoSignedWrap() || TI->hasNoUnsignedWrap())
2974 case Instruction::Sub:
2977 case Instruction::Xor:
2982 case Instruction::Or:
2989 case Instruction::SExt:
2990 case Instruction::ZExt:
2994 case Instruction::Shl: {
3009 case Instruction::LShr:
3010 case Instruction::AShr: {
3025 case Instruction::UDiv:
3026 case Instruction::SDiv: {
3029 if (cast<PossiblyExactOperator>(
I)->isExact())
3041 if (
I->getOpcode() == Instruction::SDiv) {
3043 XKnown = XKnown.
abs(
false);
3044 YKnown = YKnown.
abs(
false);
3050 return XUgeY && *XUgeY;
3052 case Instruction::Add: {
3057 auto *BO = cast<OverflowingBinaryOperator>(
I);
3062 case Instruction::Mul: {
3068 case Instruction::Select: {
3075 auto SelectArmIsNonZero = [&](
bool IsTrueArm) {
3077 Op = IsTrueArm ?
I->getOperand(1) :
I->getOperand(2);
3090 Pred = ICmpInst::getInversePredicate(Pred);
3095 if (SelectArmIsNonZero(
true) &&
3096 SelectArmIsNonZero(
false))
3100 case Instruction::PHI: {
3101 auto *PN = cast<PHINode>(
I);
3111 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
3115 BasicBlock *TrueSucc, *FalseSucc;
3116 if (match(RecQ.CxtI,
3117 m_Br(m_c_ICmp(Pred, m_Specific(U.get()), m_Value(X)),
3118 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
3120 if ((TrueSucc == PN->getParent()) != (FalseSucc == PN->getParent())) {
3122 if (FalseSucc == PN->getParent())
3123 Pred = CmpInst::getInversePredicate(Pred);
3124 if (cmpExcludesZero(Pred, X))
3132 case Instruction::InsertElement: {
3133 if (isa<ScalableVectorType>(
I->getType()))
3136 const Value *Vec =
I->getOperand(0);
3137 const Value *Elt =
I->getOperand(1);
3138 auto *CIdx = dyn_cast<ConstantInt>(
I->getOperand(2));
3141 APInt DemandedVecElts = DemandedElts;
3142 bool SkipElt =
false;
3144 if (CIdx && CIdx->getValue().ult(NumElts)) {
3145 DemandedVecElts.
clearBit(CIdx->getZExtValue());
3146 SkipElt = !DemandedElts[CIdx->getZExtValue()];
3152 (DemandedVecElts.
isZero() ||
3155 case Instruction::ExtractElement:
3156 if (
const auto *EEI = dyn_cast<ExtractElementInst>(
I)) {
3157 const Value *Vec = EEI->getVectorOperand();
3158 const Value *
Idx = EEI->getIndexOperand();
3159 auto *CIdx = dyn_cast<ConstantInt>(
Idx);
3160 if (
auto *VecTy = dyn_cast<FixedVectorType>(Vec->
getType())) {
3161 unsigned NumElts = VecTy->getNumElements();
3163 if (CIdx && CIdx->getValue().ult(NumElts))
3169 case Instruction::ShuffleVector: {
3170 auto *Shuf = dyn_cast<ShuffleVectorInst>(
I);
3173 APInt DemandedLHS, DemandedRHS;
3179 return (DemandedRHS.
isZero() ||
3184 case Instruction::Freeze:
3188 case Instruction::Load: {
3189 auto *LI = cast<LoadInst>(
I);
3192 if (
auto *PtrT = dyn_cast<PointerType>(
I->getType())) {
3205 case Instruction::ExtractValue: {
3211 case Instruction::Add:
3216 case Instruction::Sub:
3219 case Instruction::Mul:
3228 case Instruction::Call:
3229 case Instruction::Invoke: {
3230 const auto *Call = cast<CallBase>(
I);
3231 if (
I->getType()->isPointerTy()) {
3232 if (Call->isReturnNonNull())
3239 if (std::optional<ConstantRange>
Range = Call->getRange()) {
3244 if (
const Value *RV = Call->getReturnedArgOperand())
3249 if (
auto *
II = dyn_cast<IntrinsicInst>(
I)) {
3250 switch (
II->getIntrinsicID()) {
3251 case Intrinsic::sshl_sat:
3252 case Intrinsic::ushl_sat:
3253 case Intrinsic::abs:
3254 case Intrinsic::bitreverse:
3255 case Intrinsic::bswap:
3256 case Intrinsic::ctpop:
3260 case Intrinsic::ssub_sat:
3262 II->getArgOperand(0),
II->getArgOperand(1));
3263 case Intrinsic::sadd_sat:
3265 II->getArgOperand(0),
II->getArgOperand(1),
3268 case Intrinsic::vector_reverse:
3272 case Intrinsic::vector_reduce_or:
3273 case Intrinsic::vector_reduce_umax:
3274 case Intrinsic::vector_reduce_umin:
3275 case Intrinsic::vector_reduce_smax:
3276 case Intrinsic::vector_reduce_smin:
3278 case Intrinsic::umax:
3279 case Intrinsic::uadd_sat:
3287 case Intrinsic::smax: {
3290 auto IsNonZero = [&](
Value *
Op, std::optional<bool> &OpNonZero,
3292 if (!OpNonZero.has_value())
3293 OpNonZero = OpKnown.isNonZero() ||
3298 std::optional<bool> Op0NonZero, Op1NonZero;
3302 IsNonZero(
II->getArgOperand(1), Op1NonZero, Op1Known))
3307 IsNonZero(
II->getArgOperand(0), Op0NonZero, Op0Known))
3309 return IsNonZero(
II->getArgOperand(1), Op1NonZero, Op1Known) &&
3310 IsNonZero(
II->getArgOperand(0), Op0NonZero, Op0Known);
3312 case Intrinsic::smin: {
3328 case Intrinsic::umin:
3331 case Intrinsic::cttz:
3334 case Intrinsic::ctlz:
3337 case Intrinsic::fshr:
3338 case Intrinsic::fshl:
3340 if (
II->getArgOperand(0) ==
II->getArgOperand(1))
3343 case Intrinsic::vscale:
3345 case Intrinsic::experimental_get_vector_length:
3359 return Known.
One != 0;
3370 Type *Ty = V->getType();
3375 if (
auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3377 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
3378 "DemandedElt width should equal the fixed vector number of elements");
3381 "DemandedElt width should be 1 for scalars");
3385 if (
auto *
C = dyn_cast<Constant>(V)) {
3386 if (
C->isNullValue())
3388 if (isa<ConstantInt>(
C))
3394 if (
auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
3395 for (
unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
3396 if (!DemandedElts[i])
3398 Constant *Elt =
C->getAggregateElement(i);
3401 if (!isa<PoisonValue>(Elt) && !isa<ConstantInt>(Elt))
3408 if (
auto *CPA = dyn_cast<ConstantPtrAuth>(V))
3414 if (
const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
3415 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
3416 GV->getType()->getAddressSpace() == 0)
3421 if (!isa<ConstantExpr>(V))
3425 if (
const auto *
A = dyn_cast<Argument>(V))
3426 if (std::optional<ConstantRange>
Range =
A->getRange()) {
3441 if (
PointerType *PtrTy = dyn_cast<PointerType>(Ty)) {
3444 if (
const Argument *
A = dyn_cast<Argument>(V)) {
3445 if (((
A->hasPassPointeeByValueCopyAttr() &&
3447 A->hasNonNullAttr()))
3452 if (
const auto *
I = dyn_cast<Operator>(V))
3456 if (!isa<Constant>(V) &&
3465 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
3466 APInt DemandedElts =
3468 return ::isKnownNonZero(V, DemandedElts, Q,
Depth);
3477static std::optional<std::pair<Value*, Value*>>
3481 return std::nullopt;
3490 case Instruction::Or:
3491 if (!cast<PossiblyDisjointInst>(Op1)->isDisjoint() ||
3492 !cast<PossiblyDisjointInst>(Op2)->isDisjoint())
3495 case Instruction::Xor:
3496 case Instruction::Add: {
3504 case Instruction::Sub:
3510 case Instruction::Mul: {
3514 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
3515 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
3516 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3517 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3523 !cast<ConstantInt>(Op1->
getOperand(1))->isZero())
3527 case Instruction::Shl: {
3530 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
3531 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
3532 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3533 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3540 case Instruction::AShr:
3541 case Instruction::LShr: {
3542 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
3543 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
3544 if (!PEO1->isExact() || !PEO2->isExact())
3551 case Instruction::SExt:
3552 case Instruction::ZExt:
3556 case Instruction::PHI: {
3557 const PHINode *PN1 = cast<PHINode>(Op1);
3558 const PHINode *PN2 = cast<PHINode>(Op2);
3564 Value *Start1 =
nullptr, *Step1 =
nullptr;
3566 Value *Start2 =
nullptr, *Step2 =
nullptr;
3573 cast<Operator>(BO2));
3582 if (Values->first != PN1 || Values->second != PN2)
3585 return std::make_pair(Start1, Start2);
3588 return std::nullopt;
3603 case Instruction::Or:
3604 if (!cast<PossiblyDisjointInst>(V1)->isDisjoint())
3607 case Instruction::Xor:
3608 case Instruction::Add:
3626 if (
auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3629 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3630 !
C->isZero() && !
C->isOne() &&
3641 if (
auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3644 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3658 bool UsedFullRecursion =
false;
3660 if (!VisitedBBs.
insert(IncomBB).second)
3664 const APInt *C1, *C2;
3669 if (UsedFullRecursion)
3673 RecQ.
CxtI = IncomBB->getTerminator();
3676 UsedFullRecursion =
true;
3684 const SelectInst *SI1 = dyn_cast<SelectInst>(V1);
3688 if (
const SelectInst *SI2 = dyn_cast<SelectInst>(V2)) {
3690 const Value *Cond2 = SI2->getCondition();
3693 DemandedElts,
Depth + 1, Q) &&
3695 DemandedElts,
Depth + 1, Q);
3708 if (!
A->getType()->isPointerTy() || !
B->getType()->isPointerTy())
3711 auto *GEPA = dyn_cast<GEPOperator>(
A);
3712 if (!GEPA || GEPA->getNumIndices() != 1 || !isa<Constant>(GEPA->idx_begin()))
3716 auto *PN = dyn_cast<PHINode>(GEPA->getPointerOperand());
3717 if (!PN || PN->getNumIncomingValues() != 2)
3722 Value *Start =
nullptr;
3724 if (PN->getIncomingValue(0) == Step)
3725 Start = PN->getIncomingValue(1);
3726 else if (PN->getIncomingValue(1) == Step)
3727 Start = PN->getIncomingValue(0);
3738 APInt StartOffset(IndexWidth, 0);
3739 Start = Start->stripAndAccumulateInBoundsConstantOffsets(Q.
DL, StartOffset);
3740 APInt StepOffset(IndexWidth, 0);
3746 APInt OffsetB(IndexWidth, 0);
3747 B =
B->stripAndAccumulateInBoundsConstantOffsets(Q.
DL, OffsetB);
3748 return Start ==
B &&
3759 if (V1->
getType() != V2->getType())
3769 auto *O1 = dyn_cast<Operator>(V1);
3770 auto *O2 = dyn_cast<Operator>(V2);
3771 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
3776 if (
const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
3777 const PHINode *PN2 = cast<PHINode>(V2);
3832 const APInt &DemandedElts,
3834 const auto *CV = dyn_cast<Constant>(V);
3835 if (!CV || !isa<FixedVectorType>(CV->getType()))
3838 unsigned MinSignBits = TyBits;
3839 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
3840 for (
unsigned i = 0; i != NumElts; ++i) {
3841 if (!DemandedElts[i])
3844 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
3848 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
3855 const APInt &DemandedElts,
3861 assert(Result > 0 &&
"At least one sign bit needs to be present!");
3873 const APInt &DemandedElts,
3875 Type *Ty = V->getType();
3879 if (
auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3881 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
3882 "DemandedElt width should equal the fixed vector number of elements");
3885 "DemandedElt width should be 1 for scalars");
3899 unsigned FirstAnswer = 1;
3907 if (
auto *U = dyn_cast<Operator>(V)) {
3910 case Instruction::SExt:
3911 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
3915 case Instruction::SDiv: {
3916 const APInt *Denominator;
3929 return std::min(TyBits, NumBits + Denominator->
logBase2());
3934 case Instruction::SRem: {
3937 const APInt *Denominator;
3958 unsigned ResBits = TyBits - Denominator->
ceilLogBase2();
3959 Tmp = std::max(Tmp, ResBits);
3965 case Instruction::AShr: {
3970 if (ShAmt->
uge(TyBits))
3973 Tmp += ShAmtLimited;
3974 if (Tmp > TyBits) Tmp = TyBits;
3978 case Instruction::Shl: {
3983 if (ShAmt->
uge(TyBits))
3988 ShAmt->
uge(TyBits -
X->getType()->getScalarSizeInBits())) {
3990 Tmp += TyBits -
X->getType()->getScalarSizeInBits();
3994 if (ShAmt->
uge(Tmp))
4001 case Instruction::And:
4002 case Instruction::Or:
4003 case Instruction::Xor:
4008 FirstAnswer = std::min(Tmp, Tmp2);
4015 case Instruction::Select: {
4019 const APInt *CLow, *CHigh;
4027 return std::min(Tmp, Tmp2);
4030 case Instruction::Add:
4034 if (Tmp == 1)
break;
4037 if (
const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
4038 if (CRHS->isAllOnesValue()) {
4044 if ((Known.
Zero | 1).isAllOnes())
4056 return std::min(Tmp, Tmp2) - 1;
4058 case Instruction::Sub:
4064 if (
const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
4065 if (CLHS->isNullValue()) {
4070 if ((Known.
Zero | 1).isAllOnes())
4087 return std::min(Tmp, Tmp2) - 1;
4089 case Instruction::Mul: {
4092 unsigned SignBitsOp0 =
4094 if (SignBitsOp0 == 1)
4096 unsigned SignBitsOp1 =
4098 if (SignBitsOp1 == 1)
4100 unsigned OutValidBits =
4101 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
4102 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
4105 case Instruction::PHI: {
4106 const PHINode *PN = cast<PHINode>(U);
4109 if (NumIncomingValues > 4)
break;
4111 if (NumIncomingValues == 0)
break;
4117 for (
unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
4118 if (Tmp == 1)
return Tmp;
4121 DemandedElts,
Depth + 1, RecQ));
4126 case Instruction::Trunc: {
4131 unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
4132 if (Tmp > (OperandTyBits - TyBits))
4133 return Tmp - (OperandTyBits - TyBits);
4138 case Instruction::ExtractElement:
4145 case Instruction::ShuffleVector: {
4148 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
4153 APInt DemandedLHS, DemandedRHS;
4158 Tmp = std::numeric_limits<unsigned>::max();
4159 if (!!DemandedLHS) {
4160 const Value *
LHS = Shuf->getOperand(0);
4167 if (!!DemandedRHS) {
4168 const Value *
RHS = Shuf->getOperand(1);
4170 Tmp = std::min(Tmp, Tmp2);
4176 assert(Tmp <= TyBits &&
"Failed to determine minimum sign bits");
4179 case Instruction::Call: {
4180 if (
const auto *
II = dyn_cast<IntrinsicInst>(U)) {
4181 switch (
II->getIntrinsicID()) {
4184 case Intrinsic::abs:
4192 case Intrinsic::smin:
4193 case Intrinsic::smax: {
4194 const APInt *CLow, *CHigh;
4209 if (
unsigned VecSignBits =
4227 if (
F->isIntrinsic())
4228 return F->getIntrinsicID();
4234 if (
F->hasLocalLinkage() || !TLI || !TLI->
getLibFunc(CB, Func) ||
4244 return Intrinsic::sin;
4248 return Intrinsic::cos;
4252 return Intrinsic::tan;
4256 return Intrinsic::asin;
4260 return Intrinsic::acos;
4264 return Intrinsic::atan;
4266 case LibFunc_atan2f:
4267 case LibFunc_atan2l:
4268 return Intrinsic::atan2;
4272 return Intrinsic::sinh;
4276 return Intrinsic::cosh;
4280 return Intrinsic::tanh;
4284 return Intrinsic::exp;
4288 return Intrinsic::exp2;
4290 case LibFunc_exp10f:
4291 case LibFunc_exp10l:
4292 return Intrinsic::exp10;
4296 return Intrinsic::log;
4298 case LibFunc_log10f:
4299 case LibFunc_log10l:
4300 return Intrinsic::log10;
4304 return Intrinsic::log2;
4308 return Intrinsic::fabs;
4312 return Intrinsic::minnum;
4316 return Intrinsic::maxnum;
4317 case LibFunc_copysign:
4318 case LibFunc_copysignf:
4319 case LibFunc_copysignl:
4320 return Intrinsic::copysign;
4322 case LibFunc_floorf:
4323 case LibFunc_floorl:
4324 return Intrinsic::floor;
4328 return Intrinsic::ceil;
4330 case LibFunc_truncf:
4331 case LibFunc_truncl:
4332 return Intrinsic::trunc;
4336 return Intrinsic::rint;
4337 case LibFunc_nearbyint:
4338 case LibFunc_nearbyintf:
4339 case LibFunc_nearbyintl:
4340 return Intrinsic::nearbyint;
4342 case LibFunc_roundf:
4343 case LibFunc_roundl:
4344 return Intrinsic::round;
4345 case LibFunc_roundeven:
4346 case LibFunc_roundevenf:
4347 case LibFunc_roundevenl:
4348 return Intrinsic::roundeven;
4352 return Intrinsic::pow;
4356 return Intrinsic::sqrt;
4404 switch (Mode.Input) {
4424 if (!Src.isKnownNeverPosZero() && !Src.isKnownNeverNegZero())
4428 if (Src.isKnownNeverSubnormal())
4458 bool &TrueIfSigned) {
4461 TrueIfSigned =
true;
4462 return RHS.isZero();
4464 TrueIfSigned =
true;
4465 return RHS.isAllOnes();
4467 TrueIfSigned =
false;
4468 return RHS.isAllOnes();
4470 TrueIfSigned =
false;
4471 return RHS.isZero();
4474 TrueIfSigned =
true;
4475 return RHS.isMaxSignedValue();
4478 TrueIfSigned =
true;
4479 return RHS.isMinSignedValue();
4482 TrueIfSigned =
false;
4483 return RHS.isMinSignedValue();
4486 TrueIfSigned =
false;
4487 return RHS.isMaxSignedValue();
4498 bool LookThroughSrc) {
4506std::pair<Value *, FPClassTest>
4508 const APFloat *ConstRHS,
bool LookThroughSrc) {
4510 auto [Src, ClassIfTrue, ClassIfFalse] =
4512 if (Src && ClassIfTrue == ~ClassIfFalse)
4513 return {Src, ClassIfTrue};
4524std::tuple<Value *, FPClassTest, FPClassTest>
4538 const bool IsNegativeRHS = (RHSClass &
fcNegative) == RHSClass;
4539 const bool IsPositiveRHS = (RHSClass &
fcPositive) == RHSClass;
4540 const bool IsNaN = (RHSClass & ~fcNan) ==
fcNone;
4560 const bool IsZero = (OrigClass &
fcZero) == OrigClass;
4607 const bool IsDenormalRHS = (OrigClass &
fcSubnormal) == OrigClass;
4609 const bool IsInf = (OrigClass &
fcInf) == OrigClass;
4627 if (IsNegativeRHS) {
4650 if (IsNegativeRHS) {
4651 Mask = ~fcNegInf & ~fcNan;
4655 Mask = ~fcPosInf & ~fcNan;
4664 if (IsNegativeRHS) {
4684 if (IsNegativeRHS) {
4704 if (IsNegativeRHS) {
4719 if (IsNegativeRHS) {
4747 return {Src, Class, ~fcNan};
4751 return {Src, ~fcNan, RHSClass |
fcNan};
4760 "should have been recognized as an exact class test");
4762 if (IsNegativeRHS) {
4772 return {Src, ~fcNan,
fcNan};
4781 return {Src,
fcNan, ~fcNan};
4800 return {Src, ClassesGE, ~ClassesGE | RHSClass};
4803 return {Src, ClassesGE |
fcNan, ~(ClassesGE |
fcNan) | RHSClass};
4806 return {Src, ClassesLE, ~ClassesLE | RHSClass};
4809 return {Src, ClassesLE |
fcNan, ~(ClassesLE |
fcNan) | RHSClass};
4813 }
else if (IsPositiveRHS) {
4829 return {Src, ClassesGE, ~ClassesGE | RHSClass};
4832 return {Src, ClassesGE |
fcNan, ~(ClassesGE |
fcNan) | RHSClass};
4835 return {Src, ClassesLE, ~ClassesLE | RHSClass};
4838 return {Src, ClassesLE |
fcNan, ~(ClassesLE |
fcNan) | RHSClass};
4847std::tuple<Value *, FPClassTest, FPClassTest>
4849 const APFloat &ConstRHS,
bool LookThroughSrc) {
4897std::tuple<Value *, FPClassTest, FPClassTest>
4899 Value *RHS,
bool LookThroughSrc) {
4909 unsigned Depth,
bool CondIsTrue,
4931 KnownFromContext.
knownNot(~(CondIsTrue ? MaskIfTrue : MaskIfFalse));
4932 }
else if (
match(
Cond, m_Intrinsic<Intrinsic::is_fpclass>(
4935 KnownFromContext.
knownNot(CondIsTrue ? ~Mask : Mask);
4941 if (TrueIfSigned == CondIsTrue)
4953 return KnownFromContext;
4963 Q.
CxtI, KnownFromContext);
4968 Q.
CxtI, KnownFromContext);
4973 return KnownFromContext;
4983 "Got assumption for the wrong function!");
4984 assert(
I->getIntrinsicID() == Intrinsic::assume &&
4985 "must be an assume intrinsic");
4991 true, Q.
CxtI, KnownFromContext);
4994 return KnownFromContext;
5004 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
5005 APInt DemandedElts =
5011 const APInt &DemandedElts,
5015 if ((InterestedClasses &
5021 KnownSrc,
Depth + 1, Q);
5036 assert(Known.
isUnknown() &&
"should not be called with known information");
5038 if (!DemandedElts) {
5046 if (
auto *CFP = dyn_cast<ConstantFP>(V)) {
5048 Known.
SignBit = CFP->isNegative();
5052 if (isa<ConstantAggregateZero>(V)) {
5058 if (isa<PoisonValue>(V)) {
5065 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
5066 const Constant *CV = dyn_cast<Constant>(V);
5069 bool SignBitAllZero =
true;
5070 bool SignBitAllOne =
true;
5073 unsigned NumElts = VFVTy->getNumElements();
5074 for (
unsigned i = 0; i != NumElts; ++i) {
5075 if (!DemandedElts[i])
5083 if (isa<PoisonValue>(Elt))
5085 auto *CElt = dyn_cast<ConstantFP>(Elt);
5091 const APFloat &
C = CElt->getValueAPF();
5094 SignBitAllZero =
false;
5096 SignBitAllOne =
false;
5098 if (SignBitAllOne != SignBitAllZero)
5099 Known.
SignBit = SignBitAllOne;
5104 if (
const auto *CB = dyn_cast<CallBase>(V))
5105 KnownNotFromFlags |= CB->getRetNoFPClass();
5106 else if (
const auto *Arg = dyn_cast<Argument>(V))
5107 KnownNotFromFlags |= Arg->getNoFPClass();
5111 if (FPOp->hasNoNaNs())
5112 KnownNotFromFlags |=
fcNan;
5113 if (FPOp->hasNoInfs())
5114 KnownNotFromFlags |=
fcInf;
5118 KnownNotFromFlags |= ~AssumedClasses.KnownFPClasses;
5122 InterestedClasses &= ~KnownNotFromFlags;
5127 if (*AssumedClasses.SignBit)
5128 Known.signBitMustBeOne();
5130 Known.signBitMustBeZero();
5141 const unsigned Opc =
Op->getOpcode();
5143 case Instruction::FNeg: {
5145 Known,
Depth + 1, Q);
5149 case Instruction::Select: {
5157 Value *TestedValue =
nullptr;
5161 const Function *
F = cast<Instruction>(
Op)->getFunction();
5163 Value *CmpLHS, *CmpRHS;
5170 bool LookThroughFAbsFNeg = CmpLHS !=
LHS && CmpLHS !=
RHS;
5171 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
5174 m_Intrinsic<Intrinsic::is_fpclass>(
5177 MaskIfTrue = TestedMask;
5178 MaskIfFalse = ~TestedMask;
5181 if (TestedValue ==
LHS) {
5183 FilterLHS = MaskIfTrue;
5184 }
else if (TestedValue ==
RHS) {
5186 FilterRHS = MaskIfFalse;
5195 Known2,
Depth + 1, Q);
5201 case Instruction::Call: {
5205 case Intrinsic::fabs: {
5210 InterestedClasses, Known,
Depth + 1, Q);
5216 case Intrinsic::copysign: {
5220 Known,
Depth + 1, Q);
5222 KnownSign,
Depth + 1, Q);
5226 case Intrinsic::fma:
5227 case Intrinsic::fmuladd: {
5231 if (
II->getArgOperand(0) !=
II->getArgOperand(1))
5240 KnownAddend,
Depth + 1, Q);
5246 case Intrinsic::sqrt:
5247 case Intrinsic::experimental_constrained_sqrt: {
5250 if (InterestedClasses &
fcNan)
5254 KnownSrc,
Depth + 1, Q);
5277 case Intrinsic::sin:
5278 case Intrinsic::cos: {
5282 KnownSrc,
Depth + 1, Q);
5288 case Intrinsic::maxnum:
5289 case Intrinsic::minnum:
5290 case Intrinsic::minimum:
5291 case Intrinsic::maximum: {
5294 KnownLHS,
Depth + 1, Q);
5296 KnownRHS,
Depth + 1, Q);
5299 Known = KnownLHS | KnownRHS;
5302 if (NeverNaN && (IID == Intrinsic::minnum || IID == Intrinsic::maxnum))
5305 if (IID == Intrinsic::maxnum) {
5313 }
else if (IID == Intrinsic::maximum) {
5319 }
else if (IID == Intrinsic::minnum) {
5349 II->getType()->getScalarType()->getFltSemantics());
5361 }
else if ((IID == Intrinsic::maximum || IID == Intrinsic::minimum) ||
5366 if ((IID == Intrinsic::maximum || IID == Intrinsic::maxnum) &&
5369 else if ((IID == Intrinsic::minimum || IID == Intrinsic::minnum) &&
5376 case Intrinsic::canonicalize: {
5379 KnownSrc,
Depth + 1, Q);
5403 II->getType()->getScalarType()->getFltSemantics();
5423 case Intrinsic::vector_reduce_fmax:
5424 case Intrinsic::vector_reduce_fmin:
5425 case Intrinsic::vector_reduce_fmaximum:
5426 case Intrinsic::vector_reduce_fminimum: {
5430 InterestedClasses,
Depth + 1, Q);
5437 case Intrinsic::vector_reverse:
5440 II->getFastMathFlags(), InterestedClasses,
Depth + 1, Q);
5442 case Intrinsic::trunc:
5443 case Intrinsic::floor:
5444 case Intrinsic::ceil:
5445 case Intrinsic::rint:
5446 case Intrinsic::nearbyint:
5447 case Intrinsic::round:
5448 case Intrinsic::roundeven: {
5456 KnownSrc,
Depth + 1, Q);
5465 if (IID == Intrinsic::trunc || !V->getType()->isMultiUnitFPType()) {
5480 case Intrinsic::exp:
5481 case Intrinsic::exp2:
5482 case Intrinsic::exp10: {
5489 KnownSrc,
Depth + 1, Q);
5497 case Intrinsic::fptrunc_round: {
5502 case Intrinsic::log:
5503 case Intrinsic::log10:
5504 case Intrinsic::log2:
5505 case Intrinsic::experimental_constrained_log:
5506 case Intrinsic::experimental_constrained_log10:
5507 case Intrinsic::experimental_constrained_log2: {
5523 KnownSrc,
Depth + 1, Q);
5537 case Intrinsic::powi: {
5541 const Value *Exp =
II->getArgOperand(1);
5542 Type *ExpTy = Exp->getType();
5546 ExponentKnownBits,
Depth + 1, Q);
5548 if (ExponentKnownBits.
Zero[0]) {
5563 KnownSrc,
Depth + 1, Q);
5568 case Intrinsic::ldexp: {
5571 KnownSrc,
Depth + 1, Q);
5587 if ((InterestedClasses & ExpInfoMask) ==
fcNone)
5593 II->getType()->getScalarType()->getFltSemantics();
5595 const Value *ExpArg =
II->getArgOperand(1);
5599 const int MantissaBits = Precision - 1;
5605 if (ConstVal && ConstVal->
isZero()) {
5628 case Intrinsic::arithmetic_fence: {
5630 Known,
Depth + 1, Q);
5633 case Intrinsic::experimental_constrained_sitofp:
5634 case Intrinsic::experimental_constrained_uitofp:
5644 if (IID == Intrinsic::experimental_constrained_uitofp)
5655 case Instruction::FAdd:
5656 case Instruction::FSub: {
5659 Op->getOpcode() == Instruction::FAdd &&
5661 bool WantNaN = (InterestedClasses &
fcNan) !=
fcNone;
5664 if (!WantNaN && !WantNegative && !WantNegZero)
5670 if (InterestedClasses &
fcNan)
5671 InterestedSrcs |=
fcInf;
5673 KnownRHS,
Depth + 1, Q);
5677 WantNegZero || Opc == Instruction::FSub) {
5682 KnownLHS,
Depth + 1, Q);
5690 const Function *
F = cast<Instruction>(
Op)->getFunction();
5692 if (
Op->getOpcode() == Instruction::FAdd) {
5720 case Instruction::FMul: {
5722 if (
Op->getOperand(0) ==
Op->getOperand(1))
5755 const Function *
F = cast<Instruction>(
Op)->getFunction();
5767 case Instruction::FDiv:
5768 case Instruction::FRem: {
5769 if (
Op->getOperand(0) ==
Op->getOperand(1)) {
5771 if (
Op->getOpcode() == Instruction::FDiv) {
5782 const bool WantNan = (InterestedClasses &
fcNan) !=
fcNone;
5784 const bool WantPositive =
5786 if (!WantNan && !WantNegative && !WantPositive)
5795 bool KnowSomethingUseful =
5798 if (KnowSomethingUseful || WantPositive) {
5804 InterestedClasses & InterestedLHS, KnownLHS,
5808 const Function *
F = cast<Instruction>(
Op)->getFunction();
5810 if (
Op->getOpcode() == Instruction::FDiv) {
5847 case Instruction::FPExt: {
5850 Known,
Depth + 1, Q);
5853 Op->getType()->getScalarType()->getFltSemantics();
5855 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics();
5871 case Instruction::FPTrunc: {
5876 case Instruction::SIToFP:
5877 case Instruction::UIToFP: {
5886 if (
Op->getOpcode() == Instruction::UIToFP)
5889 if (InterestedClasses &
fcInf) {
5893 int IntSize =
Op->getOperand(0)->getType()->getScalarSizeInBits();
5894 if (
Op->getOpcode() == Instruction::SIToFP)
5899 Type *FPTy =
Op->getType()->getScalarType();
5906 case Instruction::ExtractElement: {
5909 const Value *Vec =
Op->getOperand(0);
5911 auto *CIdx = dyn_cast<ConstantInt>(
Idx);
5913 if (
auto *VecTy = dyn_cast<FixedVectorType>(Vec->
getType())) {
5914 unsigned NumElts = VecTy->getNumElements();
5916 if (CIdx && CIdx->getValue().ult(NumElts))
5924 case Instruction::InsertElement: {
5925 if (isa<ScalableVectorType>(
Op->getType()))
5928 const Value *Vec =
Op->getOperand(0);
5929 const Value *Elt =
Op->getOperand(1);
5930 auto *CIdx = dyn_cast<ConstantInt>(
Op->getOperand(2));
5932 APInt DemandedVecElts = DemandedElts;
5933 bool NeedsElt =
true;
5935 if (CIdx && CIdx->getValue().ult(NumElts)) {
5936 DemandedVecElts.
clearBit(CIdx->getZExtValue());
5937 NeedsElt = DemandedElts[CIdx->getZExtValue()];
5951 if (!DemandedVecElts.
isZero()) {
5960 case Instruction::ShuffleVector: {
5963 APInt DemandedLHS, DemandedRHS;
5964 auto *Shuf = dyn_cast<ShuffleVectorInst>(
Op);
5968 if (!!DemandedLHS) {
5969 const Value *
LHS = Shuf->getOperand(0);
5980 if (!!DemandedRHS) {
5982 const Value *
RHS = Shuf->getOperand(1);
5990 case Instruction::ExtractValue: {
5994 if (isa<StructType>(Src->getType()) && Indices.
size() == 1 &&
5996 if (
const auto *
II = dyn_cast<IntrinsicInst>(Src)) {
5997 switch (
II->getIntrinsicID()) {
5998 case Intrinsic::frexp: {
6003 InterestedClasses, KnownSrc,
Depth + 1, Q);
6005 const Function *
F = cast<Instruction>(
Op)->getFunction();
6038 case Instruction::PHI: {
6041 if (
P->getNumIncomingValues() == 0)
6048 if (
Depth < PhiRecursionLimit) {
6050 if (isa_and_nonnull<UndefValue>(
P->hasConstantValue()))
6055 for (
const Use &U :
P->operands()) {
6056 Value *IncValue = U.get();
6061 Instruction *CxtI =
P->getIncomingBlock(U)->getTerminator();
6069 }
else if (
auto *IncPhi = dyn_cast<PHINode>(IncValue);
6070 IncPhi && IncPhi->getNumIncomingValues() == 2) {
6072 if (IncPhi->getIncomingValue(
Idx) ==
P) {
6073 IncValue = IncPhi->getIncomingValue(1 -
Idx);
6074 CxtI = IncPhi->getIncomingBlock(1 -
Idx)->getTerminator();
6102 case Instruction::BitCast: {
6105 !Src->getType()->isIntOrIntVectorTy())
6108 const Type *Ty =
Op->getType()->getScalarType();
6113 if (Bits.isNonNegative())
6115 else if (Bits.isNegative())
6134 InfKB.Zero.clearSignBit();
6136 assert(!InfResult.value());
6138 }
else if (Bits == InfKB) {
6146 ZeroKB.Zero.clearSignBit();
6148 assert(!ZeroResult.value());
6150 }
else if (Bits == ZeroKB) {
6163 const APInt &DemandedElts,
6170 return KnownClasses;
6185 if (V->getType()->isIntegerTy(8))
6192 if (isa<UndefValue>(V))
6196 if (
DL.getTypeStoreSize(V->getType()).isZero())
6211 if (
C->isNullValue())
6218 if (CFP->getType()->isHalfTy())
6220 else if (CFP->getType()->isFloatTy())
6222 else if (CFP->getType()->isDoubleTy())
6231 if (CI->getBitWidth() % 8 == 0) {
6232 assert(CI->getBitWidth() > 8 &&
"8 bits should be handled above!");
6233 if (!CI->getValue().isSplat(8))
6235 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
6239 if (
auto *CE = dyn_cast<ConstantExpr>(
C)) {
6240 if (CE->getOpcode() == Instruction::IntToPtr) {
6241 if (
auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
6242 unsigned BitWidth =
DL.getPointerSizeInBits(PtrTy->getAddressSpace());
6255 if (
LHS == UndefInt8)
6257 if (
RHS == UndefInt8)
6263 Value *Val = UndefInt8;
6264 for (
unsigned I = 0, E = CA->getNumElements();
I != E; ++
I)
6270 if (isa<ConstantAggregate>(
C)) {
6271 Value *Val = UndefInt8;
6292 StructType *STy = dyn_cast<StructType>(IndexedType);
6306 while (PrevTo != OrigTo) {
6353 unsigned IdxSkip = Idxs.
size();
6366 std::optional<BasicBlock::iterator> InsertBefore) {
6369 if (idx_range.
empty())
6372 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
6373 "Not looking at a struct or array?");
6375 "Invalid indices for type?");
6377 if (
Constant *
C = dyn_cast<Constant>(V)) {
6378 C =
C->getAggregateElement(idx_range[0]);
6379 if (!
C)
return nullptr;
6386 const unsigned *req_idx = idx_range.
begin();
6387 for (
const unsigned *i =
I->idx_begin(), *e =
I->idx_end();
6388 i != e; ++i, ++req_idx) {
6389 if (req_idx == idx_range.
end()) {
6419 ArrayRef(req_idx, idx_range.
end()), InsertBefore);
6428 unsigned size =
I->getNumIndices() + idx_range.
size();
6433 Idxs.
append(
I->idx_begin(),
I->idx_end());
6439 &&
"Number of indices added not correct?");
6449 unsigned CharSize) {
6451 if (
GEP->getNumOperands() != 3)
6456 ArrayType *AT = dyn_cast<ArrayType>(
GEP->getSourceElementType());
6462 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(
GEP->getOperand(1));
6463 if (!FirstIdx || !FirstIdx->
isZero())
6477 assert(V &&
"V should not be null.");
6478 assert((ElementSize % 8) == 0 &&
6479 "ElementSize expected to be a multiple of the size of a byte.");
6480 unsigned ElementSizeInBytes = ElementSize / 8;
6492 APInt Off(
DL.getIndexTypeSizeInBits(V->getType()), 0);
6494 if (GV != V->stripAndAccumulateConstantOffsets(
DL, Off,
6499 uint64_t StartIdx = Off.getLimitedValue();
6506 if ((StartIdx % ElementSizeInBytes) != 0)
6509 Offset += StartIdx / ElementSizeInBytes;
6515 uint64_t SizeInBytes =
DL.getTypeStoreSize(GVTy).getFixedValue();
6518 Slice.
Array =
nullptr;
6529 if (
auto *ArrayInit = dyn_cast<ConstantDataArray>(
Init)) {
6530 Type *InitElTy = ArrayInit->getElementType();
6535 ArrayTy = ArrayInit->getType();
6540 if (ElementSize != 8)
6551 Array = dyn_cast<ConstantDataArray>(
Init);
6552 ArrayTy = dyn_cast<ArrayType>(
Init->getType());
6559 Slice.
Array = Array;
6575 if (Slice.
Array ==
nullptr) {
6598 Str = Str.substr(Slice.
Offset);
6604 Str = Str.substr(0, Str.find(
'\0'));
6617 unsigned CharSize) {
6619 V = V->stripPointerCasts();
6623 if (
const PHINode *PN = dyn_cast<PHINode>(V)) {
6624 if (!PHIs.
insert(PN).second)
6629 for (
Value *IncValue : PN->incoming_values()) {
6631 if (Len == 0)
return 0;
6633 if (Len == ~0ULL)
continue;
6635 if (Len != LenSoFar && LenSoFar != ~0ULL)
6645 if (
const SelectInst *SI = dyn_cast<SelectInst>(V)) {
6647 if (Len1 == 0)
return 0;
6649 if (Len2 == 0)
return 0;
6650 if (Len1 == ~0ULL)
return Len2;
6651 if (Len2 == ~0ULL)
return Len1;
6652 if (Len1 != Len2)
return 0;
6661 if (Slice.
Array ==
nullptr)
6669 unsigned NullIndex = 0;
6670 for (
unsigned E = Slice.
Length; NullIndex < E; ++NullIndex) {
6675 return NullIndex + 1;
6681 if (!V->getType()->isPointerTy())
6688 return Len == ~0ULL ? 1 : Len;
6693 bool MustPreserveNullness) {
6695 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
6696 if (
const Value *RV = Call->getReturnedArgOperand())
6700 Call, MustPreserveNullness))
6701 return Call->getArgOperand(0);
6706 const CallBase *Call,
bool MustPreserveNullness) {
6707 switch (Call->getIntrinsicID()) {
6708 case Intrinsic::launder_invariant_group:
6709 case Intrinsic::strip_invariant_group:
6710 case Intrinsic::aarch64_irg:
6711 case Intrinsic::aarch64_tagp:
6721 case Intrinsic::amdgcn_make_buffer_rsrc:
6723 case Intrinsic::ptrmask:
6724 return !MustPreserveNullness;
6725 case Intrinsic::threadlocal_address:
6728 return !Call->getParent()->getParent()->isPresplitCoroutine();
6745 if (!PrevValue || LI->
getLoopFor(PrevValue->getParent()) != L)
6747 if (!PrevValue || LI->
getLoopFor(PrevValue->getParent()) != L)
6755 if (
auto *Load = dyn_cast<LoadInst>(PrevValue))
6756 if (!L->isLoopInvariant(Load->getPointerOperand()))
6762 for (
unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
6763 if (
auto *
GEP = dyn_cast<GEPOperator>(V)) {
6764 const Value *PtrOp =
GEP->getPointerOperand();
6770 Value *NewV = cast<Operator>(V)->getOperand(0);
6774 }
else if (
auto *GA = dyn_cast<GlobalAlias>(V)) {
6775 if (GA->isInterposable())
6777 V = GA->getAliasee();
6779 if (
auto *
PHI = dyn_cast<PHINode>(V)) {
6781 if (
PHI->getNumIncomingValues() == 1) {
6782 V =
PHI->getIncomingValue(0);
6785 }
else if (
auto *Call = dyn_cast<CallBase>(V)) {
6803 assert(V->getType()->isPointerTy() &&
"Unexpected operand type!");
6810 const LoopInfo *LI,
unsigned MaxLookup) {
6818 if (!Visited.
insert(
P).second)
6821 if (
auto *SI = dyn_cast<SelectInst>(
P)) {
6823 Worklist.
push_back(SI->getFalseValue());
6827 if (
auto *PN = dyn_cast<PHINode>(
P)) {
6847 }
while (!Worklist.
empty());
6851 const unsigned MaxVisited = 8;
6856 const Value *Object =
nullptr;
6866 if (!Visited.
insert(
P).second)
6869 if (Visited.
size() == MaxVisited)
6872 if (
auto *SI = dyn_cast<SelectInst>(
P)) {
6874 Worklist.
push_back(SI->getFalseValue());
6878 if (
auto *PN = dyn_cast<PHINode>(
P)) {
6885 else if (Object !=
P)
6887 }
while (!Worklist.
empty());
6896 if (
const Operator *U = dyn_cast<Operator>(V)) {
6899 if (U->getOpcode() == Instruction::PtrToInt)
6900 return U->getOperand(0);
6907 if (U->getOpcode() != Instruction::Add ||
6908 (!isa<ConstantInt>(U->getOperand(1)) &&
6910 !isa<PHINode>(U->getOperand(1))))
6912 V = U->getOperand(0);
6916 assert(V->getType()->isIntegerTy() &&
"Unexpected operand type!");
6933 for (
const Value *V : Objs) {
6934 if (!Visited.
insert(V).second)
6939 if (O->getType()->isPointerTy()) {
6952 }
while (!Working.
empty());
6961 auto AddWork = [&](
Value *V) {
6962 if (Visited.
insert(V).second)
6971 if (
AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
6972 if (Result && Result != AI)
6975 }
else if (
CastInst *CI = dyn_cast<CastInst>(V)) {
6976 AddWork(CI->getOperand(0));
6977 }
else if (
PHINode *PN = dyn_cast<PHINode>(V)) {
6978 for (
Value *IncValue : PN->incoming_values())
6980 }
else if (
auto *SI = dyn_cast<SelectInst>(V)) {
6981 AddWork(SI->getTrueValue());
6982 AddWork(SI->getFalseValue());
6984 if (OffsetZero && !
GEP->hasAllZeroIndices())
6986 AddWork(
GEP->getPointerOperand());
6987 }
else if (
CallBase *CB = dyn_cast<CallBase>(V)) {
6988 Value *Returned = CB->getReturnedArgOperand();
6996 }
while (!Worklist.
empty());
7002 const Value *V,
bool AllowLifetime,
bool AllowDroppable) {
7003 for (
const User *U : V->users()) {
7008 if (AllowLifetime &&
II->isLifetimeStartOrEnd())
7011 if (AllowDroppable &&
II->isDroppable())
7029 if (
auto *
II = dyn_cast<IntrinsicInst>(
I))
7031 auto *Shuffle = dyn_cast<ShuffleVectorInst>(
I);
7032 return (!Shuffle || Shuffle->isSelect()) &&
7033 !isa<CallBase, BitCastInst, ExtractElementInst>(
I);
7041 bool UseVariableInfo) {
7043 AC, DT, TLI, UseVariableInfo);
7049 bool UseVariableInfo) {
7053 auto hasEqualReturnAndLeadingOperandTypes =
7054 [](
const Instruction *Inst,
unsigned NumLeadingOperands) {
7058 for (
unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
7064 hasEqualReturnAndLeadingOperandTypes(Inst, 2));
7066 hasEqualReturnAndLeadingOperandTypes(Inst, 1));
7073 case Instruction::UDiv:
7074 case Instruction::URem: {
7081 case Instruction::SDiv:
7082 case Instruction::SRem: {
7084 const APInt *Numerator, *Denominator;
7088 if (*Denominator == 0)
7100 case Instruction::Load: {
7101 if (!UseVariableInfo)
7104 const LoadInst *LI = dyn_cast<LoadInst>(Inst);
7114 case Instruction::Call: {
7115 auto *CI = dyn_cast<const CallInst>(Inst);
7118 const Function *Callee = CI->getCalledFunction();
7122 return Callee && Callee->isSpeculatable();
7124 case Instruction::VAArg:
7125 case Instruction::Alloca:
7126 case Instruction::Invoke:
7127 case Instruction::CallBr:
7128 case Instruction::PHI:
7129 case Instruction::Store:
7130 case Instruction::Ret:
7131 case Instruction::Br:
7132 case Instruction::IndirectBr:
7133 case Instruction::Switch:
7134 case Instruction::Unreachable:
7135 case Instruction::Fence:
7136 case Instruction::AtomicRMW:
7137 case Instruction::AtomicCmpXchg:
7138 case Instruction::LandingPad:
7139 case Instruction::Resume:
7140 case Instruction::CatchSwitch:
7141 case Instruction::CatchPad:
7142 case Instruction::CatchRet:
7143 case Instruction::CleanupPad:
7144 case Instruction::CleanupRet:
7150 if (
I.mayReadOrWriteMemory())
7263 if (
Add &&
Add->hasNoSignedWrap()) {
7303 bool LHSOrRHSKnownNonNegative =
7305 bool LHSOrRHSKnownNegative =
7307 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
7310 if ((AddKnown.
isNonNegative() && LHSOrRHSKnownNonNegative) ||
7311 (AddKnown.
isNegative() && LHSOrRHSKnownNegative))
7386 if (
const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
7387 assert(EVI->getNumIndices() == 1 &&
"Obvious from CI's type");
7389 if (EVI->getIndices()[0] == 0)
7392 assert(EVI->getIndices()[0] == 1 &&
"Obvious from CI's type");
7394 for (
const auto *U : EVI->users())
7395 if (
const auto *
B = dyn_cast<BranchInst>(U)) {
7396 assert(
B->isConditional() &&
"How else is it using an i1?");
7407 auto AllUsesGuardedByBranch = [&](
const BranchInst *BI) {
7413 for (
const auto *Result :
Results) {
7416 if (DT.
dominates(NoWrapEdge, Result->getParent()))
7419 for (
const auto &RU : Result->uses())
7427 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
7432 auto *
C = dyn_cast<Constant>(ShiftAmount);
7438 if (
auto *FVTy = dyn_cast<FixedVectorType>(
C->getType())) {
7439 unsigned NumElts = FVTy->getNumElements();
7440 for (
unsigned i = 0; i < NumElts; ++i)
7441 ShiftAmounts.
push_back(
C->getAggregateElement(i));
7442 }
else if (isa<ScalableVectorType>(
C->getType()))
7448 auto *CI = dyn_cast_or_null<ConstantInt>(
C);
7449 return CI && CI->getValue().ult(
C->getType()->getIntegerBitWidth());
7462 return (
unsigned(Kind) &
unsigned(UndefPoisonKind::PoisonOnly)) != 0;
7466 return (
unsigned(Kind) &
unsigned(UndefPoisonKind::UndefOnly)) != 0;
7470 bool ConsiderFlagsAndMetadata) {
7473 Op->hasPoisonGeneratingAnnotations())
7476 unsigned Opcode =
Op->getOpcode();
7480 case Instruction::Shl:
7481 case Instruction::AShr:
7482 case Instruction::LShr:
7484 case Instruction::FPToSI:
7485 case Instruction::FPToUI:
7489 case Instruction::Call:
7490 if (
auto *
II = dyn_cast<IntrinsicInst>(
Op)) {
7491 switch (
II->getIntrinsicID()) {
7493 case Intrinsic::ctlz:
7494 case Intrinsic::cttz:
7495 case Intrinsic::abs:
7496 if (cast<ConstantInt>(
II->getArgOperand(1))->isNullValue())
7499 case Intrinsic::ctpop:
7500 case Intrinsic::bswap:
7501 case Intrinsic::bitreverse:
7502 case Intrinsic::fshl:
7503 case Intrinsic::fshr:
7504 case Intrinsic::smax:
7505 case Intrinsic::smin:
7506 case Intrinsic::umax:
7507 case Intrinsic::umin:
7508 case Intrinsic::ptrmask:
7509 case Intrinsic::fptoui_sat:
7510 case Intrinsic::fptosi_sat:
7511 case Intrinsic::sadd_with_overflow:
7512 case Intrinsic::ssub_with_overflow:
7513 case Intrinsic::smul_with_overflow:
7514 case Intrinsic::uadd_with_overflow:
7515 case Intrinsic::usub_with_overflow:
7516 case Intrinsic::umul_with_overflow:
7517 case Intrinsic::sadd_sat:
7518 case Intrinsic::uadd_sat:
7519 case Intrinsic::ssub_sat:
7520 case Intrinsic::usub_sat:
7522 case Intrinsic::sshl_sat:
7523 case Intrinsic::ushl_sat:
7526 case Intrinsic::fma:
7527 case Intrinsic::fmuladd:
7528 case Intrinsic::sqrt:
7529 case Intrinsic::powi:
7530 case Intrinsic::sin:
7531 case Intrinsic::cos:
7532 case Intrinsic::pow:
7533 case Intrinsic::log:
7534 case Intrinsic::log10:
7535 case Intrinsic::log2:
7536 case Intrinsic::exp:
7537 case Intrinsic::exp2:
7538 case Intrinsic::exp10:
7539 case Intrinsic::fabs:
7540 case Intrinsic::copysign:
7541 case Intrinsic::floor:
7542 case Intrinsic::ceil:
7543 case Intrinsic::trunc:
7544 case Intrinsic::rint:
7545 case Intrinsic::nearbyint:
7546 case Intrinsic::round:
7547 case Intrinsic::roundeven:
7548 case Intrinsic::fptrunc_round:
7549 case Intrinsic::canonicalize:
7550 case Intrinsic::arithmetic_fence:
7551 case Intrinsic::minnum:
7552 case Intrinsic::maxnum:
7553 case Intrinsic::minimum:
7554 case Intrinsic::maximum:
7555 case Intrinsic::is_fpclass:
7556 case Intrinsic::ldexp:
7557 case Intrinsic::frexp:
7559 case Intrinsic::lround:
7560 case Intrinsic::llround:
7561 case Intrinsic::lrint:
7562 case Intrinsic::llrint:
7569 case Instruction::CallBr:
7570 case Instruction::Invoke: {
7571 const auto *CB = cast<CallBase>(
Op);
7572 return !CB->hasRetAttr(Attribute::NoUndef);
7574 case Instruction::InsertElement:
7575 case Instruction::ExtractElement: {
7577 auto *VTy = cast<VectorType>(
Op->getOperand(0)->getType());
7578 unsigned IdxOp =
Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
7579 auto *
Idx = dyn_cast<ConstantInt>(
Op->getOperand(IdxOp));
7582 Idx->getValue().uge(VTy->getElementCount().getKnownMinValue());
7585 case Instruction::ShuffleVector: {
7587 ? cast<ConstantExpr>(
Op)->getShuffleMask()
7588 : cast<ShuffleVectorInst>(
Op)->getShuffleMask();
7591 case Instruction::FNeg:
7592 case Instruction::PHI:
7593 case Instruction::Select:
7594 case Instruction::URem:
7595 case Instruction::SRem:
7596 case Instruction::ExtractValue:
7597 case Instruction::InsertValue:
7598 case Instruction::Freeze:
7599 case Instruction::ICmp:
7600 case Instruction::FCmp:
7601 case Instruction::FAdd:
7602 case Instruction::FSub:
7603 case Instruction::FMul:
7604 case Instruction::FDiv:
7605 case Instruction::FRem:
7607 case Instruction::GetElementPtr:
7612 const auto *CE = dyn_cast<ConstantExpr>(
Op);
7613 if (isa<CastInst>(
Op) || (CE && CE->isCast()))
7624 bool ConsiderFlagsAndMetadata) {
7625 return ::canCreateUndefOrPoison(
Op, UndefPoisonKind::UndefOrPoison,
7626 ConsiderFlagsAndMetadata);
7630 return ::canCreateUndefOrPoison(
Op, UndefPoisonKind::PoisonOnly,
7631 ConsiderFlagsAndMetadata);
7636 if (ValAssumedPoison == V)
7639 const unsigned MaxDepth = 2;
7640 if (
Depth >= MaxDepth)
7643 if (
const auto *
I = dyn_cast<Instruction>(V)) {
7645 return propagatesPoison(Op) &&
7646 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
7670 const unsigned MaxDepth = 2;
7671 if (
Depth >= MaxDepth)
7674 const auto *
I = dyn_cast<Instruction>(ValAssumedPoison);
7677 return impliesPoison(Op, V, Depth + 1);
7684 return ::impliesPoison(ValAssumedPoison, V, 0);
7695 if (isa<MetadataAsValue>(V))
7698 if (
const auto *
A = dyn_cast<Argument>(V)) {
7699 if (
A->hasAttribute(Attribute::NoUndef) ||
7700 A->hasAttribute(Attribute::Dereferenceable) ||
7701 A->hasAttribute(Attribute::DereferenceableOrNull))
7705 if (
auto *
C = dyn_cast<Constant>(V)) {
7706 if (isa<PoisonValue>(
C))
7709 if (isa<UndefValue>(
C))
7712 if (isa<ConstantInt>(
C) || isa<GlobalVariable>(
C) || isa<ConstantFP>(V) ||
7713 isa<ConstantPointerNull>(
C) || isa<Function>(
C))
7716 if (
C->getType()->isVectorTy() && !isa<ConstantExpr>(
C)) {
7721 return !
C->containsConstantExpression();
7733 auto *StrippedV = V->stripPointerCastsSameRepresentation();
7734 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
7735 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
7738 auto OpCheck = [&](
const Value *V) {
7742 if (
auto *Opr = dyn_cast<Operator>(V)) {
7745 if (isa<FreezeInst>(V))
7748 if (
const auto *CB = dyn_cast<CallBase>(V)) {
7749 if (CB->hasRetAttr(Attribute::NoUndef) ||
7750 CB->hasRetAttr(Attribute::Dereferenceable) ||
7751 CB->hasRetAttr(Attribute::DereferenceableOrNull))
7755 if (
const auto *PN = dyn_cast<PHINode>(V)) {
7756 unsigned Num = PN->getNumIncomingValues();
7757 bool IsWellDefined =
true;
7758 for (
unsigned i = 0; i < Num; ++i) {
7759 auto *TI = PN->getIncomingBlock(i)->getTerminator();
7761 DT,
Depth + 1, Kind)) {
7762 IsWellDefined =
false;
7770 all_of(Opr->operands(), OpCheck))
7774 if (
auto *
I = dyn_cast<LoadInst>(V))
7775 if (
I->hasMetadata(LLVMContext::MD_noundef) ||
7776 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
7777 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
7797 auto *Dominator = DNode->
getIDom();
7802 auto *TI = Dominator->
getBlock()->getTerminator();
7805 if (
auto BI = dyn_cast_or_null<BranchInst>(TI)) {
7806 if (BI->isConditional())
7807 Cond = BI->getCondition();
7808 }
else if (
auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
7809 Cond = SI->getCondition();
7817 auto *Opr = cast<Operator>(
Cond);
7818 if (
any_of(Opr->operands(), [V](
const Use &U) {
7819 return V == U && propagatesPoison(U);
7825 Dominator = Dominator->getIDom();
7838 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7839 UndefPoisonKind::UndefOrPoison);
7845 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7846 UndefPoisonKind::PoisonOnly);
7852 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7853 UndefPoisonKind::UndefOnly);
7876 while (!Worklist.
empty()) {
7885 if (
I != Root && !
any_of(
I->operands(), [&KnownPoison](
const Use &U) {
7886 return KnownPoison.contains(U) && propagatesPoison(U);
7890 if (KnownPoison.
insert(
I).second)
7902 return ::computeOverflowForSignedAdd(
Add->getOperand(0),
Add->getOperand(1),
7910 return ::computeOverflowForSignedAdd(
LHS,
RHS,
nullptr, SQ);
7919 if (isa<ReturnInst>(
I))
7921 if (isa<UnreachableInst>(
I))
7928 if (isa<CatchPadInst>(
I)) {
7942 return !
I->mayThrow() &&
I->willReturn();
7956 unsigned ScanLimit) {
7963 assert(ScanLimit &&
"scan limit must be non-zero");
7965 if (isa<DbgInfoIntrinsic>(
I))
7967 if (--ScanLimit == 0)
7981 if (
I->getParent() != L->getHeader())
return false;
7984 if (&LI ==
I)
return true;
7987 llvm_unreachable(
"Instruction not contained in its own parent basic block.");
7992 switch (
I->getOpcode()) {
7993 case Instruction::Freeze:
7994 case Instruction::PHI:
7995 case Instruction::Invoke:
7997 case Instruction::Select:
7999 case Instruction::Call:
8000 if (
auto *
II = dyn_cast<IntrinsicInst>(
I)) {
8001 switch (
II->getIntrinsicID()) {
8003 case Intrinsic::sadd_with_overflow:
8004 case Intrinsic::ssub_with_overflow:
8005 case Intrinsic::smul_with_overflow:
8006 case Intrinsic::uadd_with_overflow:
8007 case Intrinsic::usub_with_overflow:
8008 case Intrinsic::umul_with_overflow:
8013 case Intrinsic::ctpop:
8014 case Intrinsic::ctlz:
8015 case Intrinsic::cttz:
8016 case Intrinsic::abs:
8017 case Intrinsic::smax:
8018 case Intrinsic::smin:
8019 case Intrinsic::umax:
8020 case Intrinsic::umin:
8021 case Intrinsic::bitreverse:
8022 case Intrinsic::bswap:
8023 case Intrinsic::sadd_sat:
8024 case Intrinsic::ssub_sat:
8025 case Intrinsic::sshl_sat:
8026 case Intrinsic::uadd_sat:
8027 case Intrinsic::usub_sat:
8028 case Intrinsic::ushl_sat:
8033 case Instruction::ICmp:
8034 case Instruction::FCmp:
8035 case Instruction::GetElementPtr:
8038 if (isa<BinaryOperator>(
I) || isa<UnaryOperator>(
I) || isa<CastInst>(
I))
8049template <
typename CallableT>
8051 const CallableT &Handle) {
8052 switch (
I->getOpcode()) {
8053 case Instruction::Store:
8058 case Instruction::Load:
8065 case Instruction::AtomicCmpXchg:
8070 case Instruction::AtomicRMW:
8075 case Instruction::Call:
8076 case Instruction::Invoke: {
8080 for (
unsigned i = 0; i < CB->
arg_size(); ++i)
8083 CB->
paramHasAttr(i, Attribute::DereferenceableOrNull)) &&
8088 case Instruction::Ret:
8089 if (
I->getFunction()->hasRetAttribute(Attribute::NoUndef) &&
8090 Handle(
I->getOperand(0)))
8093 case Instruction::Switch:
8094 if (Handle(cast<SwitchInst>(
I)->getCondition()))
8097 case Instruction::Br: {
8098 auto *BR = cast<BranchInst>(
I);
8099 if (BR->isConditional() && Handle(BR->getCondition()))
8119template <
typename CallableT>
8121 const CallableT &Handle) {
8124 switch (
I->getOpcode()) {
8126 case Instruction::UDiv:
8127 case Instruction::SDiv:
8128 case Instruction::URem:
8129 case Instruction::SRem:
8130 return Handle(
I->getOperand(1));
8147 I, [&](
const Value *V) {
return KnownPoison.
count(V); });
8161 if (
const auto *Inst = dyn_cast<Instruction>(V)) {
8165 }
else if (
const auto *Arg = dyn_cast<Argument>(V)) {
8166 if (Arg->getParent()->isDeclaration())
8169 Begin = BB->
begin();
8176 unsigned ScanLimit = 32;
8185 if (isa<DbgInfoIntrinsic>(
I))
8187 if (--ScanLimit == 0)
8191 return WellDefinedOp == V;
8211 if (isa<DbgInfoIntrinsic>(
I))
8213 if (--ScanLimit == 0)
8221 for (
const Use &
Op :
I.operands()) {
8231 if (
I.getOpcode() == Instruction::Select &&
8232 YieldsPoison.
count(
I.getOperand(1)) &&
8233 YieldsPoison.
count(
I.getOperand(2))) {
8239 if (!BB || !Visited.
insert(BB).second)
8249 return ::programUndefinedIfUndefOrPoison(Inst,
false);
8253 return ::programUndefinedIfUndefOrPoison(Inst,
true);
8260 if (
auto *
C = dyn_cast<ConstantFP>(V))
8263 if (
auto *
C = dyn_cast<ConstantDataVector>(V)) {
8264 if (!
C->getElementType()->isFloatingPointTy())
8266 for (
unsigned I = 0, E =
C->getNumElements();
I < E; ++
I) {
8267 if (
C->getElementAsAPFloat(
I).isNaN())
8273 if (isa<ConstantAggregateZero>(V))
8280 if (
auto *
C = dyn_cast<ConstantFP>(V))
8281 return !
C->isZero();
8283 if (
auto *
C = dyn_cast<ConstantDataVector>(V)) {
8284 if (!
C->getElementType()->isFloatingPointTy())
8286 for (
unsigned I = 0, E =
C->getNumElements();
I < E; ++
I) {
8287 if (
C->getElementAsAPFloat(
I).isZero())
8310 if (CmpRHS == FalseVal) {
8354 if (CmpRHS != TrueVal) {
8393 Value *
A =
nullptr, *
B =
nullptr;
8398 Value *
C =
nullptr, *
D =
nullptr;
8400 if (L.Flavor != R.Flavor)
8452 return {L.Flavor,
SPNB_NA,
false};
8459 return {L.Flavor,
SPNB_NA,
false};
8466 return {L.Flavor,
SPNB_NA,
false};
8473 return {L.Flavor,
SPNB_NA,
false};
8489 return ConstantInt::get(V->getType(), ~(*
C));
8546 if ((CmpLHS == TrueVal &&
match(FalseVal,
m_APInt(C2))) ||
8566 assert(
X &&
Y &&
"Invalid operand");
8568 auto IsNegationOf = [&](
const Value *
X,
const Value *
Y) {
8572 auto *BO = cast<BinaryOperator>(
X);
8573 if (NeedNSW && !BO->hasNoSignedWrap())
8576 auto *Zero = cast<Constant>(BO->getOperand(0));
8577 if (!AllowPoison && !Zero->isNullValue())
8584 if (IsNegationOf(
X,
Y) || IsNegationOf(
Y,
X))
8604 if (cast<ICmpInst>(
X)->hasSameSign() != cast<ICmpInst>(
Y)->hasSameSign())
8611 const APInt *RHSC1, *RHSC2;
8616 if (cast<ICmpInst>(
X)->hasSameSign() &&
8623 return CR1.inverse() == CR2;
8657std::optional<std::pair<CmpPredicate, Constant *>>
8660 "Only for relational integer predicates.");
8661 if (isa<UndefValue>(
C))
8662 return std::nullopt;
8668 bool WillIncrement =
8673 auto ConstantIsOk = [WillIncrement, IsSigned](
ConstantInt *
C) {
8674 return WillIncrement ? !
C->isMaxValue(IsSigned) : !
C->isMinValue(IsSigned);
8677 Constant *SafeReplacementConstant =
nullptr;
8678 if (
auto *CI = dyn_cast<ConstantInt>(
C)) {
8680 if (!ConstantIsOk(CI))
8681 return std::nullopt;
8682 }
else if (
auto *FVTy = dyn_cast<FixedVectorType>(
Type)) {
8683 unsigned NumElts = FVTy->getNumElements();
8684 for (
unsigned i = 0; i != NumElts; ++i) {
8685 Constant *Elt =
C->getAggregateElement(i);
8687 return std::nullopt;
8689 if (isa<UndefValue>(Elt))
8694 auto *CI = dyn_cast<ConstantInt>(Elt);
8695 if (!CI || !ConstantIsOk(CI))
8696 return std::nullopt;
8698 if (!SafeReplacementConstant)
8699 SafeReplacementConstant = CI;
8701 }
else if (isa<VectorType>(
C->getType())) {
8703 Value *SplatC =
C->getSplatValue();
8704 auto *CI = dyn_cast_or_null<ConstantInt>(SplatC);
8706 if (!CI || !ConstantIsOk(CI))
8707 return std::nullopt;
8710 return std::nullopt;
8717 if (
C->containsUndefOrPoisonElement()) {
8718 assert(SafeReplacementConstant &&
"Replacement constant not set");
8725 Constant *OneOrNegOne = ConstantInt::get(
Type, WillIncrement ? 1 : -1,
true);
8728 return std::make_pair(NewPred, NewC);
8737 bool HasMismatchedZeros =
false;
8743 Value *OutputZeroVal =
nullptr;
8745 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
8746 OutputZeroVal = TrueVal;
8748 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
8749 OutputZeroVal = FalseVal;
8751 if (OutputZeroVal) {
8753 HasMismatchedZeros =
true;
8754 CmpLHS = OutputZeroVal;
8757 HasMismatchedZeros =
true;
8758 CmpRHS = OutputZeroVal;
8775 if (!HasMismatchedZeros)
8786 bool Ordered =
false;
8797 if (LHSSafe && RHSSafe) {
8827 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
8838 if (TrueVal == CmpLHS && FalseVal == CmpRHS)
8844 auto MaybeSExtCmpLHS =
8848 if (
match(TrueVal, MaybeSExtCmpLHS)) {
8870 else if (
match(FalseVal, MaybeSExtCmpLHS)) {
8910 case Instruction::ZExt:
8914 case Instruction::SExt:
8918 case Instruction::Trunc:
8921 CmpConst->
getType() == SrcTy) {
8943 CastedTo = CmpConst;
8945 unsigned ExtOp = CmpI->
isSigned() ? Instruction::SExt : Instruction::ZExt;
8949 case Instruction::FPTrunc:
8952 case Instruction::FPExt:
8955 case Instruction::FPToUI:
8958 case Instruction::FPToSI:
8961 case Instruction::UIToFP:
8964 case Instruction::SIToFP:
8977 if (CastedBack && CastedBack !=
C)
9001 auto *Cast1 = dyn_cast<CastInst>(V1);
9005 *CastOp = Cast1->getOpcode();
9006 Type *SrcTy = Cast1->getSrcTy();
9007 if (
auto *Cast2 = dyn_cast<CastInst>(V2)) {
9009 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
9010 return Cast2->getOperand(0);
9014 auto *
C = dyn_cast<Constant>(V2);
9018 Value *CastedTo =
nullptr;
9019 if (*CastOp == Instruction::Trunc) {
9032 assert(V2->getType() == Cast1->getType() &&
9033 "V2 and Cast1 should be the same type.");
9049 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
9052 Value *TrueVal = SI->getTrueValue();
9053 Value *FalseVal = SI->getFalseValue();
9066 if (isa<FPMathOperator>(CmpI))
9074 if (CastOp && CmpLHS->
getType() != TrueVal->getType()) {
9078 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
9080 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
9081 cast<CastInst>(TrueVal)->getOperand(0),
C,
9087 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
9089 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
9090 C, cast<CastInst>(FalseVal)->getOperand(0),
9094 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
9113 return Intrinsic::umin;
9115 return Intrinsic::umax;
9117 return Intrinsic::smin;
9119 return Intrinsic::smax;
9135 case Intrinsic::smax:
return Intrinsic::smin;
9136 case Intrinsic::smin:
return Intrinsic::smax;
9137 case Intrinsic::umax:
return Intrinsic::umin;
9138 case Intrinsic::umin:
return Intrinsic::umax;
9141 case Intrinsic::maximum:
return Intrinsic::minimum;
9142 case Intrinsic::minimum:
return Intrinsic::maximum;
9143 case Intrinsic::maxnum:
return Intrinsic::minnum;
9144 case Intrinsic::minnum:
return Intrinsic::maxnum;
9159std::pair<Intrinsic::ID, bool>
9164 bool AllCmpSingleUse =
true;
9167 if (
all_of(VL, [&SelectPattern, &AllCmpSingleUse](
Value *
I) {
9173 SelectPattern.
Flavor != CurrentPattern.Flavor)
9175 SelectPattern = CurrentPattern;
9180 switch (SelectPattern.
Flavor) {
9182 return {Intrinsic::smin, AllCmpSingleUse};
9184 return {Intrinsic::umin, AllCmpSingleUse};
9186 return {Intrinsic::smax, AllCmpSingleUse};
9188 return {Intrinsic::umax, AllCmpSingleUse};
9190 return {Intrinsic::maxnum, AllCmpSingleUse};
9192 return {Intrinsic::minnum, AllCmpSingleUse};
9205 if (
P->getNumIncomingValues() != 2)
9208 for (
unsigned i = 0; i != 2; ++i) {
9209 Value *L =
P->getIncomingValue(i);
9210 Value *R =
P->getIncomingValue(!i);
9211 auto *LU = dyn_cast<BinaryOperator>(L);
9214 unsigned Opcode = LU->getOpcode();
9220 case Instruction::LShr:
9221 case Instruction::AShr:
9222 case Instruction::Shl:
9223 case Instruction::Add:
9224 case Instruction::Sub:
9225 case Instruction::UDiv:
9226 case Instruction::URem:
9227 case Instruction::And:
9228 case Instruction::Or:
9229 case Instruction::Mul:
9230 case Instruction::FMul: {
9231 Value *LL = LU->getOperand(0);
9232 Value *LR = LU->getOperand(1);
9262 P = dyn_cast<PHINode>(
I->getOperand(0));
9264 P = dyn_cast<PHINode>(
I->getOperand(1));
9285 return !
C->isNegative();
9297 const APInt *CLHS, *CRHS;
9300 return CLHS->
sle(*CRHS);
9338 const APInt *CLHS, *CRHS;
9341 return CLHS->
ule(*CRHS);
9350static std::optional<bool>
9355 return std::nullopt;
9362 return std::nullopt;
9369 return std::nullopt;
9376 return std::nullopt;
9383 return std::nullopt;
9390static std::optional<bool>
9396 if (CR.
icmp(Pred, RCR))
9403 return std::nullopt;
9416 return std::nullopt;
9422static std::optional<bool>
9431 LHSIsTrue ?
LHS->getCmpPredicate() :
LHS->getInverseCmpPredicate();
9455 const APInt *Unused;
9474 return std::nullopt;
9478 if (L0 == R0 && L1 == R1)
9513 return std::nullopt;
9520static std::optional<bool>
9525 assert((
LHS->getOpcode() == Instruction::And ||
9526 LHS->getOpcode() == Instruction::Or ||
9527 LHS->getOpcode() == Instruction::Select) &&
9528 "Expected LHS to be 'and', 'or', or 'select'.");
9535 const Value *ALHS, *ARHS;
9540 ALHS, RHSPred, RHSOp0, RHSOp1,
DL, LHSIsTrue,
Depth + 1))
9543 ARHS, RHSPred, RHSOp0, RHSOp1,
DL, LHSIsTrue,
Depth + 1))
9545 return std::nullopt;
9547 return std::nullopt;
9556 return std::nullopt;
9561 return std::nullopt;
9564 "Expected integer type only!");
9568 LHSIsTrue = !LHSIsTrue;
9579 if ((LHSI->getOpcode() == Instruction::And ||
9580 LHSI->getOpcode() == Instruction::Or ||
9581 LHSI->getOpcode() == Instruction::Select))
9585 return std::nullopt;
9590 bool LHSIsTrue,
unsigned Depth) {
9596 bool InvertRHS =
false;
9603 if (
const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(
RHS)) {
9605 LHS, RHSCmp->getCmpPredicate(), RHSCmp->getOperand(0),
9606 RHSCmp->getOperand(1),
DL, LHSIsTrue,
Depth))
9607 return InvertRHS ? !*Implied : *Implied;
9608 return std::nullopt;
9612 return std::nullopt;
9616 const Value *RHS1, *RHS2;
9618 if (std::optional<bool> Imp =
9622 if (std::optional<bool> Imp =
9628 if (std::optional<bool> Imp =
9632 if (std::optional<bool> Imp =
9638 return std::nullopt;
9643static std::pair<Value *, bool>
9645 if (!ContextI || !ContextI->
getParent())
9646 return {
nullptr,
false};
9653 return {
nullptr,
false};
9659 return {
nullptr,
false};
9662 if (TrueBB == FalseBB)
9663 return {
nullptr,
false};
9665 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
9666 "Predecessor block does not point to successor?");
9669 return {PredCond, TrueBB == ContextBB};
9675 assert(
Cond->getType()->isIntOrIntVectorTy(1) &&
"Condition must be bool");
9679 return std::nullopt;
9691 return std::nullopt;
9696 bool PreferSignedRange) {
9697 unsigned Width =
Lower.getBitWidth();
9700 case Instruction::Add:
9709 if (PreferSignedRange && HasNSW && HasNUW)
9715 }
else if (HasNSW) {
9716 if (
C->isNegative()) {
9729 case Instruction::And:
9740 case Instruction::Or:
9746 case Instruction::AShr:
9752 unsigned ShiftAmount = Width - 1;
9753 if (!
C->isZero() && IIQ.
isExact(&BO))
9754 ShiftAmount =
C->countr_zero();
9755 if (
C->isNegative()) {
9758 Upper =
C->ashr(ShiftAmount) + 1;
9761 Lower =
C->ashr(ShiftAmount);
9767 case Instruction::LShr:
9773 unsigned ShiftAmount = Width - 1;
9774 if (!
C->isZero() && IIQ.
isExact(&BO))
9775 ShiftAmount =
C->countr_zero();
9776 Lower =
C->lshr(ShiftAmount);
9781 case Instruction::Shl:
9788 if (
C->isNegative()) {
9790 unsigned ShiftAmount =
C->countl_one() - 1;
9791 Lower =
C->shl(ShiftAmount);
9795 unsigned ShiftAmount =
C->countl_zero() - 1;
9797 Upper =
C->shl(ShiftAmount) + 1;
9816 case Instruction::SDiv:
9820 if (
C->isAllOnes()) {
9825 }
else if (
C->countl_zero() < Width - 1) {
9836 if (
C->isMinSignedValue()) {
9848 case Instruction::UDiv:
9858 case Instruction::SRem:
9864 if (
C->isNegative()) {
9875 case Instruction::URem:
9890 bool UseInstrInfo) {
9891 unsigned Width =
II.getType()->getScalarSizeInBits();
9893 switch (
II.getIntrinsicID()) {
9894 case Intrinsic::ctlz:
9895 case Intrinsic::cttz: {
9897 if (!UseInstrInfo || !
match(
II.getArgOperand(1),
m_One()))
9902 case Intrinsic::ctpop:
9905 APInt(Width, Width) + 1);
9906 case Intrinsic::uadd_sat:
9912 case Intrinsic::sadd_sat:
9915 if (
C->isNegative())
9926 case Intrinsic::usub_sat:
9936 case Intrinsic::ssub_sat:
9938 if (
C->isNegative())
9948 if (
C->isNegative())
9959 case Intrinsic::umin:
9960 case Intrinsic::umax:
9961 case Intrinsic::smin:
9962 case Intrinsic::smax:
9967 switch (
II.getIntrinsicID()) {
9968 case Intrinsic::umin:
9970 case Intrinsic::umax:
9972 case Intrinsic::smin:
9975 case Intrinsic::smax:
9982 case Intrinsic::abs:
9991 case Intrinsic::vscale:
9992 if (!
II.getParent() || !
II.getFunction())
9995 case Intrinsic::scmp:
9996 case Intrinsic::ucmp:
10003 return ConstantRange::getFull(Width);
10008 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
10012 return ConstantRange::getFull(
BitWidth);
10035 return ConstantRange::getFull(
BitWidth);
10037 switch (R.Flavor) {
10049 return ConstantRange::getFull(
BitWidth);
10056 unsigned BitWidth =
I->getType()->getScalarSizeInBits();
10057 if (!
I->getOperand(0)->getType()->getScalarType()->isHalfTy())
10059 if (isa<FPToSIInst>(
I) &&
BitWidth >= 17) {
10064 if (isa<FPToUIInst>(
I) &&
BitWidth >= 16) {
10075 assert(V->getType()->isIntOrIntVectorTy() &&
"Expected integer instruction");
10078 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
10080 if (
auto *
C = dyn_cast<Constant>(V))
10081 return C->toConstantRange();
10083 unsigned BitWidth = V->getType()->getScalarSizeInBits();
10086 if (
auto *BO = dyn_cast<BinaryOperator>(V)) {
10092 }
else if (
auto *
II = dyn_cast<IntrinsicInst>(V))
10094 else if (
auto *SI = dyn_cast<SelectInst>(V)) {
10096 SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT,
Depth + 1);
10098 SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT,
Depth + 1);
10101 }
else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V)) {
10107 }
else if (
const auto *
A = dyn_cast<Argument>(V))
10108 if (std::optional<ConstantRange>
Range =
A->getRange())
10111 if (
auto *
I = dyn_cast<Instruction>(V)) {
10115 if (
const auto *CB = dyn_cast<CallBase>(V))
10116 if (std::optional<ConstantRange>
Range = CB->getRange())
10125 CallInst *
I = cast<CallInst>(AssumeVH);
10127 "Got assumption for the wrong function!");
10128 assert(
I->getIntrinsicID() == Intrinsic::assume &&
10129 "must be an assume intrinsic");
10133 Value *Arg =
I->getArgOperand(0);
10134 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
10136 if (!Cmp || Cmp->getOperand(0) != V)
10141 UseInstrInfo, AC,
I, DT,
Depth + 1);
10154 if (isa<Argument>(V) || isa<GlobalValue>(V)) {
10156 }
else if (
auto *
I = dyn_cast<Instruction>(V)) {
10162 if (isa<Instruction>(
Op) || isa<Argument>(
Op))
10163 InsertAffected(
Op);
10170 auto AddAffected = [&InsertAffected](
Value *V) {
10185 while (!Worklist.
empty()) {
10187 if (!Visited.
insert(V).second)
10210 AddCmpOperands(
A,
B);
10261 if (HasRHSC &&
match(
A, m_Intrinsic<Intrinsic::ctpop>(
m_Value(
X))))
10264 AddCmpOperands(
A,
B);
10274 }
else if (
match(V, m_Intrinsic<Intrinsic::is_fpclass>(
m_Value(
A),
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
This file contains the simple types necessary to represent the attributes associated with functions a...
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
std::optional< std::vector< StOtherPiece > > Other
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static MaybeAlign getAlign(Value *Ptr)
Module.h This file contains the declarations for the Module class.
static bool hasNoUnsignedWrap(BinaryOperator &I)
mir Rename Register Operands
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
static bool mayHaveSideEffects(MachineInstr &MI)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static SmallVector< VPValue *, 4 > getOperands(ArrayRef< VPValue * > Values, unsigned OperandIndex)
static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS)
static cl::opt< unsigned > DomConditionsMaxUses("dom-conditions-max-uses", cl::Hidden, cl::init(20))
static unsigned computeNumSignBitsVectorConstant(const Value *V, const APInt &DemandedElts, unsigned TyBits)
For vector constants, loop over the elements and find the constant with the minimum number of sign bi...
static bool isKnownNonZeroFromOperator(const Operator *I, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS, const Value *RHS)
Return true if "icmp Pred LHS RHS" is always true.
static bool isNonZeroMul(const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, bool NSW, bool NUW)
static bool isKnownNonNullFromDominatingCondition(const Value *V, const Instruction *CtxI, const DominatorTree *DT)
static const Value * getUnderlyingObjectFromInt(const Value *V)
This is the function that does the work of looking through basic ptrtoint+arithmetic+inttoptr sequenc...
static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, const KnownBits &KnownVal)
static bool rangeMetadataExcludesValue(const MDNode *Ranges, const APInt &Value)
Does the 'Range' metadata (which must be a valid MD_range operand list) ensure that the value it's at...
static bool outputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty)
static bool inputDenormalIsIEEE(const Function &F, const Type *Ty)
Return true if it's possible to assume IEEE treatment of input denormals in F for Val.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
static bool isNonEqualShl(const Value *V1, const Value *V2, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and the shift is nuw or nsw.
static void addValueAffectedByCondition(Value *V, function_ref< void(Value *)> InsertAffected)
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static std::tuple< Value *, FPClassTest, FPClassTest > exactClass(Value *V, FPClassTest M)
Return the return value for fcmpImpliesClass for a compare that produces an exact class test.
static bool haveNoCommonBitsSetSpecialCases(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower, APInt &Upper, const InstrInfoQuery &IIQ, bool PreferSignedRange)
static Value * lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, Instruction::CastOps *CastOp)
Helps to match a select pattern in case of a type mismatch.
static std::pair< Value *, bool > getDomPredecessorCondition(const Instruction *ContextI)
static bool isKnownNonEqual(const Value *V1, const Value *V2, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
Return true if it is known that V1 != V2.
static bool isKnownNonZero(const Value *V, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return true if the given value is known to be non-zero when defined.
static bool isNonEqualSelect(const Value *V1, const Value *V2, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
static bool includesPoison(UndefPoisonKind Kind)
static bool isNonEqualMul(const Value *V1, const Value *V2, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and the multiplication is nuw o...
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS)
Match clamp pattern for float types without care about NaNs or signed zeros.
static bool includesUndef(UndefPoisonKind Kind)
static std::optional< bool > isImpliedCondCommonOperandWithCR(CmpPredicate LPred, const ConstantRange &LCR, CmpPredicate RPred, const ConstantRange &RCR)
Return true if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is true.
static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero, unsigned Depth, SimplifyQuery &Q)
Try to detect a recurrence that the value of the induction variable is always a power of two (or zero...
static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
Return true if V1 == (binop V2, X), where X is known non-zero.
static ConstantRange getRangeForSelectPattern(const SelectInst &SI, const InstrInfoQuery &IIQ)
static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, FastMathFlags FMF, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, unsigned Depth)
static uint64_t GetStringLengthH(const Value *V, SmallPtrSetImpl< const PHINode * > &PHIs, unsigned CharSize)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(const Value *V, bool AllowLifetime, bool AllowDroppable)
static std::optional< bool > isImpliedCondAndOr(const Instruction *LHS, CmpPredicate RHSPred, const Value *RHSOp0, const Value *RHSOp1, const DataLayout &DL, bool LHSIsTrue, unsigned Depth)
Return true if LHS implies RHS is true.
static void computeKnownFPClassFromCond(const Value *V, Value *Cond, unsigned Depth, bool CondIsTrue, const Instruction *CxtI, KnownFPClass &KnownFromContext)
static std::optional< bool > isImpliedCondICmps(const ICmpInst *LHS, CmpPredicate RPred, const Value *R0, const Value *R1, const DataLayout &DL, bool LHSIsTrue)
Return true if LHS implies RHS (expanded to its components as "R0 RPred R1") is true.
static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, const APInt *&CLow, const APInt *&CHigh)
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1, bool NSW, bool NUW, const APInt &DemandedElts, KnownBits &KnownOut, KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q)
static void computeKnownBitsFromOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q)
static bool directlyImpliesPoison(const Value *ValAssumedPoison, const Value *V, unsigned Depth)
static void computeKnownBitsFromCmp(const Value *V, CmpInst::Predicate Pred, Value *LHS, Value *RHS, KnownBits &Known, const SimplifyQuery &Q)
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TVal, Value *FVal, unsigned Depth)
Recognize variations of: a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
static void unionWithMinMaxIntrinsicClamp(const IntrinsicInst *II, KnownBits &Known)
static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper)
static bool isSameUnderlyingObjectInLoop(const PHINode *PN, const LoopInfo *LI)
PN defines a loop-variant pointer to an object.
static bool isNonEqualPointersWithRecursiveGEP(const Value *A, const Value *B, const SimplifyQuery &Q)
static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II, const APInt *&CLow, const APInt *&CHigh)
static Value * lookThroughCastConst(CmpInst *CmpI, Type *SrcTy, Constant *C, Instruction::CastOps *CastOp)
static void computeKnownFPClassForFPTrunc(const Operator *Op, const APInt &DemandedElts, FPClassTest InterestedClasses, KnownFPClass &Known, unsigned Depth, const SimplifyQuery &Q)
static bool handleGuaranteedWellDefinedOps(const Instruction *I, const CallableT &Handle)
Enumerates all operands of I that are guaranteed to not be undef or poison.
static void computeKnownBits(const Value *V, const APInt &DemandedElts, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q)
Determine which bits of V are known to be either zero or one and return them in the Known bit set.
static KnownFPClass computeKnownFPClassFromContext(const Value *V, const SimplifyQuery &Q)
static Value * getNotValue(Value *V)
If the input value is the result of a 'not' op, constant integer, or vector splat of a constant integ...
static void computeKnownBitsFromCond(const Value *V, Value *Cond, KnownBits &Known, unsigned Depth, const SimplifyQuery &SQ, bool Invert)
static void computeKnownBitsFromICmpCond(const Value *V, ICmpInst *Cmp, KnownBits &Known, const SimplifyQuery &SQ, bool Invert)
static KnownBits computeKnownBitsForHorizontalOperation(const Operator *I, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, const function_ref< KnownBits(const KnownBits &, const KnownBits &)> KnownBitsFunc)
static bool matchOpWithOpEqZero(Value *Op0, Value *Op1)
static bool isNonZeroRecurrence(const PHINode *PN)
Try to detect a recurrence that monotonically increases/decreases from a non-zero starting value.
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW, bool NUW, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q)
static SelectPatternResult matchClamp(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal)
Recognize variations of: CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
static bool shiftAmountKnownInRange(const Value *ShiftAmount)
Shifts return poison if shiftwidth is larger than the bitwidth.
static bool isEphemeralValueOf(const Instruction *I, const Value *E)
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, unsigned Depth)
Match non-obvious integer minimum and maximum sequences.
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, const SimplifyQuery &Q)
Test whether a GEP's result is known to be non-null.
static bool handleGuaranteedNonPoisonOps(const Instruction *I, const CallableT &Handle)
Enumerates all operands of I that are guaranteed to not be poison.
static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y)
static std::optional< std::pair< Value *, Value * > > getInvertibleOperands(const Operator *Op1, const Operator *Op2)
If the pair of operators are the same invertible function, return the the operands of the function co...
static void computeKnownBitsFromShiftOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q, function_ref< KnownBits(const KnownBits &, const KnownBits &, bool)> KF)
Compute known bits from a shift operator, including those with a non-constant shift amount.
static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS)
static bool inputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty)
static KnownBits getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts, const KnownBits &KnownLHS, const KnownBits &KnownRHS, unsigned Depth, const SimplifyQuery &Q)
static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q)
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred ALHS ARHS" is true.
static unsigned ComputeNumSignBitsImpl(const Value *V, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
Return the number of times the sign bit of the register is replicated into the other bits.
static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, bool NSW, bool NUW)
static const Instruction * safeCxtI(const Value *V, const Instruction *CxtI)
static bool isImpliedToBeAPowerOfTwoFromCond(const Value *V, bool OrZero, const Value *Cond, bool CondIsTrue)
Return true if we can infer that V is known to be a power of 2 from dominating condition Cond (e....
static bool isKnownNonNaN(const Value *V, FastMathFlags FMF)
static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II, bool UseInstrInfo)
static Value * BuildSubAggregate(Value *From, Value *To, Type *IndexedType, SmallVectorImpl< unsigned > &Idxs, unsigned IdxSkip, BasicBlock::iterator InsertBefore)
void computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, KnownFPClass &Known, unsigned Depth, const SimplifyQuery &Q)
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
FPClassTest classify() const
Return the FPClassTest which will return true for the value.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
bool isSmallestNormalized() const
Class for arbitrary precision integers.
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
unsigned ceilLogBase2() const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
void clearAllBits()
Set every bit to 0.
APInt reverseBits() const
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
void setAllBits()
Set every bit to 1.
bool getBoolValue() const
Convert APInt to a boolean value.
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void clearSignBit()
Set the sign bit to 0.
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Class to represent array types.
Type * getElementType() const
This represents the llvm.assume intrinsic.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
InstListType::const_iterator const_iterator
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
bool isFPPredicate() const
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
bool isIntPredicate() const
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
bool hasSameSign() const
Query samesign information, for optimizations.
An array constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
StringRef getAsString() const
If this array is isString(), then this method returns the array as a StringRef.
uint64_t getElementAsInteger(unsigned i) const
If this is a sequential container of integers (of any size), return the specified element in the low ...
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This class represents a range of values.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
bool isAllNegative() const
Return true if all values in this range are negative.
OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
KnownBits toKnownBits() const
Return known bits for values in this range.
APInt getUnsignedMin() const
Return the smallest unsigned value contained in the ConstantRange.
bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other? NOTE: false does not mean that inverse pr...
APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
bool isAllNonNegative() const
Return true if all values in this range are non-negative.
static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
OverflowResult signedAddMayOverflow(const ConstantRange &Other) const
Return whether signed add of the two ranges always/never overflows.
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
This is an important base class in LLVM.
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isZeroValue() const
Return true if the value is negative zero or null value.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
unsigned getPointerTypeSizeInBits(Type *) const
Layout pointer size, in bits, based on the type.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ArrayRef< BranchInst * > conditionsFor(const Value *V) const
Access the list of branches which affect this value.
DomTreeNodeBase * getIDom() const
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
void setNoSignedZeros(bool B=true)
const BasicBlock & getEntryBlock() const
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getSwappedCmpPredicate() const
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getAggregateOperand()
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
const Function * getFunction() const
Return the function this instruction belongs to.
bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
Value * getPointerOperand()
Align getAlign() const
Return the alignment of the access that is being performed.
bool isLoopHeader(const BlockT *BB) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
This is a utility class that provides an abstraction for the common functionality between Instruction...
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
iterator_range< const_block_iterator > blocks() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed.
bool isExact() const
Test whether this division is known to be exact, with zero remainder.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
This instruction constructs a fixed permutation of two input vectors.
VectorType * getType() const
Overload to return most specific vector type.
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
unsigned getNumElements() const
Random access to the elements.
Type * getElementType(unsigned N) const
Provides information about what library functions are available for the current target.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
uint64_t getArrayNumElements() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt16Ty(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
bool isIEEE() const
Return whether the type is IEEE compatible, as defined by the eponymous method in APFloat.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
unsigned getOperandNo() const
Return the operand # of this use in its User.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
iterator_range< user_iterator > users()
const KnownBits & getKnownBits(const SimplifyQuery &Q) const
PointerType getValue() const
Represents an op.with.overflow intrinsic.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
An efficient, type-erasing, non-owning reference to a callable.
StructType * getStructTypeOrNull() const
TypeSize getSequentialElementStride(const DataLayout &DL) const
Type * getIndexedType() const
const ParentTy * getParent() const
self_iterator getIterator()
A range adaptor for a pair of iterators.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
auto m_LogicalOp()
Matches either L && R or L || R where L and R are arbitrary values.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
cst_pred_ty< is_power2_or_zero > m_Power2OrZero()
Match an integer or vector of 0 or power-of-2 values.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap, true > m_c_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
match_combine_or< MaxMin_match< FCmpInst, LHS, RHS, ofmin_pred_ty >, MaxMin_match< FCmpInst, LHS, RHS, ufmin_pred_ty > > m_OrdOrUnordFMin(const LHS &L, const RHS &R)
Match an 'ordered' or 'unordered' floating point minimum function.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
OneUse_match< T > m_OneUse(const T &SubPattern)
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > m_c_SMin(const LHS &L, const RHS &R)
Matches an SMin with LHS and RHS in either order.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true > m_c_UMax(const LHS &L, const RHS &R)
Matches a UMax with LHS and RHS in either order.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > m_c_UMin(const LHS &L, const RHS &R)
Matches a UMin with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
apfloat_match m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
match_combine_or< MaxMin_match< FCmpInst, LHS, RHS, ofmax_pred_ty >, MaxMin_match< FCmpInst, LHS, RHS, ufmax_pred_ty > > m_OrdOrUnordFMax(const LHS &L, const RHS &R)
Match an 'ordered' or 'unordered' floating point maximum function.
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true > m_c_SMax(const LHS &L, const RHS &R)
Matches an SMax with LHS and RHS in either order.
VScaleVal_match m_VScale()
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
cst_pred_ty< is_nonpositive > m_NonPositive()
Match an integer or vector of non-positive values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
BinOpPred_match< LHS, RHS, is_bitwiselogic_op > m_BitwiseLogic(const LHS &L, const RHS &R)
Matches bitwise logic operations.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
static unsigned decodeVSEW(unsigned VSEW)
unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul)
static constexpr unsigned RVVBitsPerBlock
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
Return true if LHS and RHS have no common bits set.
bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root, Instruction *OnPathTo, DominatorTree *DT)
Return true if undefined behavior would provable be executed on the path to OnPathTo if Root produced...
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns call pointer argument that is considered the same by aliasing rules.
bool isAssumeLikeIntrinsic(const Instruction *I)
Return true if it is an intrinsic that cannot be speculated but also cannot trap.
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
void getGuaranteedNonPoisonOps(const Instruction *I, SmallVectorImpl< const Value * > &Ops)
Insert operands of I into Ops such that I will trigger undefined behavior if I is executed and that o...
bool isOnlyUsedInZeroComparison(const Instruction *CxtI)
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
bool getConstantStringInfo(const Value *V, StringRef &Str, bool TrimAtNul=true)
This function computes the length of a null-terminated C string pointed to by V.
bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater or equal than requested.
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V)
Return true if the only users of this pointer are lifetime markers or droppable instructions.
Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
bool getUnderlyingObjectsForCodeGen(const Value *V, SmallVectorImpl< Value * > &Objects)
This is a wrapper around getUnderlyingObjects and adds support for basic ptrtoint+arithmetic+inttoptr...
std::pair< Intrinsic::ID, bool > canConvertToMinOrMaxIntrinsic(ArrayRef< Value * > VL)
Check if the values in VL are select instructions that can be converted to a min or max (vector) intr...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, const Loop *L)
Return true if this function can prove that the instruction I is executed for every iteration of the ...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
bool mustSuppressSpeculation(const LoadInst &LI)
Return true if speculation of the given load must be suppressed to avoid ordering or interfering with...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
gep_type_iterator gep_type_end(const User *GEP)
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
void computeKnownBitsFromContext(const Value *V, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q)
Merge bits known from context-dependent facts into Known.
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
RetainedKnowledge getKnowledgeValidInContext(const Value *V, ArrayRef< Attribute::AttrKind > AttrKinds, const Instruction *CtxI, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
Return a valid Knowledge associated to the Value V if its Attribute kind is in AttrKinds and the know...
RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
bool isSafeToSpeculativelyExecuteWithOpcode(unsigned Opcode, const Instruction *Inst, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
This returns the same result as isSafeToSpeculativelyExecute if Opcode is the actual opcode of Inst.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, unsigned Depth, const SimplifyQuery &SQ)
Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ, bool IsNSW=false)
bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
constexpr unsigned MaxAnalysisRecursionDepth
bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known be negative (i.e.
void getGuaranteedWellDefinedOps(const Instruction *I, SmallVectorImpl< const Value * > &Ops)
Insert operands of I into Ops such that I will trigger undefined behavior if I is executed and that o...
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Compute the possible floating-point classes that LHS could be based on fcmp \Pred LHS,...
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(const CallBase *Call, bool MustPreserveNullness)
{launder,strip}.invariant.group returns pointer that aliases its argument, and it only captures point...
void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm, bool Invert, unsigned Depth, const SimplifyQuery &Q)
Adjust Known for the given select Arm to include information from the select Cond.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
void getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS)
Compute the demanded elements mask of horizontal binary operations.
SelectPatternResult getSelectPattern(CmpInst::Predicate Pred, SelectPatternNaNBehavior NaNBehavior=SPNB_NA, bool Ordered=false)
Determine the pattern for predicate X Pred Y ? X : Y.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool programUndefinedIfPoison(const Instruction *Inst)
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
bool programUndefinedIfUndefOrPoison(const Instruction *Inst)
Return true if this function can prove that if Inst is executed and yields a poison value or undef bi...
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
FPClassTest inverse_fabs(FPClassTest Mask)
Return the test mask which returns true after fabs is applied to the value.
uint64_t GetStringLength(const Value *V, unsigned CharSize=8)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
bool isKnownInversion(const Value *X, const Value *Y)
Return true iff:
bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB, const TargetLibraryInfo *TLI)
Map a call instruction to an intrinsic ID.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
const Value * getUnderlyingObjectAggressive(const Value *V)
Like getUnderlyingObject(), but will try harder to find a single underlying object.
Intrinsic::ID getMinMaxIntrinsic(SelectPatternFlavor SPF)
Convert given SPF to equivalent min/max intrinsic.
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
SelectPatternNaNBehavior
Behavior when a floating point min/max is given one NaN and one non-NaN as input.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, returns either.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
DWARFExpression::Operation Op
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
gep_type_iterator gep_type_begin(const User *GEP)
std::pair< Value *, FPClassTest > fcmpToClassTest(CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
Value * isBytewiseValue(Value *V, const DataLayout &DL)
If the specified value can be set by repeating the same byte in memory, return the i8 value that it i...
std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped...
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
unsigned Log2(Align A)
Returns the log2 of the alignment.
std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize=8)
Returns true if the GEP is based on a pointer to a string (array of.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
Value * FindInsertedValue(Value *V, ArrayRef< unsigned > idx_range, std::optional< BasicBlock::iterator > InsertBefore=std::nullopt)
Given an aggregate and an sequence of indices, see if the scalar value indexed is already around as a...
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known be positive (i.e.
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Get the upper bound on bit size for this Value Op as a signed integer.
bool mayHaveNonDefUseDependency(const Instruction &I)
Returns true if the result or effects of the given instructions I depend values not reachable through...
bool isTriviallyVectorizable(Intrinsic::ID ID)
Identify if the intrinsic is trivially vectorizable.
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void findValuesAffectedByCondition(Value *Cond, bool IsAssume, function_ref< void(Value *)> InsertAffected)
Call InsertAffected on all Values whose known bits / value may be affected by the condition Cond.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static unsigned int semanticsPrecision(const fltSemantics &)
static bool isRepresentableAsNormalIn(const fltSemantics &Src, const fltSemantics &Dst)
This struct is a compact representation of a valid (non-zero power of two) alignment.
SmallPtrSet< Value *, 4 > AffectedValues
Represents offset+length into a ConstantDataArray.
uint64_t Length
Length of the slice.
uint64_t Offset
Slice starts at this Offset.
const ConstantDataArray * Array
ConstantDataArray pointer.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
constexpr bool outputsAreZero() const
Return true if output denormals should be flushed to 0.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ Dynamic
Denormals have unknown treatment.
@ IEEE
IEEE-754 denormal numbers preserved.
static constexpr DenormalMode getPositiveZero()
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
static constexpr DenormalMode getIEEE()
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedZeros(const InstT *Op) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
static KnownBits sadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.sadd.sat(LHS, RHS)
static std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
static KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
KnownBits blsi() const
Compute known bits for X & -X, which has only the lowest bit set of X set.
void makeNonNegative()
Make this value non-negative.
static KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static KnownBits ssub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.ssub.sat(LHS, RHS)
static KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
KnownBits blsmsk() const
Compute known bits for X ^ (X - 1), which has all bits up to and including the lowest set bit of X se...
void makeNegative()
Make this value negative.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
bool hasConflict() const
Returns true if there is conflicting information.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
unsigned getBitWidth() const
Get the bit width of this value.
static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
bool isConstant() const
Returns true if we know the value of all bits.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
static KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinTrailingOnes() const
Returns the minimum number of trailing one bits.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
static KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void setAllOnes()
Make all bits known to be one and discard any previous information.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static KnownBits uadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.uadd.sat(LHS, RHS)
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
KnownBits sextOrTrunc(unsigned BitWidth) const
Return known bits for a sign extension or truncation of the value we're tracking.
const APInt & getConstant() const
Returns the value when all bits have a known value.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater than zero.
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
bool isKnownNeverZero() const
Return true if it's known this can never be a zero.
void copysign(const KnownFPClass &Sign)
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
bool isKnownNeverLogicalNegZero(const Function &F, Type *Ty) const
Return true if it's known this can never be interpreted as a negative zero.
bool isKnownNeverLogicalPosZero(const Function &F, Type *Ty) const
Return true if it's know this can never be interpreted as a positive zero.
void propagateCanonicalizingSrc(const KnownFPClass &Src, const Function &F, Type *Ty)
Report known classes if Src is evaluated through a potentially canonicalizing operation.
void propagateDenormal(const KnownFPClass &Src, const Function &F, Type *Ty)
Propagate knowledge from a source value that could be a denormal or zero.
bool isKnownNeverNegInfinity() const
Return true if it's known this can never be -infinity.
bool isKnownNeverNegSubnormal() const
Return true if it's known this can never be a negative subnormal.
bool isKnownNeverPosZero() const
Return true if it's known this can never be a literal positive zero.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign bit is definitely unset.
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
bool isKnownNeverLogicalZero(const Function &F, Type *Ty) const
Return true if it's known this can never be interpreted as a zero.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -0.0.
void signBitMustBeOne()
Assume the sign bit is one.
void signBitMustBeZero()
Assume the sign bit is zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
bool isKnownNeverPosSubnormal() const
Return true if it's known this can never be a positive subnormal.
Represent one information held inside an operand bundle of an llvm.assume.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
SimplifyQuery getWithoutCondContext() const
SimplifyQuery getWithInstruction(const Instruction *I) const
const DomConditionCache * DC