58#include "llvm/IR/IntrinsicsAArch64.h"
59#include "llvm/IR/IntrinsicsAMDGPU.h"
60#include "llvm/IR/IntrinsicsRISCV.h"
61#include "llvm/IR/IntrinsicsX86.h"
98 return DL.getPointerTypeSizeInBits(Ty);
110 CxtI = dyn_cast<Instruction>(V);
124 CxtI = dyn_cast<Instruction>(V1);
128 CxtI = dyn_cast<Instruction>(V2);
136 const APInt &DemandedElts,
138 if (isa<ScalableVectorType>(Shuf->
getType())) {
140 DemandedLHS = DemandedRHS = DemandedElts;
147 DemandedElts, DemandedLHS, DemandedRHS);
159 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
187 V, DemandedElts,
Depth,
243 "LHS and RHS should have the same type");
245 "LHS and RHS should be integers");
256 return !
I->user_empty() &&
all_of(
I->users(), [](
const User *U) {
257 ICmpInst::Predicate P;
258 return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
266 bool OrZero,
unsigned Depth,
269 return ::isKnownToBeAPowerOfTwo(
283 return ::isKnownNonZero(
294 if (
auto *CI = dyn_cast<ConstantInt>(V))
295 return CI->getValue().isStrictlyPositive();
316 return ::isKnownNonEqual(
325 return Mask.isSubsetOf(Known.
Zero);
333 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
343 return ::ComputeNumSignBits(
352 return V->getType()->getScalarSizeInBits() - SignBits + 1;
357 const APInt &DemandedElts,
364 if (KnownOut.
isUnknown() && !NSW && !NUW)
389 bool isKnownNegativeOp0 = Known2.
isNegative();
392 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
397 (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
399 (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.
isNonZero());
403 bool SelfMultiply = Op0 == Op1;
423 unsigned NumRanges = Ranges.getNumOperands() / 2;
429 for (
unsigned i = 0; i < NumRanges; ++i) {
431 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
433 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
437 unsigned CommonPrefixBits =
438 (Range.getUnsignedMax() ^ Range.getUnsignedMin()).
countl_zero();
440 APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(
BitWidth);
441 Known.
One &= UnsignedMax & Mask;
442 Known.
Zero &= ~UnsignedMax & Mask;
457 while (!WorkSet.
empty()) {
459 if (!Visited.
insert(V).second)
464 return EphValues.count(U);
469 if (V ==
I || (isa<Instruction>(V) &&
471 !cast<Instruction>(V)->isTerminator())) {
473 if (
const User *U = dyn_cast<User>(V))
485 return CI->isAssumeLikeIntrinsic();
493 bool AllowEphemerals) {
511 if (!AllowEphemerals && Inv == CxtI)
546 if (Pred == ICmpInst::ICMP_UGT)
550 if (Pred == ICmpInst::ICMP_NE)
561 auto *VC = dyn_cast<ConstantDataVector>(
RHS);
565 for (
unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem;
568 Pred, VC->getElementAsAPInt(ElemIdx));
587 "Got assumption for the wrong function!");
590 if (!V->getType()->isPointerTy())
593 *
I,
I->bundle_op_info_begin()[Elem.Index])) {
595 (RK.AttrKind == Attribute::NonNull ||
596 (RK.AttrKind == Attribute::Dereferenceable &&
598 V->getType()->getPointerAddressSpace()))) &&
630 case ICmpInst::ICMP_EQ:
633 case ICmpInst::ICMP_SGE:
634 case ICmpInst::ICMP_SGT:
637 case ICmpInst::ICMP_SLT:
654 case ICmpInst::ICMP_EQ:
662 Known.
Zero |= ~*
C & *Mask;
663 Known.
One |= *
C & *Mask;
667 Known.
Zero |= ~*
C & ~*Mask;
668 Known.
One |= *
C & ~*Mask;
689 Known.
Zero |= RHSKnown.
Zero << ShAmt;
690 Known.
One |= RHSKnown.
One << ShAmt;
693 case ICmpInst::ICMP_NE: {
717 Invert ? Cmp->getInversePredicate() : Cmp->getPredicate();
750 if (
auto *Cmp = dyn_cast<ICmpInst>(
Cond))
791 "Got assumption for the wrong function!");
794 if (!V->getType()->isPointerTy())
797 *
I,
I->bundle_op_info_begin()[Elem.Index])) {
798 if (RK.WasOn == V && RK.AttrKind == Attribute::Alignment &&
810 Value *Arg =
I->getArgOperand(0);
830 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
866 Known = KF(Known2, Known, ShAmtNonZero);
877 Value *
X =
nullptr, *
Y =
nullptr;
879 switch (
I->getOpcode()) {
880 case Instruction::And:
881 KnownOut = KnownLHS & KnownRHS;
891 KnownOut = KnownLHS.
blsi();
893 KnownOut = KnownRHS.
blsi();
896 case Instruction::Or:
897 KnownOut = KnownLHS | KnownRHS;
899 case Instruction::Xor:
900 KnownOut = KnownLHS ^ KnownRHS;
910 const KnownBits &XBits =
I->getOperand(0) ==
X ? KnownLHS : KnownRHS;
911 KnownOut = XBits.
blsmsk();
924 if (!KnownOut.
Zero[0] && !KnownOut.
One[0] &&
946 auto *FVTy = dyn_cast<FixedVectorType>(
I->getType());
955 Attribute Attr =
F->getFnAttribute(Attribute::VScaleRange);
963 return ConstantRange::getEmpty(
BitWidth);
974 const APInt &DemandedElts,
980 switch (
I->getOpcode()) {
982 case Instruction::Load:
987 case Instruction::And:
993 case Instruction::Or:
999 case Instruction::Xor:
1005 case Instruction::Mul: {
1008 Known, Known2,
Depth, Q);
1011 case Instruction::UDiv: {
1018 case Instruction::SDiv: {
1025 case Instruction::Select: {
1026 auto ComputeForArm = [&](
Value *Arm,
bool Invert) {
1062 ComputeForArm(
I->getOperand(1),
false)
1066 case Instruction::FPTrunc:
1067 case Instruction::FPExt:
1068 case Instruction::FPToUI:
1069 case Instruction::FPToSI:
1070 case Instruction::SIToFP:
1071 case Instruction::UIToFP:
1073 case Instruction::PtrToInt:
1074 case Instruction::IntToPtr:
1077 case Instruction::ZExt:
1078 case Instruction::Trunc: {
1079 Type *SrcTy =
I->getOperand(0)->getType();
1081 unsigned SrcBitWidth;
1089 assert(SrcBitWidth &&
"SrcBitWidth can't be zero");
1092 if (
auto *Inst = dyn_cast<PossiblyNonNegInst>(
I);
1093 Inst && Inst->hasNonNeg() && !Known.
isNegative())
1098 case Instruction::BitCast: {
1099 Type *SrcTy =
I->getOperand(0)->getType();
1103 !
I->getType()->isVectorTy()) {
1109 auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
1110 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
1111 !
I->getType()->isIntOrIntVectorTy() ||
1112 isa<ScalableVectorType>(
I->getType()))
1117 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
1134 unsigned SubScale =
BitWidth / SubBitWidth;
1136 for (
unsigned i = 0; i != NumElts; ++i) {
1137 if (DemandedElts[i])
1138 SubDemandedElts.
setBit(i * SubScale);
1142 for (
unsigned i = 0; i != SubScale; ++i) {
1146 Known.
insertBits(KnownSrc, ShiftElt * SubBitWidth);
1151 case Instruction::SExt: {
1153 unsigned SrcBitWidth =
I->getOperand(0)->getType()->getScalarSizeInBits();
1155 Known = Known.
trunc(SrcBitWidth);
1162 case Instruction::Shl: {
1166 bool ShAmtNonZero) {
1167 return KnownBits::shl(KnownVal, KnownAmt, NUW, NSW, ShAmtNonZero);
1177 case Instruction::LShr: {
1178 bool Exact = Q.
IIQ.
isExact(cast<BinaryOperator>(
I));
1180 bool ShAmtNonZero) {
1191 case Instruction::AShr: {
1192 bool Exact = Q.
IIQ.
isExact(cast<BinaryOperator>(
I));
1194 bool ShAmtNonZero) {
1201 case Instruction::Sub: {
1205 DemandedElts, Known, Known2,
Depth, Q);
1208 case Instruction::Add: {
1212 DemandedElts, Known, Known2,
Depth, Q);
1215 case Instruction::SRem:
1221 case Instruction::URem:
1226 case Instruction::Alloca:
1229 case Instruction::GetElementPtr: {
1238 for (
unsigned i = 1, e =
I->getNumOperands(); i != e; ++i, ++GTI) {
1254 "Access to structure field must be known at compile time");
1259 unsigned Idx = cast<ConstantInt>(
Index)->getZExtValue();
1262 AccConstIndices +=
Offset;
1273 unsigned IndexBitWidth =
Index->getType()->getScalarSizeInBits();
1287 APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1288 IndexConst *= ScalingFactor;
1305 true,
false,
false, Known, IndexBits);
1310 true,
false,
false, Known,
Index);
1314 case Instruction::PHI: {
1317 Value *R =
nullptr, *L =
nullptr;
1327 if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
1328 Opcode == Instruction::Shl) &&
1343 case Instruction::Shl:
1347 case Instruction::LShr:
1352 case Instruction::AShr:
1363 if (Opcode == Instruction::Add ||
1364 Opcode == Instruction::Sub ||
1365 Opcode == Instruction::And ||
1366 Opcode == Instruction::Or ||
1367 Opcode == Instruction::Mul) {
1374 unsigned OpNum =
P->getOperand(0) == R ? 0 : 1;
1375 Instruction *RInst =
P->getIncomingBlock(OpNum)->getTerminator();
1376 Instruction *LInst =
P->getIncomingBlock(1-OpNum)->getTerminator();
1391 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1402 if (Opcode == Instruction::Add) {
1411 else if (Opcode == Instruction::Sub && BO->
getOperand(0) ==
I) {
1419 else if (Opcode == Instruction::Mul && Known2.
isNonNegative() &&
1429 if (
P->getNumIncomingValues() == 0)
1436 if (isa_and_nonnull<UndefValue>(
P->hasConstantValue()))
1441 for (
unsigned u = 0, e =
P->getNumIncomingValues(); u < e; ++u) {
1442 Value *IncValue =
P->getIncomingValue(u);
1444 if (IncValue ==
P)
continue;
1451 RecQ.
CxtI =
P->getIncomingBlock(u)->getTerminator();
1472 if ((TrueSucc ==
P->getParent()) != (FalseSucc ==
P->getParent())) {
1474 if (FalseSucc ==
P->getParent())
1488 Known2 = KnownUnion;
1502 case Instruction::Call:
1503 case Instruction::Invoke:
1510 if (
const Value *RV = cast<CallBase>(
I)->getReturnedArgOperand()) {
1511 if (RV->getType() ==
I->getType()) {
1523 switch (II->getIntrinsicID()) {
1525 case Intrinsic::abs: {
1527 bool IntMinIsPoison =
match(II->getArgOperand(1),
m_One());
1528 Known = Known2.
abs(IntMinIsPoison);
1531 case Intrinsic::bitreverse:
1536 case Intrinsic::bswap:
1541 case Intrinsic::ctlz: {
1547 PossibleLZ = std::min(PossibleLZ,
BitWidth - 1);
1552 case Intrinsic::cttz: {
1558 PossibleTZ = std::min(PossibleTZ,
BitWidth - 1);
1563 case Intrinsic::ctpop: {
1574 case Intrinsic::fshr:
1575 case Intrinsic::fshl: {
1582 if (II->getIntrinsicID() == Intrinsic::fshr)
1595 case Intrinsic::uadd_sat:
1600 case Intrinsic::usub_sat:
1605 case Intrinsic::sadd_sat:
1610 case Intrinsic::ssub_sat:
1615 case Intrinsic::umin:
1620 case Intrinsic::umax:
1625 case Intrinsic::smin:
1630 case Intrinsic::smax:
1635 case Intrinsic::ptrmask: {
1638 const Value *Mask =
I->getOperand(1);
1639 Known2 =
KnownBits(Mask->getType()->getScalarSizeInBits());
1645 case Intrinsic::x86_sse42_crc32_64_64:
1648 case Intrinsic::riscv_vsetvli:
1649 case Intrinsic::riscv_vsetvlimax: {
1650 bool HasAVL = II->getIntrinsicID() == Intrinsic::riscv_vsetvli;
1653 cast<ConstantInt>(II->getArgOperand(HasAVL))->getZExtValue());
1655 cast<ConstantInt>(II->getArgOperand(1 + HasAVL))->getZExtValue());
1664 if (
auto *CI = dyn_cast<ConstantInt>(II->getArgOperand(0)))
1665 MaxVL = std::min(MaxVL, CI->getZExtValue());
1667 unsigned KnownZeroFirstBit =
Log2_32(MaxVL) + 1;
1672 case Intrinsic::vscale: {
1673 if (!II->getParent() || !II->getFunction())
1682 case Instruction::ShuffleVector: {
1683 auto *Shuf = dyn_cast<ShuffleVectorInst>(
I);
1691 APInt DemandedLHS, DemandedRHS;
1698 if (!!DemandedLHS) {
1699 const Value *
LHS = Shuf->getOperand(0);
1705 if (!!DemandedRHS) {
1706 const Value *
RHS = Shuf->getOperand(1);
1712 case Instruction::InsertElement: {
1713 if (isa<ScalableVectorType>(
I->getType())) {
1717 const Value *Vec =
I->getOperand(0);
1718 const Value *Elt =
I->getOperand(1);
1719 auto *CIdx = dyn_cast<ConstantInt>(
I->getOperand(2));
1722 if (!CIdx || CIdx->getValue().uge(NumElts)) {
1728 unsigned EltIdx = CIdx->getZExtValue();
1730 if (DemandedElts[EltIdx]) {
1737 APInt DemandedVecElts = DemandedElts;
1739 if (!!DemandedVecElts) {
1745 case Instruction::ExtractElement: {
1748 const Value *Vec =
I->getOperand(0);
1750 auto *CIdx = dyn_cast<ConstantInt>(
Idx);
1751 if (isa<ScalableVectorType>(Vec->
getType())) {
1756 unsigned NumElts = cast<FixedVectorType>(Vec->
getType())->getNumElements();
1758 if (CIdx && CIdx->getValue().ult(NumElts))
1763 case Instruction::ExtractValue:
1764 if (
IntrinsicInst *II = dyn_cast<IntrinsicInst>(
I->getOperand(0))) {
1768 switch (II->getIntrinsicID()) {
1770 case Intrinsic::uadd_with_overflow:
1771 case Intrinsic::sadd_with_overflow:
1773 true, II->getArgOperand(0), II->getArgOperand(1),
false,
1774 false, DemandedElts, Known, Known2,
Depth, Q);
1776 case Intrinsic::usub_with_overflow:
1777 case Intrinsic::ssub_with_overflow:
1779 false, II->getArgOperand(0), II->getArgOperand(1),
false,
1780 false, DemandedElts, Known, Known2,
Depth, Q);
1782 case Intrinsic::umul_with_overflow:
1783 case Intrinsic::smul_with_overflow:
1785 DemandedElts, Known, Known2,
Depth, Q);
1791 case Instruction::Freeze:
1835 if (!DemandedElts) {
1841 assert(V &&
"No Value?");
1845 Type *Ty = V->getType();
1849 "Not integer or pointer type!");
1851 if (
auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1853 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
1854 "DemandedElt width should equal the fixed vector number of elements");
1857 "DemandedElt width should be 1 for scalars or scalable vectors");
1863 "V and Known should have same BitWidth");
1866 "V and Known should have same BitWidth");
1877 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1884 assert(!isa<ScalableVectorType>(V->getType()));
1888 for (
unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1889 if (!DemandedElts[i])
1891 APInt Elt = CDV->getElementAsAPInt(i);
1900 if (
const auto *CV = dyn_cast<ConstantVector>(V)) {
1901 assert(!isa<ScalableVectorType>(V->getType()));
1905 for (
unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1906 if (!DemandedElts[i])
1909 if (isa<PoisonValue>(Element))
1911 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1916 const APInt &Elt = ElementCI->getValue();
1929 if (isa<UndefValue>(V))
1934 assert(!isa<ConstantData>(V) &&
"Unhandled constant data!");
1942 if (
const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1943 if (!GA->isInterposable())
1948 if (
const Operator *
I = dyn_cast<Operator>(V))
1950 else if (
const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1951 if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange())
1952 Known = CR->toKnownBits();
1956 if (isa<PointerType>(V->getType())) {
1957 Align Alignment = V->getPointerAlignment(Q.
DL);
1967 assert((Known.
Zero & Known.
One) == 0 &&
"Bits known to be one AND zero?");
1975 Value *Start =
nullptr, *Step =
nullptr;
1981 if (U.get() == Start) {
1997 case Instruction::Mul:
2002 case Instruction::SDiv:
2008 case Instruction::UDiv:
2014 case Instruction::Shl:
2016 case Instruction::AShr:
2020 case Instruction::LShr:
2035 if (isa<Constant>(V))
2039 if (OrZero && V->getType()->getScalarSizeInBits() == 1)
2042 auto *
I = dyn_cast<Instruction>(V);
2049 return F->hasFnAttribute(Attribute::VScaleRange);
2066 switch (
I->getOpcode()) {
2067 case Instruction::ZExt:
2069 case Instruction::Trunc:
2071 case Instruction::Shl:
2075 case Instruction::LShr:
2076 if (OrZero || Q.
IIQ.
isExact(cast<BinaryOperator>(
I)))
2079 case Instruction::UDiv:
2083 case Instruction::Mul:
2087 case Instruction::And:
2098 case Instruction::Add: {
2104 if (
match(
I->getOperand(0),
2108 if (
match(
I->getOperand(1),
2113 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2122 if ((~(LHSBits.
Zero & RHSBits.
Zero)).isPowerOf2())
2130 case Instruction::Select:
2133 case Instruction::PHI: {
2137 auto *PN = cast<PHINode>(
I);
2154 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2155 return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ);
2158 case Instruction::Invoke:
2159 case Instruction::Call: {
2160 if (
auto *II = dyn_cast<IntrinsicInst>(
I)) {
2161 switch (II->getIntrinsicID()) {
2162 case Intrinsic::umax:
2163 case Intrinsic::smax:
2164 case Intrinsic::umin:
2165 case Intrinsic::smin:
2170 case Intrinsic::bitreverse:
2171 case Intrinsic::bswap:
2173 case Intrinsic::fshr:
2174 case Intrinsic::fshl:
2176 if (II->getArgOperand(0) == II->getArgOperand(1))
2200 F =
I->getFunction();
2202 if (!
GEP->isInBounds() ||
2207 assert(
GEP->getType()->isPointerTy() &&
"We only support plain pointer GEP");
2218 GTI != GTE; ++GTI) {
2220 if (
StructType *STy = GTI.getStructTypeOrNull()) {
2221 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2225 if (ElementOffset > 0)
2231 if (GTI.getSequentialElementStride(Q.
DL).isZero())
2236 if (
ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2260 assert(!isa<Constant>(V) &&
"Called for constant?");
2265 unsigned NumUsesExplored = 0;
2266 for (
const auto *U : V->users()) {
2274 if (
const auto *CB = dyn_cast<CallBase>(U))
2275 if (
auto *CalledFunc = CB->getCalledFunction())
2276 for (
const Argument &Arg : CalledFunc->args())
2277 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2278 Arg.hasNonNullAttr(
false) &&
2286 V->getType()->getPointerAddressSpace()) &&
2304 NonNullIfTrue =
true;
2306 NonNullIfTrue =
false;
2312 for (
const auto *CmpU : U->users()) {
2314 if (Visited.
insert(CmpU).second)
2317 while (!WorkList.
empty()) {
2326 for (
const auto *CurrU : Curr->users())
2327 if (Visited.
insert(CurrU).second)
2332 if (
const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2333 assert(BI->isConditional() &&
"uses a comparison!");
2336 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2340 }
else if (NonNullIfTrue &&
isGuard(Curr) &&
2341 DT->
dominates(cast<Instruction>(Curr), CtxI)) {
2355 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2357 for (
unsigned i = 0; i < NumRanges; ++i) {
2359 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2361 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2363 if (Range.contains(
Value))
2373 Value *Start =
nullptr, *Step =
nullptr;
2374 const APInt *StartC, *StepC;
2380 case Instruction::Add:
2386 case Instruction::Mul:
2389 case Instruction::Shl:
2391 case Instruction::AShr:
2392 case Instruction::LShr:
2401 Value *
Y,
bool NSW,
bool NUW) {
2446 if (
auto *
C = dyn_cast<Constant>(
X))
2450 return ::isKnownNonEqual(
X,
Y,
Depth, Q);
2456 auto ShiftOp = [&](
const APInt &Lhs,
const APInt &Rhs) {
2457 switch (
I->getOpcode()) {
2458 case Instruction::Shl:
2459 return Lhs.
shl(Rhs);
2460 case Instruction::LShr:
2461 return Lhs.
lshr(Rhs);
2462 case Instruction::AShr:
2463 return Lhs.
ashr(Rhs);
2469 auto InvShiftOp = [&](
const APInt &Lhs,
const APInt &Rhs) {
2470 switch (
I->getOpcode()) {
2471 case Instruction::Shl:
2472 return Lhs.
lshr(Rhs);
2473 case Instruction::LShr:
2474 case Instruction::AShr:
2475 return Lhs.
shl(Rhs);
2488 if (MaxShift.
uge(NumBits))
2491 if (!ShiftOp(KnownVal.
One, MaxShift).isZero())
2496 if (InvShiftOp(KnownVal.
Zero, NumBits - MaxShift)
2505 const APInt &DemandedElts,
2508 switch (
I->getOpcode()) {
2509 case Instruction::Alloca:
2511 return I->getType()->getPointerAddressSpace() == 0;
2512 case Instruction::GetElementPtr:
2513 if (
I->getType()->isPointerTy())
2516 case Instruction::BitCast: {
2544 Type *FromTy =
I->getOperand(0)->getType();
2549 case Instruction::IntToPtr:
2553 if (!isa<ScalableVectorType>(
I->getType()) &&
2558 case Instruction::PtrToInt:
2561 if (!isa<ScalableVectorType>(
I->getType()) &&
2566 case Instruction::Sub:
2569 case Instruction::Or:
2573 case Instruction::SExt:
2574 case Instruction::ZExt:
2578 case Instruction::Shl: {
2593 case Instruction::LShr:
2594 case Instruction::AShr: {
2609 case Instruction::UDiv:
2610 case Instruction::SDiv: {
2613 if (cast<PossiblyExactOperator>(
I)->isExact())
2616 std::optional<bool> XUgeY;
2626 if (
I->getOpcode() == Instruction::SDiv) {
2628 XKnown = XKnown.
abs(
false);
2629 YKnown = YKnown.
abs(
false);
2635 return XUgeY && *XUgeY;
2637 case Instruction::Add: {
2642 auto *BO = cast<OverflowingBinaryOperator>(
I);
2647 case Instruction::Mul: {
2676 case Instruction::Select: {
2683 auto SelectArmIsNonZero = [&](
bool IsTrueArm) {
2685 Op = IsTrueArm ?
I->getOperand(1) :
I->getOperand(2);
2698 Pred = ICmpInst::getInversePredicate(Pred);
2703 if (SelectArmIsNonZero(
true) &&
2704 SelectArmIsNonZero(
false))
2708 case Instruction::PHI: {
2709 auto *PN = cast<PHINode>(
I);
2719 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2721 ICmpInst::Predicate Pred;
2723 BasicBlock *TrueSucc, *FalseSucc;
2724 if (match(RecQ.CxtI,
2725 m_Br(m_c_ICmp(Pred, m_Specific(U.get()), m_Value(X)),
2726 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
2728 if ((TrueSucc == PN->getParent()) != (FalseSucc == PN->getParent())) {
2730 if (FalseSucc == PN->getParent())
2731 Pred = CmpInst::getInversePredicate(Pred);
2732 if (cmpExcludesZero(Pred, X))
2740 case Instruction::ExtractElement:
2741 if (
const auto *EEI = dyn_cast<ExtractElementInst>(
I)) {
2742 const Value *Vec = EEI->getVectorOperand();
2743 const Value *
Idx = EEI->getIndexOperand();
2744 auto *CIdx = dyn_cast<ConstantInt>(
Idx);
2745 if (
auto *VecTy = dyn_cast<FixedVectorType>(Vec->
getType())) {
2746 unsigned NumElts = VecTy->getNumElements();
2748 if (CIdx && CIdx->getValue().ult(NumElts))
2754 case Instruction::Freeze:
2758 case Instruction::Load: {
2759 auto *LI = cast<LoadInst>(
I);
2762 if (
auto *PtrT = dyn_cast<PointerType>(
I->getType()))
2772 case Instruction::Call:
2773 case Instruction::Invoke:
2774 if (
I->getType()->isPointerTy()) {
2775 const auto *Call = cast<CallBase>(
I);
2776 if (Call->isReturnNonNull())
2780 }
else if (
const Value *RV = cast<CallBase>(
I)->getReturnedArgOperand()) {
2785 if (
auto *II = dyn_cast<IntrinsicInst>(
I)) {
2786 switch (II->getIntrinsicID()) {
2787 case Intrinsic::sshl_sat:
2788 case Intrinsic::ushl_sat:
2789 case Intrinsic::abs:
2790 case Intrinsic::bitreverse:
2791 case Intrinsic::bswap:
2792 case Intrinsic::ctpop:
2794 case Intrinsic::ssub_sat:
2796 II->getArgOperand(0), II->getArgOperand(1));
2797 case Intrinsic::sadd_sat:
2799 II->getArgOperand(0), II->getArgOperand(1),
2801 case Intrinsic::umax:
2802 case Intrinsic::uadd_sat:
2805 case Intrinsic::smin:
2806 case Intrinsic::smax: {
2807 auto KnownOpImpliesNonZero = [&](
const KnownBits &K) {
2808 return II->getIntrinsicID() == Intrinsic::smin
2810 : K.isStrictlyPositive();
2814 if (KnownOpImpliesNonZero(XKnown))
2818 if (KnownOpImpliesNonZero(YKnown))
2825 case Intrinsic::umin:
2828 case Intrinsic::cttz:
2831 case Intrinsic::ctlz:
2834 case Intrinsic::fshr:
2835 case Intrinsic::fshl:
2837 if (II->getArgOperand(0) == II->getArgOperand(1))
2840 case Intrinsic::vscale:
2842 case Intrinsic::experimental_get_vector_length:
2855 return Known.
One != 0;
2868 Type *Ty = V->getType();
2871 if (
auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2873 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
2874 "DemandedElt width should equal the fixed vector number of elements");
2877 "DemandedElt width should be 1 for scalars");
2881 if (
auto *
C = dyn_cast<Constant>(V)) {
2882 if (
C->isNullValue())
2884 if (isa<ConstantInt>(
C))
2890 if (
auto *VecTy = dyn_cast<FixedVectorType>(
C->getType())) {
2891 for (
unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2892 if (!DemandedElts[i])
2894 Constant *Elt =
C->getAggregateElement(i);
2897 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2906 if (
const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2907 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2908 GV->getType()->getAddressSpace() == 0)
2913 if (!isa<ConstantExpr>(V))
2917 if (
auto *
I = dyn_cast<Instruction>(V)) {
2921 if (
auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2922 const APInt ZeroValue(Ty->getBitWidth(), 0);
2938 if (
PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2941 if (
const Argument *
A = dyn_cast<Argument>(V)) {
2942 if (((
A->hasPassPointeeByValueCopyAttr() &&
2944 A->hasNonNullAttr()))
2949 if (
const auto *
I = dyn_cast<Operator>(V))
2953 if (!isa<Constant>(V) &&
2961 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2962 APInt DemandedElts =
2973static std::optional<std::pair<Value*, Value*>>
2977 return std::nullopt;
2986 case Instruction::Add:
2987 case Instruction::Sub:
2993 case Instruction::Mul: {
2997 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2998 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2999 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3000 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3006 !cast<ConstantInt>(Op1->
getOperand(1))->isZero())
3010 case Instruction::Shl: {
3013 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
3014 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
3015 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3016 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3023 case Instruction::AShr:
3024 case Instruction::LShr: {
3025 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
3026 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
3027 if (!PEO1->isExact() || !PEO2->isExact())
3034 case Instruction::SExt:
3035 case Instruction::ZExt:
3039 case Instruction::PHI: {
3040 const PHINode *PN1 = cast<PHINode>(Op1);
3041 const PHINode *PN2 = cast<PHINode>(Op2);
3047 Value *Start1 =
nullptr, *Step1 =
nullptr;
3049 Value *Start2 =
nullptr, *Step2 =
nullptr;
3056 cast<Operator>(BO2));
3065 if (Values->first != PN1 || Values->second != PN2)
3068 return std::make_pair(Start1, Start2);
3071 return std::nullopt;
3078 if (!BO || BO->
getOpcode() != Instruction::Add)
3094 if (
auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3097 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3107 if (
auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3110 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3123 bool UsedFullRecursion =
false;
3125 if (!VisitedBBs.
insert(IncomBB).second)
3129 const APInt *C1, *C2;
3134 if (UsedFullRecursion)
3138 RecQ.
CxtI = IncomBB->getTerminator();
3141 UsedFullRecursion =
true;
3148 const SelectInst *SI1 = dyn_cast<SelectInst>(V1);
3152 if (
const SelectInst *SI2 = dyn_cast<SelectInst>(V2)) {
3154 const Value *Cond2 = SI2->getCondition();
3172 if (!
A->getType()->isPointerTy() || !
B->getType()->isPointerTy())
3175 auto *GEPA = dyn_cast<GEPOperator>(
A);
3176 if (!GEPA || GEPA->getNumIndices() != 1 || !isa<Constant>(GEPA->idx_begin()))
3180 auto *PN = dyn_cast<PHINode>(GEPA->getPointerOperand());
3181 if (!PN || PN->getNumIncomingValues() != 2)
3186 Value *Start =
nullptr;
3188 if (PN->getIncomingValue(0) == Step)
3189 Start = PN->getIncomingValue(1);
3190 else if (PN->getIncomingValue(1) == Step)
3191 Start = PN->getIncomingValue(0);
3202 APInt StartOffset(IndexWidth, 0);
3203 Start = Start->stripAndAccumulateInBoundsConstantOffsets(Q.
DL, StartOffset);
3204 APInt StepOffset(IndexWidth, 0);
3210 APInt OffsetB(IndexWidth, 0);
3211 B =
B->stripAndAccumulateInBoundsConstantOffsets(Q.
DL, OffsetB);
3212 return Start ==
B &&
3222 if (V1->
getType() != V2->getType())
3232 auto *O1 = dyn_cast<Operator>(V1);
3233 auto *O2 = dyn_cast<Operator>(V2);
3234 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
3238 if (
const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
3239 const PHINode *PN2 = cast<PHINode>(V2);
3291 "Input should be a Select!");
3301 const Value *LHS2 =
nullptr, *RHS2 =
nullptr;
3313 return CLow->
sle(*CHigh);
3318 const APInt *&CHigh) {
3320 II->
getIntrinsicID() == Intrinsic::smax) &&
"Must be smin/smax");
3323 auto *InnerII = dyn_cast<IntrinsicInst>(II->
getArgOperand(0));
3324 if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
3331 return CLow->
sle(*CHigh);
3339 const APInt &DemandedElts,
3341 const auto *CV = dyn_cast<Constant>(V);
3342 if (!CV || !isa<FixedVectorType>(CV->getType()))
3345 unsigned MinSignBits = TyBits;
3346 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
3347 for (
unsigned i = 0; i != NumElts; ++i) {
3348 if (!DemandedElts[i])
3351 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
3355 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
3362 const APInt &DemandedElts,
3368 assert(Result > 0 &&
"At least one sign bit needs to be present!");
3380 const APInt &DemandedElts,
3382 Type *Ty = V->getType();
3386 if (
auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3388 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
3389 "DemandedElt width should equal the fixed vector number of elements");
3392 "DemandedElt width should be 1 for scalars");
3406 unsigned FirstAnswer = 1;
3414 if (
auto *U = dyn_cast<Operator>(V)) {
3417 case Instruction::SExt:
3418 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
3421 case Instruction::SDiv: {
3422 const APInt *Denominator;
3434 return std::min(TyBits, NumBits + Denominator->
logBase2());
3439 case Instruction::SRem: {
3442 const APInt *Denominator;
3463 unsigned ResBits = TyBits - Denominator->
ceilLogBase2();
3464 Tmp = std::max(Tmp, ResBits);
3470 case Instruction::AShr: {
3475 if (ShAmt->
uge(TyBits))
3478 Tmp += ShAmtLimited;
3479 if (Tmp > TyBits) Tmp = TyBits;
3483 case Instruction::Shl: {
3488 if (ShAmt->
uge(TyBits) ||
3489 ShAmt->
uge(Tmp))
break;
3495 case Instruction::And:
3496 case Instruction::Or:
3497 case Instruction::Xor:
3502 FirstAnswer = std::min(Tmp, Tmp2);
3509 case Instruction::Select: {
3513 const APInt *CLow, *CHigh;
3518 if (Tmp == 1)
break;
3520 return std::min(Tmp, Tmp2);
3523 case Instruction::Add:
3527 if (Tmp == 1)
break;
3530 if (
const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3531 if (CRHS->isAllOnesValue()) {
3537 if ((Known.
Zero | 1).isAllOnes())
3547 if (Tmp2 == 1)
break;
3548 return std::min(Tmp, Tmp2) - 1;
3550 case Instruction::Sub:
3552 if (Tmp2 == 1)
break;
3555 if (
const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3556 if (CLHS->isNullValue()) {
3561 if ((Known.
Zero | 1).isAllOnes())
3576 if (Tmp == 1)
break;
3577 return std::min(Tmp, Tmp2) - 1;
3579 case Instruction::Mul: {
3583 if (SignBitsOp0 == 1)
break;
3585 if (SignBitsOp1 == 1)
break;
3586 unsigned OutValidBits =
3587 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3588 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3591 case Instruction::PHI: {
3592 const PHINode *PN = cast<PHINode>(U);
3595 if (NumIncomingValues > 4)
break;
3597 if (NumIncomingValues == 0)
break;
3603 for (
unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3604 if (Tmp == 1)
return Tmp;
3612 case Instruction::Trunc: {
3617 unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
3618 if (Tmp > (OperandTyBits - TyBits))
3619 return Tmp - (OperandTyBits - TyBits);
3624 case Instruction::ExtractElement:
3631 case Instruction::ShuffleVector: {
3634 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3639 APInt DemandedLHS, DemandedRHS;
3644 Tmp = std::numeric_limits<unsigned>::max();
3645 if (!!DemandedLHS) {
3646 const Value *
LHS = Shuf->getOperand(0);
3653 if (!!DemandedRHS) {
3654 const Value *
RHS = Shuf->getOperand(1);
3656 Tmp = std::min(Tmp, Tmp2);
3662 assert(Tmp <= TyBits &&
"Failed to determine minimum sign bits");
3665 case Instruction::Call: {
3666 if (
const auto *II = dyn_cast<IntrinsicInst>(U)) {
3667 switch (II->getIntrinsicID()) {
3669 case Intrinsic::abs:
3671 if (Tmp == 1)
break;
3675 case Intrinsic::smin:
3676 case Intrinsic::smax: {
3677 const APInt *CLow, *CHigh;
3692 if (
unsigned VecSignBits =
3710 if (
F->isIntrinsic())
3711 return F->getIntrinsicID();
3717 if (
F->hasLocalLinkage() || !TLI || !TLI->
getLibFunc(CB, Func) ||
3727 return Intrinsic::sin;
3731 return Intrinsic::cos;
3735 return Intrinsic::exp;
3739 return Intrinsic::exp2;
3743 return Intrinsic::log;
3745 case LibFunc_log10f:
3746 case LibFunc_log10l:
3747 return Intrinsic::log10;
3751 return Intrinsic::log2;
3755 return Intrinsic::fabs;
3759 return Intrinsic::minnum;
3763 return Intrinsic::maxnum;
3764 case LibFunc_copysign:
3765 case LibFunc_copysignf:
3766 case LibFunc_copysignl:
3767 return Intrinsic::copysign;
3769 case LibFunc_floorf:
3770 case LibFunc_floorl:
3771 return Intrinsic::floor;
3775 return Intrinsic::ceil;
3777 case LibFunc_truncf:
3778 case LibFunc_truncl:
3779 return Intrinsic::trunc;
3783 return Intrinsic::rint;
3784 case LibFunc_nearbyint:
3785 case LibFunc_nearbyintf:
3786 case LibFunc_nearbyintl:
3787 return Intrinsic::nearbyint;
3789 case LibFunc_roundf:
3790 case LibFunc_roundl:
3791 return Intrinsic::round;
3792 case LibFunc_roundeven:
3793 case LibFunc_roundevenf:
3794 case LibFunc_roundevenl:
3795 return Intrinsic::roundeven;
3799 return Intrinsic::pow;
3803 return Intrinsic::sqrt;
3851 switch (Mode.Input) {
3871 if (!Src.isKnownNeverPosZero() && !Src.isKnownNeverNegZero())
3875 if (Src.isKnownNeverSubnormal())
3905 bool &TrueIfSigned) {
3908 TrueIfSigned =
true;
3909 return RHS.isZero();
3911 TrueIfSigned =
true;
3912 return RHS.isAllOnes();
3914 TrueIfSigned =
false;
3915 return RHS.isAllOnes();
3917 TrueIfSigned =
false;
3918 return RHS.isZero();
3921 TrueIfSigned =
true;
3922 return RHS.isMaxSignedValue();
3925 TrueIfSigned =
true;
3926 return RHS.isMinSignedValue();
3929 TrueIfSigned =
false;
3930 return RHS.isMinSignedValue();
3933 TrueIfSigned =
false;
3934 return RHS.isMaxSignedValue();
3945 bool LookThroughSrc) {
3953std::pair<Value *, FPClassTest>
3955 const APFloat *ConstRHS,
bool LookThroughSrc) {
3957 auto [Src, ClassIfTrue, ClassIfFalse] =
3959 if (Src && ClassIfTrue == ~ClassIfFalse)
3960 return {Src, ClassIfTrue};
3971std::tuple<Value *, FPClassTest, FPClassTest>
3979 const bool IsNegativeRHS = (RHSClass &
fcNegative) == RHSClass;
3980 const bool IsPositiveRHS = (RHSClass &
fcPositive) == RHSClass;
3981 const bool IsNaN = (RHSClass & ~fcNan) ==
fcNone;
4007 const bool IsZero = (OrigClass &
fcZero) == OrigClass;
4054 const bool IsDenormalRHS = (OrigClass &
fcSubnormal) == OrigClass;
4056 const bool IsInf = (OrigClass &
fcInf) == OrigClass;
4074 if (IsNegativeRHS) {
4097 if (IsNegativeRHS) {
4098 Mask = ~fcNegInf & ~fcNan;
4102 Mask = ~fcPosInf & ~fcNan;
4111 if (IsNegativeRHS) {
4131 if (IsNegativeRHS) {
4151 if (IsNegativeRHS) {
4166 if (IsNegativeRHS) {
4194 return {Src, Class, ~fcNan};
4198 return {Src, ~fcNan, RHSClass |
fcNan};
4207 "should have been recognized as an exact class test");
4209 if (IsNegativeRHS) {
4219 return {Src, ~fcNan,
fcNan};
4228 return {Src,
fcNan, ~fcNan};
4247 return {Src, ClassesGE, ~ClassesGE | RHSClass};
4250 return {Src, ClassesGE |
fcNan, ~(ClassesGE |
fcNan) | RHSClass};
4253 return {Src, ClassesLE, ~ClassesLE | RHSClass};
4256 return {Src, ClassesLE |
fcNan, ~(ClassesLE |
fcNan) | RHSClass};
4260 }
else if (IsPositiveRHS) {
4276 return {Src, ClassesGE, ~ClassesGE | RHSClass};
4279 return {Src, ClassesGE |
fcNan, ~(ClassesGE |
fcNan) | RHSClass};
4282 return {Src, ClassesLE, ~ClassesLE | RHSClass};
4285 return {Src, ClassesLE |
fcNan, ~(ClassesLE |
fcNan) | RHSClass};
4294std::tuple<Value *, FPClassTest, FPClassTest>
4296 const APFloat &ConstRHS,
bool LookThroughSrc) {
4344std::tuple<Value *, FPClassTest, FPClassTest>
4346 Value *RHS,
bool LookThroughSrc) {
4368 KnownFromContext.
knownNot(~(CondIsTrue ? MaskIfTrue : MaskIfFalse));
4369 }
else if (
match(
Cond, m_Intrinsic<Intrinsic::is_fpclass>(
4372 KnownFromContext.
knownNot(CondIsTrue ? ~Mask : Mask);
4378 if (TrueIfSigned == CondIsTrue)
4390 return KnownFromContext;
4410 return KnownFromContext;
4420 "Got assumption for the wrong function!");
4421 assert(
I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
4422 "must be an assume intrinsic");
4428 Q.
CxtI, KnownFromContext);
4431 return KnownFromContext;
4441 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
4442 APInt DemandedElts =
4448 const APInt &DemandedElts,
4452 if ((InterestedClasses &
4458 KnownSrc,
Depth + 1, Q);
4473 assert(Known.
isUnknown() &&
"should not be called with known information");
4475 if (!DemandedElts) {
4483 if (
auto *CFP = dyn_cast_or_null<ConstantFP>(V)) {
4485 Known.
SignBit = CFP->isNegative();
4490 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
4491 const Constant *CV = dyn_cast<Constant>(V);
4494 bool SignBitAllZero =
true;
4495 bool SignBitAllOne =
true;
4498 unsigned NumElts = VFVTy->getNumElements();
4499 for (
unsigned i = 0; i != NumElts; ++i) {
4505 if (isa<UndefValue>(Elt))
4507 auto *CElt = dyn_cast<ConstantFP>(Elt);
4513 const APFloat &
C = CElt->getValueAPF();
4516 SignBitAllZero =
false;
4518 SignBitAllOne =
false;
4520 if (SignBitAllOne != SignBitAllZero)
4521 Known.
SignBit = SignBitAllOne;
4526 if (
const auto *CB = dyn_cast<CallBase>(V))
4527 KnownNotFromFlags |= CB->getRetNoFPClass();
4528 else if (
const auto *Arg = dyn_cast<Argument>(V))
4529 KnownNotFromFlags |= Arg->getNoFPClass();
4533 if (FPOp->hasNoNaNs())
4534 KnownNotFromFlags |=
fcNan;
4535 if (FPOp->hasNoInfs())
4536 KnownNotFromFlags |=
fcInf;
4540 KnownNotFromFlags |= ~AssumedClasses.KnownFPClasses;
4544 InterestedClasses &= ~KnownNotFromFlags;
4549 if (*AssumedClasses.SignBit)
4550 Known.signBitMustBeOne();
4552 Known.signBitMustBeZero();
4563 const unsigned Opc =
Op->getOpcode();
4565 case Instruction::FNeg: {
4567 Known,
Depth + 1, Q);
4571 case Instruction::Select: {
4579 Value *TestedValue =
nullptr;
4583 const Function *
F = cast<Instruction>(
Op)->getFunction();
4585 Value *CmpLHS, *CmpRHS;
4592 bool LookThroughFAbsFNeg = CmpLHS !=
LHS && CmpLHS !=
RHS;
4593 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
4596 m_Intrinsic<Intrinsic::is_fpclass>(
4599 MaskIfTrue = TestedMask;
4600 MaskIfFalse = ~TestedMask;
4603 if (TestedValue ==
LHS) {
4605 FilterLHS = MaskIfTrue;
4606 }
else if (TestedValue ==
RHS) {
4608 FilterRHS = MaskIfFalse;
4617 Known2,
Depth + 1, Q);
4623 case Instruction::Call: {
4627 case Intrinsic::fabs: {
4632 InterestedClasses, Known,
Depth + 1, Q);
4638 case Intrinsic::copysign: {
4642 Known,
Depth + 1, Q);
4644 KnownSign,
Depth + 1, Q);
4648 case Intrinsic::fma:
4649 case Intrinsic::fmuladd: {
4662 KnownAddend,
Depth + 1, Q);
4668 case Intrinsic::sqrt:
4669 case Intrinsic::experimental_constrained_sqrt: {
4672 if (InterestedClasses &
fcNan)
4676 KnownSrc,
Depth + 1, Q);
4702 case Intrinsic::sin:
4703 case Intrinsic::cos: {
4707 KnownSrc,
Depth + 1, Q);
4713 case Intrinsic::maxnum:
4714 case Intrinsic::minnum:
4715 case Intrinsic::minimum:
4716 case Intrinsic::maximum: {
4719 KnownLHS,
Depth + 1, Q);
4721 KnownRHS,
Depth + 1, Q);
4724 Known = KnownLHS | KnownRHS;
4727 if (NeverNaN && (IID == Intrinsic::minnum || IID == Intrinsic::maxnum))
4730 if (IID == Intrinsic::maxnum) {
4738 }
else if (IID == Intrinsic::maximum) {
4744 }
else if (IID == Intrinsic::minnum) {
4786 }
else if ((IID == Intrinsic::maximum || IID == Intrinsic::minimum) ||
4791 if ((IID == Intrinsic::maximum || IID == Intrinsic::maxnum) &&
4794 else if ((IID == Intrinsic::minimum || IID == Intrinsic::minnum) &&
4801 case Intrinsic::canonicalize: {
4804 KnownSrc,
Depth + 1, Q);
4848 case Intrinsic::trunc:
4849 case Intrinsic::floor:
4850 case Intrinsic::ceil:
4851 case Intrinsic::rint:
4852 case Intrinsic::nearbyint:
4853 case Intrinsic::round:
4854 case Intrinsic::roundeven: {
4862 KnownSrc,
Depth + 1, Q);
4871 if (IID == Intrinsic::trunc || !V->getType()->isMultiUnitFPType()) {
4886 case Intrinsic::exp:
4887 case Intrinsic::exp2:
4888 case Intrinsic::exp10: {
4895 KnownSrc,
Depth + 1, Q);
4903 case Intrinsic::fptrunc_round: {
4908 case Intrinsic::log:
4909 case Intrinsic::log10:
4910 case Intrinsic::log2:
4911 case Intrinsic::experimental_constrained_log:
4912 case Intrinsic::experimental_constrained_log10:
4913 case Intrinsic::experimental_constrained_log2: {
4929 KnownSrc,
Depth + 1, Q);
4943 case Intrinsic::powi: {
4948 Type *ExpTy = Exp->getType();
4952 ExponentKnownBits,
Depth + 1, Q);
4954 if (ExponentKnownBits.
Zero[0]) {
4969 KnownSrc,
Depth + 1, Q);
4974 case Intrinsic::ldexp: {
4977 KnownSrc,
Depth + 1, Q);
4993 if ((InterestedClasses & ExpInfoMask) ==
fcNone)
5005 const int MantissaBits = Precision - 1;
5011 if (ConstVal && ConstVal->
isZero()) {
5034 case Intrinsic::arithmetic_fence: {
5036 Known,
Depth + 1, Q);
5039 case Intrinsic::experimental_constrained_sitofp:
5040 case Intrinsic::experimental_constrained_uitofp:
5050 if (IID == Intrinsic::experimental_constrained_uitofp)
5061 case Instruction::FAdd:
5062 case Instruction::FSub: {
5065 Op->getOpcode() == Instruction::FAdd &&
5067 bool WantNaN = (InterestedClasses &
fcNan) !=
fcNone;
5070 if (!WantNaN && !WantNegative && !WantNegZero)
5076 if (InterestedClasses &
fcNan)
5077 InterestedSrcs |=
fcInf;
5079 KnownRHS,
Depth + 1, Q);
5083 WantNegZero || Opc == Instruction::FSub) {
5088 KnownLHS,
Depth + 1, Q);
5096 const Function *
F = cast<Instruction>(
Op)->getFunction();
5098 if (
Op->getOpcode() == Instruction::FAdd) {
5126 case Instruction::FMul: {
5128 if (
Op->getOperand(0) ==
Op->getOperand(1))
5161 const Function *
F = cast<Instruction>(
Op)->getFunction();
5173 case Instruction::FDiv:
5174 case Instruction::FRem: {
5175 if (
Op->getOperand(0) ==
Op->getOperand(1)) {
5177 if (
Op->getOpcode() == Instruction::FDiv) {
5188 const bool WantNan = (InterestedClasses &
fcNan) !=
fcNone;
5190 const bool WantPositive =
5192 if (!WantNan && !WantNegative && !WantPositive)
5201 bool KnowSomethingUseful =
5204 if (KnowSomethingUseful || WantPositive) {
5210 InterestedClasses & InterestedLHS, KnownLHS,
5214 const Function *
F = cast<Instruction>(
Op)->getFunction();
5216 if (
Op->getOpcode() == Instruction::FDiv) {
5253 case Instruction::FPExt: {
5256 Known,
Depth + 1, Q);
5259 Op->getType()->getScalarType()->getFltSemantics();
5261 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics();
5277 case Instruction::FPTrunc: {
5282 case Instruction::SIToFP:
5283 case Instruction::UIToFP: {
5292 if (
Op->getOpcode() == Instruction::UIToFP)
5295 if (InterestedClasses &
fcInf) {
5299 int IntSize =
Op->getOperand(0)->getType()->getScalarSizeInBits();
5300 if (
Op->getOpcode() == Instruction::SIToFP)
5305 Type *FPTy =
Op->getType()->getScalarType();
5312 case Instruction::ExtractElement: {
5315 const Value *Vec =
Op->getOperand(0);
5317 auto *CIdx = dyn_cast<ConstantInt>(
Idx);
5319 if (
auto *VecTy = dyn_cast<FixedVectorType>(Vec->
getType())) {
5320 unsigned NumElts = VecTy->getNumElements();
5322 if (CIdx && CIdx->getValue().ult(NumElts))
5330 case Instruction::InsertElement: {
5331 if (isa<ScalableVectorType>(
Op->getType()))
5334 const Value *Vec =
Op->getOperand(0);
5335 const Value *Elt =
Op->getOperand(1);
5336 auto *CIdx = dyn_cast<ConstantInt>(
Op->getOperand(2));
5339 if (!CIdx || CIdx->getValue().uge(NumElts))
5342 unsigned EltIdx = CIdx->getZExtValue();
5344 if (DemandedElts[EltIdx]) {
5354 APInt DemandedVecElts = DemandedElts;
5356 if (!!DemandedVecElts) {
5365 case Instruction::ShuffleVector: {
5368 APInt DemandedLHS, DemandedRHS;
5369 auto *Shuf = dyn_cast<ShuffleVectorInst>(
Op);
5373 if (!!DemandedLHS) {
5374 const Value *
LHS = Shuf->getOperand(0);
5385 if (!!DemandedRHS) {
5387 const Value *
RHS = Shuf->getOperand(1);
5395 case Instruction::ExtractValue: {
5399 if (isa<StructType>(Src->getType()) && Indices.
size() == 1 &&
5401 if (
const auto *II = dyn_cast<IntrinsicInst>(Src)) {
5402 switch (II->getIntrinsicID()) {
5403 case Intrinsic::frexp: {
5408 InterestedClasses, KnownSrc,
Depth + 1, Q);
5410 const Function *
F = cast<Instruction>(
Op)->getFunction();
5443 case Instruction::PHI: {
5446 if (
P->getNumIncomingValues() == 0)
5453 if (
Depth < PhiRecursionLimit) {
5455 if (isa_and_nonnull<UndefValue>(
P->hasConstantValue()))
5460 for (
const Use &U :
P->operands()) {
5461 Value *IncValue = U.get();
5471 IncValue, DemandedElts, InterestedClasses, KnownSrc,
5495 const APInt &DemandedElts,
5502 return KnownClasses;
5517 if (V->getType()->isIntegerTy(8))
5524 if (isa<UndefValue>(V))
5528 if (
DL.getTypeStoreSize(V->getType()).isZero())
5543 if (
C->isNullValue())
5550 if (CFP->getType()->isHalfTy())
5552 else if (CFP->getType()->isFloatTy())
5554 else if (CFP->getType()->isDoubleTy())
5563 if (CI->getBitWidth() % 8 == 0) {
5564 assert(CI->getBitWidth() > 8 &&
"8 bits should be handled above!");
5565 if (!CI->getValue().isSplat(8))
5567 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
5571 if (
auto *CE = dyn_cast<ConstantExpr>(
C)) {
5572 if (CE->getOpcode() == Instruction::IntToPtr) {
5573 if (
auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
5574 unsigned BitWidth =
DL.getPointerSizeInBits(PtrTy->getAddressSpace());
5587 if (
LHS == UndefInt8)
5589 if (
RHS == UndefInt8)
5595 Value *Val = UndefInt8;
5596 for (
unsigned I = 0,
E = CA->getNumElements();
I !=
E; ++
I)
5602 if (isa<ConstantAggregate>(
C)) {
5603 Value *Val = UndefInt8;
5604 for (
unsigned I = 0,
E =
C->getNumOperands();
I !=
E; ++
I)
5624 StructType *STy = dyn_cast<StructType>(IndexedType);
5638 while (PrevTo != OrigTo) {
5685 unsigned IdxSkip = Idxs.
size();
5698 std::optional<BasicBlock::iterator> InsertBefore) {
5701 if (idx_range.
empty())
5704 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
5705 "Not looking at a struct or array?");
5707 "Invalid indices for type?");
5709 if (
Constant *
C = dyn_cast<Constant>(V)) {
5710 C =
C->getAggregateElement(idx_range[0]);
5711 if (!
C)
return nullptr;
5718 const unsigned *req_idx = idx_range.
begin();
5719 for (
const unsigned *i =
I->idx_begin(), *e =
I->idx_end();
5720 i != e; ++i, ++req_idx) {
5721 if (req_idx == idx_range.
end()) {
5751 ArrayRef(req_idx, idx_range.
end()), InsertBefore);
5760 unsigned size =
I->getNumIndices() + idx_range.
size();
5765 Idxs.
append(
I->idx_begin(),
I->idx_end());
5771 &&
"Number of indices added not correct?");
5781 unsigned CharSize) {
5783 if (
GEP->getNumOperands() != 3)
5788 ArrayType *AT = dyn_cast<ArrayType>(
GEP->getSourceElementType());
5794 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(
GEP->getOperand(1));
5795 if (!FirstIdx || !FirstIdx->
isZero())
5809 assert(V &&
"V should not be null.");
5810 assert((ElementSize % 8) == 0 &&
5811 "ElementSize expected to be a multiple of the size of a byte.");
5812 unsigned ElementSizeInBytes = ElementSize / 8;
5824 APInt Off(
DL.getIndexTypeSizeInBits(V->getType()), 0);
5826 if (GV != V->stripAndAccumulateConstantOffsets(
DL, Off,
5831 uint64_t StartIdx = Off.getLimitedValue();
5838 if ((StartIdx % ElementSizeInBytes) != 0)
5841 Offset += StartIdx / ElementSizeInBytes;
5847 uint64_t SizeInBytes =
DL.getTypeStoreSize(GVTy).getFixedValue();
5850 Slice.
Array =
nullptr;
5861 if (
auto *ArrayInit = dyn_cast<ConstantDataArray>(
Init)) {
5862 Type *InitElTy = ArrayInit->getElementType();
5867 ArrayTy = ArrayInit->getType();
5872 if (ElementSize != 8)
5883 Array = dyn_cast<ConstantDataArray>(
Init);
5884 ArrayTy = dyn_cast<ArrayType>(
Init->getType());
5891 Slice.
Array = Array;
5907 if (Slice.
Array ==
nullptr) {
5930 Str = Str.substr(Slice.
Offset);
5936 Str = Str.substr(0, Str.find(
'\0'));
5949 unsigned CharSize) {
5951 V = V->stripPointerCasts();
5955 if (
const PHINode *PN = dyn_cast<PHINode>(V)) {
5956 if (!PHIs.
insert(PN).second)
5961 for (
Value *IncValue : PN->incoming_values()) {
5963 if (Len == 0)
return 0;
5965 if (Len == ~0ULL)
continue;
5967 if (Len != LenSoFar && LenSoFar != ~0ULL)
5977 if (
const SelectInst *SI = dyn_cast<SelectInst>(V)) {
5979 if (Len1 == 0)
return 0;
5981 if (Len2 == 0)
return 0;
5982 if (Len1 == ~0ULL)
return Len2;
5983 if (Len2 == ~0ULL)
return Len1;
5984 if (Len1 != Len2)
return 0;
5993 if (Slice.
Array ==
nullptr)
6001 unsigned NullIndex = 0;
6002 for (
unsigned E = Slice.
Length; NullIndex <
E; ++NullIndex) {
6007 return NullIndex + 1;
6013 if (!V->getType()->isPointerTy())
6020 return Len == ~0ULL ? 1 : Len;
6025 bool MustPreserveNullness) {
6027 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
6028 if (
const Value *RV = Call->getReturnedArgOperand())
6032 Call, MustPreserveNullness))
6033 return Call->getArgOperand(0);
6038 const CallBase *Call,
bool MustPreserveNullness) {
6039 switch (Call->getIntrinsicID()) {
6040 case Intrinsic::launder_invariant_group:
6041 case Intrinsic::strip_invariant_group:
6042 case Intrinsic::aarch64_irg:
6043 case Intrinsic::aarch64_tagp:
6053 case Intrinsic::amdgcn_make_buffer_rsrc:
6055 case Intrinsic::ptrmask:
6056 return !MustPreserveNullness;
6073 if (!PrevValue || LI->
getLoopFor(PrevValue->getParent()) != L)
6075 if (!PrevValue || LI->
getLoopFor(PrevValue->getParent()) != L)
6083 if (
auto *Load = dyn_cast<LoadInst>(PrevValue))
6084 if (!L->isLoopInvariant(Load->getPointerOperand()))
6090 if (!V->getType()->isPointerTy())
6092 for (
unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
6093 if (
auto *
GEP = dyn_cast<GEPOperator>(V)) {
6094 V =
GEP->getPointerOperand();
6097 V = cast<Operator>(V)->getOperand(0);
6098 if (!V->getType()->isPointerTy())
6100 }
else if (
auto *GA = dyn_cast<GlobalAlias>(V)) {
6101 if (GA->isInterposable())
6103 V = GA->getAliasee();
6105 if (
auto *
PHI = dyn_cast<PHINode>(V)) {
6107 if (
PHI->getNumIncomingValues() == 1) {
6108 V =
PHI->getIncomingValue(0);
6111 }
else if (
auto *Call = dyn_cast<CallBase>(V)) {
6129 assert(V->getType()->isPointerTy() &&
"Unexpected operand type!");
6136 LoopInfo *LI,
unsigned MaxLookup) {
6144 if (!Visited.
insert(
P).second)
6147 if (
auto *SI = dyn_cast<SelectInst>(
P)) {
6149 Worklist.
push_back(SI->getFalseValue());
6153 if (
auto *PN = dyn_cast<PHINode>(
P)) {
6173 }
while (!Worklist.
empty());
6180 if (
const Operator *U = dyn_cast<Operator>(V)) {
6183 if (U->getOpcode() == Instruction::PtrToInt)
6184 return U->getOperand(0);
6191 if (U->getOpcode() != Instruction::Add ||
6192 (!isa<ConstantInt>(U->getOperand(1)) &&
6194 !isa<PHINode>(U->getOperand(1))))
6196 V = U->getOperand(0);
6200 assert(V->getType()->isIntegerTy() &&
"Unexpected operand type!");
6217 for (
const Value *V : Objs) {
6218 if (!Visited.
insert(V).second)
6223 if (O->getType()->isPointerTy()) {
6236 }
while (!Working.
empty());
6245 auto AddWork = [&](
Value *V) {
6246 if (Visited.
insert(V).second)
6255 if (
AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
6256 if (Result && Result != AI)
6259 }
else if (
CastInst *CI = dyn_cast<CastInst>(V)) {
6260 AddWork(CI->getOperand(0));
6261 }
else if (
PHINode *PN = dyn_cast<PHINode>(V)) {
6262 for (
Value *IncValue : PN->incoming_values())
6264 }
else if (
auto *SI = dyn_cast<SelectInst>(V)) {
6265 AddWork(SI->getTrueValue());
6266 AddWork(SI->getFalseValue());
6268 if (OffsetZero && !
GEP->hasAllZeroIndices())
6270 AddWork(
GEP->getPointerOperand());
6271 }
else if (
CallBase *CB = dyn_cast<CallBase>(V)) {
6272 Value *Returned = CB->getReturnedArgOperand();
6280 }
while (!Worklist.
empty());
6286 const Value *V,
bool AllowLifetime,
bool AllowDroppable) {
6287 for (
const User *U : V->users()) {
6317 return F.hasFnAttribute(Attribute::SanitizeThread) ||
6319 F.hasFnAttribute(Attribute::SanitizeAddress) ||
6320 F.hasFnAttribute(Attribute::SanitizeHWAddress);
6339 auto hasEqualReturnAndLeadingOperandTypes =
6340 [](
const Instruction *Inst,
unsigned NumLeadingOperands) {
6344 for (
unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
6350 hasEqualReturnAndLeadingOperandTypes(Inst, 2));
6352 hasEqualReturnAndLeadingOperandTypes(Inst, 1));
6359 case Instruction::UDiv:
6360 case Instruction::URem: {
6367 case Instruction::SDiv:
6368 case Instruction::SRem: {
6370 const APInt *Numerator, *Denominator;
6374 if (*Denominator == 0)
6386 case Instruction::Load: {
6387 const LoadInst *LI = dyn_cast<LoadInst>(Inst);
6397 case Instruction::Call: {
6398 auto *CI = dyn_cast<const CallInst>(Inst);
6401 const Function *Callee = CI->getCalledFunction();
6405 return Callee && Callee->isSpeculatable();
6407 case Instruction::VAArg:
6408 case Instruction::Alloca:
6409 case Instruction::Invoke:
6410 case Instruction::CallBr:
6411 case Instruction::PHI:
6412 case Instruction::Store:
6413 case Instruction::Ret:
6414 case Instruction::Br:
6415 case Instruction::IndirectBr:
6416 case Instruction::Switch:
6417 case Instruction::Unreachable:
6418 case Instruction::Fence:
6419 case Instruction::AtomicRMW:
6420 case Instruction::AtomicCmpXchg:
6421 case Instruction::LandingPad:
6422 case Instruction::Resume:
6423 case Instruction::CatchSwitch:
6424 case Instruction::CatchPad:
6425 case Instruction::CatchRet:
6426 case Instruction::CleanupPad:
6427 case Instruction::CleanupRet:
6433 if (
I.mayReadOrWriteMemory())
6540 if (
Add &&
Add->hasNoSignedWrap()) {
6580 bool LHSOrRHSKnownNonNegative =
6582 bool LHSOrRHSKnownNegative =
6584 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
6587 if ((AddKnown.
isNonNegative() && LHSOrRHSKnownNonNegative) ||
6588 (AddKnown.
isNegative() && LHSOrRHSKnownNegative))
6617 m_Intrinsic<Intrinsic::usub_with_overflow>(
m_Value(),
m_Value())))
6666 if (
const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
6667 assert(EVI->getNumIndices() == 1 &&
"Obvious from CI's type");
6669 if (EVI->getIndices()[0] == 0)
6672 assert(EVI->getIndices()[0] == 1 &&
"Obvious from CI's type");
6674 for (
const auto *U : EVI->users())
6675 if (
const auto *
B = dyn_cast<BranchInst>(U)) {
6676 assert(
B->isConditional() &&
"How else is it using an i1?");
6687 auto AllUsesGuardedByBranch = [&](
const BranchInst *BI) {
6693 for (
const auto *Result :
Results) {
6696 if (DT.
dominates(NoWrapEdge, Result->getParent()))
6699 for (
const auto &RU : Result->uses())
6707 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
6712 auto *
C = dyn_cast<Constant>(ShiftAmount);
6718 if (
auto *FVTy = dyn_cast<FixedVectorType>(
C->getType())) {
6719 unsigned NumElts = FVTy->getNumElements();
6720 for (
unsigned i = 0; i < NumElts; ++i)
6721 ShiftAmounts.
push_back(
C->getAggregateElement(i));
6722 }
else if (isa<ScalableVectorType>(
C->getType()))
6728 auto *CI = dyn_cast_or_null<ConstantInt>(
C);
6729 return CI && CI->getValue().ult(
C->getType()->getIntegerBitWidth());
6742 return (
unsigned(Kind) &
unsigned(UndefPoisonKind::PoisonOnly)) != 0;
6746 return (
unsigned(Kind) &
unsigned(UndefPoisonKind::UndefOnly)) != 0;
6750 bool ConsiderFlagsAndMetadata) {
6753 Op->hasPoisonGeneratingFlagsOrMetadata())
6756 unsigned Opcode =
Op->getOpcode();
6760 case Instruction::Shl:
6761 case Instruction::AShr:
6762 case Instruction::LShr:
6764 case Instruction::FPToSI:
6765 case Instruction::FPToUI:
6769 case Instruction::Call:
6770 if (
auto *II = dyn_cast<IntrinsicInst>(
Op)) {
6771 switch (II->getIntrinsicID()) {
6773 case Intrinsic::ctlz:
6774 case Intrinsic::cttz:
6775 case Intrinsic::abs:
6776 if (cast<ConstantInt>(II->getArgOperand(1))->isNullValue())
6779 case Intrinsic::ctpop:
6780 case Intrinsic::bswap:
6781 case Intrinsic::bitreverse:
6782 case Intrinsic::fshl:
6783 case Intrinsic::fshr:
6784 case Intrinsic::smax:
6785 case Intrinsic::smin:
6786 case Intrinsic::umax:
6787 case Intrinsic::umin:
6788 case Intrinsic::ptrmask:
6789 case Intrinsic::fptoui_sat:
6790 case Intrinsic::fptosi_sat:
6791 case Intrinsic::sadd_with_overflow:
6792 case Intrinsic::ssub_with_overflow:
6793 case Intrinsic::smul_with_overflow:
6794 case Intrinsic::uadd_with_overflow:
6795 case Intrinsic::usub_with_overflow:
6796 case Intrinsic::umul_with_overflow:
6797 case Intrinsic::sadd_sat:
6798 case Intrinsic::uadd_sat:
6799 case Intrinsic::ssub_sat:
6800 case Intrinsic::usub_sat:
6802 case Intrinsic::sshl_sat:
6803 case Intrinsic::ushl_sat:
6806 case Intrinsic::fma:
6807 case Intrinsic::fmuladd:
6808 case Intrinsic::sqrt:
6809 case Intrinsic::powi:
6810 case Intrinsic::sin:
6811 case Intrinsic::cos:
6812 case Intrinsic::pow:
6813 case Intrinsic::log:
6814 case Intrinsic::log10:
6815 case Intrinsic::log2:
6816 case Intrinsic::exp:
6817 case Intrinsic::exp2:
6818 case Intrinsic::exp10:
6819 case Intrinsic::fabs:
6820 case Intrinsic::copysign:
6821 case Intrinsic::floor:
6822 case Intrinsic::ceil:
6823 case Intrinsic::trunc:
6824 case Intrinsic::rint:
6825 case Intrinsic::nearbyint:
6826 case Intrinsic::round:
6827 case Intrinsic::roundeven:
6828 case Intrinsic::fptrunc_round:
6829 case Intrinsic::canonicalize:
6830 case Intrinsic::arithmetic_fence:
6831 case Intrinsic::minnum:
6832 case Intrinsic::maxnum:
6833 case Intrinsic::minimum:
6834 case Intrinsic::maximum:
6835 case Intrinsic::is_fpclass:
6836 case Intrinsic::ldexp:
6837 case Intrinsic::frexp:
6839 case Intrinsic::lround:
6840 case Intrinsic::llround:
6841 case Intrinsic::lrint:
6842 case Intrinsic::llrint:
6849 case Instruction::CallBr:
6850 case Instruction::Invoke: {
6851 const auto *CB = cast<CallBase>(
Op);
6852 return !CB->hasRetAttr(Attribute::NoUndef);
6854 case Instruction::InsertElement:
6855 case Instruction::ExtractElement: {
6857 auto *VTy = cast<VectorType>(
Op->getOperand(0)->getType());
6858 unsigned IdxOp =
Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
6859 auto *
Idx = dyn_cast<ConstantInt>(
Op->getOperand(IdxOp));
6862 Idx->getValue().uge(VTy->getElementCount().getKnownMinValue());
6865 case Instruction::ShuffleVector: {
6867 ? cast<ConstantExpr>(
Op)->getShuffleMask()
6868 : cast<ShuffleVectorInst>(
Op)->getShuffleMask();
6871 case Instruction::FNeg:
6872 case Instruction::PHI:
6873 case Instruction::Select:
6874 case Instruction::URem:
6875 case Instruction::SRem:
6876 case Instruction::ExtractValue:
6877 case Instruction::InsertValue:
6878 case Instruction::Freeze:
6879 case Instruction::ICmp:
6880 case Instruction::FCmp:
6881 case Instruction::FAdd:
6882 case Instruction::FSub:
6883 case Instruction::FMul:
6884 case Instruction::FDiv:
6885 case Instruction::FRem:
6887 case Instruction::GetElementPtr:
6892 const auto *CE = dyn_cast<ConstantExpr>(
Op);
6893 if (isa<CastInst>(
Op) || (CE && CE->isCast()))
6904 bool ConsiderFlagsAndMetadata) {
6905 return ::canCreateUndefOrPoison(
Op, UndefPoisonKind::UndefOrPoison,
6906 ConsiderFlagsAndMetadata);
6910 return ::canCreateUndefOrPoison(
Op, UndefPoisonKind::PoisonOnly,
6911 ConsiderFlagsAndMetadata);
6916 if (ValAssumedPoison == V)
6923 if (
const auto *
I = dyn_cast<Instruction>(V)) {
6925 return propagatesPoison(Op) &&
6926 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
6954 const auto *
I = dyn_cast<Instruction>(ValAssumedPoison);
6957 return impliesPoison(Op, V, Depth + 1);
6964 return ::impliesPoison(ValAssumedPoison, V, 0);
6975 if (isa<MetadataAsValue>(V))
6978 if (
const auto *
A = dyn_cast<Argument>(V)) {
6979 if (
A->hasAttribute(Attribute::NoUndef) ||
6980 A->hasAttribute(Attribute::Dereferenceable) ||
6981 A->hasAttribute(Attribute::DereferenceableOrNull))
6985 if (
auto *
C = dyn_cast<Constant>(V)) {
6986 if (isa<PoisonValue>(
C))
6989 if (isa<UndefValue>(
C))
6992 if (isa<ConstantInt>(
C) || isa<GlobalVariable>(
C) || isa<ConstantFP>(V) ||
6993 isa<ConstantPointerNull>(
C) || isa<Function>(
C))
6996 if (
C->getType()->isVectorTy() && !isa<ConstantExpr>(
C))
6998 : !
C->containsUndefOrPoisonElement()) &&
6999 !
C->containsConstantExpression();
7010 auto *StrippedV = V->stripPointerCastsSameRepresentation();
7011 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
7012 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
7015 auto OpCheck = [&](
const Value *V) {
7019 if (
auto *Opr = dyn_cast<Operator>(V)) {
7022 if (isa<FreezeInst>(V))
7025 if (
const auto *CB = dyn_cast<CallBase>(V)) {
7026 if (CB->hasRetAttr(Attribute::NoUndef) ||
7027 CB->hasRetAttr(Attribute::Dereferenceable) ||
7028 CB->hasRetAttr(Attribute::DereferenceableOrNull))
7032 if (
const auto *PN = dyn_cast<PHINode>(V)) {
7033 unsigned Num = PN->getNumIncomingValues();
7034 bool IsWellDefined =
true;
7035 for (
unsigned i = 0; i < Num; ++i) {
7036 auto *TI = PN->getIncomingBlock(i)->getTerminator();
7038 DT,
Depth + 1, Kind)) {
7039 IsWellDefined =
false;
7047 all_of(Opr->operands(), OpCheck))
7051 if (
auto *
I = dyn_cast<LoadInst>(V))
7052 if (
I->hasMetadata(LLVMContext::MD_noundef) ||
7053 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
7054 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
7074 auto *Dominator = DNode->
getIDom();
7076 auto *TI = Dominator->
getBlock()->getTerminator();
7079 if (
auto BI = dyn_cast_or_null<BranchInst>(TI)) {
7080 if (BI->isConditional())
7081 Cond = BI->getCondition();
7082 }
else if (
auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
7083 Cond = SI->getCondition();
7091 auto *Opr = cast<Operator>(
Cond);
7092 if (
any_of(Opr->operands(),
7093 [V](
const Use &U) { return V == U && propagatesPoison(U); }))
7098 Dominator = Dominator->getIDom();
7111 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7112 UndefPoisonKind::UndefOrPoison);
7118 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7119 UndefPoisonKind::PoisonOnly);
7125 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7126 UndefPoisonKind::UndefOnly);
7149 while (!Worklist.
empty()) {
7158 if (
I != Root && !
any_of(
I->operands(), [&KnownPoison](
const Use &U) {
7159 return KnownPoison.contains(U) && propagatesPoison(U);
7163 if (KnownPoison.
insert(
I).second)
7175 return ::computeOverflowForSignedAdd(
Add->getOperand(0),
Add->getOperand(1),
7183 return ::computeOverflowForSignedAdd(
LHS,
RHS,
nullptr, SQ);
7192 if (isa<ReturnInst>(
I))
7194 if (isa<UnreachableInst>(
I))
7201 if (isa<CatchPadInst>(
I)) {
7215 return !
I->mayThrow() &&
I->willReturn();
7229 unsigned ScanLimit) {
7236 assert(ScanLimit &&
"scan limit must be non-zero");
7238 if (isa<DbgInfoIntrinsic>(
I))
7240 if (--ScanLimit == 0)
7254 if (
I->getParent() != L->getHeader())
return false;
7257 if (&LI ==
I)
return true;
7260 llvm_unreachable(
"Instruction not contained in its own parent basic block.");
7265 switch (
I->getOpcode()) {
7266 case Instruction::Freeze:
7267 case Instruction::PHI:
7268 case Instruction::Invoke:
7270 case Instruction::Select:
7272 case Instruction::Call:
7273 if (
auto *II = dyn_cast<IntrinsicInst>(
I)) {
7274 switch (II->getIntrinsicID()) {
7276 case Intrinsic::sadd_with_overflow:
7277 case Intrinsic::ssub_with_overflow:
7278 case Intrinsic::smul_with_overflow:
7279 case Intrinsic::uadd_with_overflow:
7280 case Intrinsic::usub_with_overflow:
7281 case Intrinsic::umul_with_overflow:
7286 case Intrinsic::ctpop:
7287 case Intrinsic::ctlz:
7288 case Intrinsic::cttz:
7289 case Intrinsic::abs:
7290 case Intrinsic::smax:
7291 case Intrinsic::smin:
7292 case Intrinsic::umax:
7293 case Intrinsic::umin:
7294 case Intrinsic::bitreverse:
7295 case Intrinsic::bswap:
7296 case Intrinsic::sadd_sat:
7297 case Intrinsic::ssub_sat:
7298 case Intrinsic::sshl_sat:
7299 case Intrinsic::uadd_sat:
7300 case Intrinsic::usub_sat:
7301 case Intrinsic::ushl_sat:
7306 case Instruction::ICmp:
7307 case Instruction::FCmp:
7308 case Instruction::GetElementPtr:
7311 if (isa<BinaryOperator>(
I) || isa<UnaryOperator>(
I) || isa<CastInst>(
I))
7322template <
typename CallableT>
7324 const CallableT &Handle) {
7325 switch (
I->getOpcode()) {
7326 case Instruction::Store:
7331 case Instruction::Load:
7338 case Instruction::AtomicCmpXchg:
7343 case Instruction::AtomicRMW:
7348 case Instruction::Call:
7349 case Instruction::Invoke: {
7353 for (
unsigned i = 0; i < CB->
arg_size(); ++i)
7356 CB->
paramHasAttr(i, Attribute::DereferenceableOrNull)) &&
7361 case Instruction::Ret:
7362 if (
I->getFunction()->hasRetAttribute(Attribute::NoUndef) &&
7363 Handle(
I->getOperand(0)))
7366 case Instruction::Switch:
7367 if (Handle(cast<SwitchInst>(
I)->getCondition()))
7370 case Instruction::Br: {
7371 auto *BR = cast<BranchInst>(
I);
7372 if (BR->isConditional() && Handle(BR->getCondition()))
7392template <
typename CallableT>
7394 const CallableT &Handle) {
7397 switch (
I->getOpcode()) {
7399 case Instruction::UDiv:
7400 case Instruction::SDiv:
7401 case Instruction::URem:
7402 case Instruction::SRem:
7403 return Handle(
I->getOperand(1));
7420 I, [&](
const Value *V) {
return KnownPoison.
count(V); });
7434 if (
const auto *Inst = dyn_cast<Instruction>(V)) {
7438 }
else if (
const auto *Arg = dyn_cast<Argument>(V)) {
7439 if (Arg->getParent()->isDeclaration())
7442 Begin = BB->
begin();
7449 unsigned ScanLimit = 32;
7458 if (isa<DbgInfoIntrinsic>(
I))
7460 if (--ScanLimit == 0)
7464 return WellDefinedOp == V;
7484 if (isa<DbgInfoIntrinsic>(
I))
7486 if (--ScanLimit == 0)
7494 for (
const Use &
Op :
I.operands()) {
7504 if (
I.getOpcode() == Instruction::Select &&
7505 YieldsPoison.
count(
I.getOperand(1)) &&
7506 YieldsPoison.
count(
I.getOperand(2))) {
7512 if (!BB || !Visited.
insert(BB).second)
7522 return ::programUndefinedIfUndefOrPoison(Inst,
false);
7526 return ::programUndefinedIfUndefOrPoison(Inst,
true);
7533 if (
auto *
C = dyn_cast<ConstantFP>(V))
7536 if (
auto *
C = dyn_cast<ConstantDataVector>(V)) {
7537 if (!
C->getElementType()->isFloatingPointTy())
7539 for (
unsigned I = 0,
E =
C->getNumElements();
I <
E; ++
I) {
7540 if (
C->getElementAsAPFloat(
I).isNaN())
7546 if (isa<ConstantAggregateZero>(V))
7553 if (
auto *
C = dyn_cast<ConstantFP>(V))
7554 return !
C->isZero();
7556 if (
auto *
C = dyn_cast<ConstantDataVector>(V)) {
7557 if (!
C->getElementType()->isFloatingPointTy())
7559 for (
unsigned I = 0,
E =
C->getNumElements();
I <
E; ++
I) {
7560 if (
C->getElementAsAPFloat(
I).isZero())
7583 if (CmpRHS == FalseVal) {
7631 if (CmpRHS != TrueVal) {
7670 Value *
A =
nullptr, *
B =
nullptr;
7675 Value *
C =
nullptr, *
D =
nullptr;
7677 if (L.Flavor != R.Flavor)
7729 return {L.Flavor,
SPNB_NA,
false};
7736 return {L.Flavor,
SPNB_NA,
false};
7743 return {L.Flavor,
SPNB_NA,
false};
7750 return {L.Flavor,
SPNB_NA,
false};
7766 return ConstantInt::get(V->getType(), ~(*
C));
7823 if ((CmpLHS == TrueVal &&
match(FalseVal,
m_APInt(C2))) ||
7842 assert(
X &&
Y &&
"Invalid operand");
7868 bool HasMismatchedZeros =
false;
7874 Value *OutputZeroVal =
nullptr;
7876 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
7877 OutputZeroVal = TrueVal;
7879 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
7880 OutputZeroVal = FalseVal;
7882 if (OutputZeroVal) {
7884 HasMismatchedZeros =
true;
7885 CmpLHS = OutputZeroVal;
7888 HasMismatchedZeros =
true;
7889 CmpRHS = OutputZeroVal;
7906 if (!HasMismatchedZeros)
7917 bool Ordered =
false;
7928 if (LHSSafe && RHSSafe) {
7958 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
7969 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
7994 auto MaybeSExtCmpLHS =
7998 if (
match(TrueVal, MaybeSExtCmpLHS)) {
8020 else if (
match(FalseVal, MaybeSExtCmpLHS)) {
8070 auto *Cast1 = dyn_cast<CastInst>(V1);
8074 *CastOp = Cast1->getOpcode();
8075 Type *SrcTy = Cast1->getSrcTy();
8076 if (
auto *Cast2 = dyn_cast<CastInst>(V2)) {
8078 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
8079 return Cast2->getOperand(0);
8083 auto *
C = dyn_cast<Constant>(V2);
8090 case Instruction::ZExt:
8094 case Instruction::SExt:
8098 case Instruction::Trunc:
8101 CmpConst->
getType() == SrcTy) {
8123 CastedTo = CmpConst;
8125 unsigned ExtOp = CmpI->
isSigned() ? Instruction::SExt : Instruction::ZExt;
8129 case Instruction::FPTrunc:
8132 case Instruction::FPExt:
8135 case Instruction::FPToUI:
8138 case Instruction::FPToSI:
8141 case Instruction::UIToFP:
8144 case Instruction::SIToFP:
8157 if (CastedBack && CastedBack !=
C)
8172 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
8175 Value *TrueVal = SI->getTrueValue();
8176 Value *FalseVal = SI->getFalseValue();
8189 if (isa<FPMathOperator>(CmpI))
8197 if (CastOp && CmpLHS->
getType() != TrueVal->getType()) {
8201 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
8203 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
8204 cast<CastInst>(TrueVal)->getOperand(0),
C,
8210 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
8212 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
8213 C, cast<CastInst>(FalseVal)->getOperand(0),
8217 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
8243 case Intrinsic::smax:
return Intrinsic::smin;
8244 case Intrinsic::smin:
return Intrinsic::smax;
8245 case Intrinsic::umax:
return Intrinsic::umin;
8246 case Intrinsic::umin:
return Intrinsic::umax;
8249 case Intrinsic::maximum:
return Intrinsic::minimum;
8250 case Intrinsic::minimum:
return Intrinsic::maximum;
8251 case Intrinsic::maxnum:
return Intrinsic::minnum;
8252 case Intrinsic::minnum:
return Intrinsic::maxnum;
8267std::pair<Intrinsic::ID, bool>
8272 bool AllCmpSingleUse =
true;
8275 if (
all_of(VL, [&SelectPattern, &AllCmpSingleUse](
Value *
I) {
8281 !
I->getType()->isIntOrIntVectorTy())
8284 SelectPattern.
Flavor != CurrentPattern.Flavor)
8286 SelectPattern = CurrentPattern;
8291 switch (SelectPattern.
Flavor) {
8293 return {Intrinsic::smin, AllCmpSingleUse};
8295 return {Intrinsic::umin, AllCmpSingleUse};
8297 return {Intrinsic::smax, AllCmpSingleUse};
8299 return {Intrinsic::umax, AllCmpSingleUse};
8312 if (
P->getNumIncomingValues() != 2)
8315 for (
unsigned i = 0; i != 2; ++i) {
8316 Value *L =
P->getIncomingValue(i);
8317 Value *R =
P->getIncomingValue(!i);
8318 auto *LU = dyn_cast<BinaryOperator>(L);
8321 unsigned Opcode = LU->getOpcode();
8327 case Instruction::LShr:
8328 case Instruction::AShr:
8329 case Instruction::Shl:
8330 case Instruction::Add:
8331 case Instruction::Sub:
8332 case Instruction::And:
8333 case Instruction::Or:
8334 case Instruction::Mul:
8335 case Instruction::FMul: {
8336 Value *LL = LU->getOperand(0);
8337 Value *LR = LU->getOperand(1);
8367 P = dyn_cast<PHINode>(
I->getOperand(0));
8369 P = dyn_cast<PHINode>(
I->getOperand(1));
8389 return !
C->isNegative();
8404 auto MatchNUWAddsToSameValue = [&](
const Value *
A,
const Value *
B,
8417 if (CA->isSubsetOf(Known.
Zero) && CB->isSubsetOf(Known.
Zero))
8425 const APInt *CLHS, *CRHS;
8426 if (MatchNUWAddsToSameValue(
LHS,
RHS,
X, CLHS, CRHS))
8427 return CLHS->
ule(*CRHS);
8436static std::optional<bool>
8442 return std::nullopt;
8449 return std::nullopt;
8456 return std::nullopt;
8463 return std::nullopt;
8470 return std::nullopt;
8477static std::optional<bool>
8485 return std::nullopt;
8502 return std::nullopt;
8512 bool LHSIsTrue,
unsigned Depth) {
8519 LHSIsTrue ?
LHS->getPredicate() :
LHS->getInversePredicate();
8543 const APInt *LC, *RC;
8548 if (L0 == R0 && L1 == R1)
8556 return LPred == RPred;
8561 return std::nullopt;
8568static std::optional<bool>
8573 assert((
LHS->getOpcode() == Instruction::And ||
8574 LHS->getOpcode() == Instruction::Or ||
8575 LHS->getOpcode() == Instruction::Select) &&
8576 "Expected LHS to be 'and', 'or', or 'select'.");
8583 const Value *ALHS, *ARHS;
8588 ALHS, RHSPred, RHSOp0, RHSOp1,
DL, LHSIsTrue,
Depth + 1))
8591 ARHS, RHSPred, RHSOp0, RHSOp1,
DL, LHSIsTrue,
Depth + 1))
8593 return std::nullopt;
8595 return std::nullopt;
8604 return std::nullopt;
8609 return std::nullopt;
8612 "Expected integer type only!");
8624 if ((LHSI->getOpcode() == Instruction::And ||
8625 LHSI->getOpcode() == Instruction::Or ||
8626 LHSI->getOpcode() == Instruction::Select))
8630 return std::nullopt;
8635 bool LHSIsTrue,
unsigned Depth) {
8640 if (
const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(
RHS))
8642 RHSCmp->getOperand(0), RHSCmp->getOperand(1),
DL,
8646 return std::nullopt;
8650 const Value *RHS1, *RHS2;
8652 if (std::optional<bool> Imp =
8656 if (std::optional<bool> Imp =
8662 if (std::optional<bool> Imp =
8666 if (std::optional<bool> Imp =
8672 return std::nullopt;
8677static std::pair<Value *, bool>
8679 if (!ContextI || !ContextI->
getParent())
8680 return {
nullptr,
false};
8687 return {
nullptr,
false};
8693 return {
nullptr,
false};
8696 if (TrueBB == FalseBB)
8697 return {
nullptr,
false};
8699 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
8700 "Predecessor block does not point to successor?");
8703 return {PredCond, TrueBB == ContextBB};
8709 assert(
Cond->getType()->isIntOrIntVectorTy(1) &&
"Condition must be bool");
8713 return std::nullopt;
8725 return std::nullopt;
8730 bool PreferSignedRange) {
8731 unsigned Width =
Lower.getBitWidth();
8734 case Instruction::Add:
8743 if (PreferSignedRange && HasNSW && HasNUW)
8749 }
else if (HasNSW) {
8750 if (
C->isNegative()) {
8763 case Instruction::And:
8774 case Instruction::Or:
8780 case Instruction::AShr:
8786 unsigned ShiftAmount = Width - 1;
8787 if (!
C->isZero() && IIQ.
isExact(&BO))
8788 ShiftAmount =
C->countr_zero();
8789 if (
C->isNegative()) {
8792 Upper =
C->ashr(ShiftAmount) + 1;
8795 Lower =
C->ashr(ShiftAmount);
8801 case Instruction::LShr:
8807 unsigned ShiftAmount = Width - 1;
8808 if (!
C->isZero() && IIQ.
isExact(&BO))
8809 ShiftAmount =
C->countr_zero();
8810 Lower =
C->lshr(ShiftAmount);
8815 case Instruction::Shl:
8822 if (
C->isNegative()) {
8824 unsigned ShiftAmount =
C->countl_one() - 1;
8825 Lower =
C->shl(ShiftAmount);
8829 unsigned ShiftAmount =
C->countl_zero() - 1;
8831 Upper =
C->shl(ShiftAmount) + 1;
8850 case Instruction::SDiv:
8854 if (
C->isAllOnes()) {
8859 }
else if (
C->countl_zero() < Width - 1) {
8870 if (
C->isMinSignedValue()) {
8882 case Instruction::UDiv:
8892 case Instruction::SRem:
8898 if (
C->isNegative()) {
8909 case Instruction::URem:
8927 case Intrinsic::ctpop:
8928 case Intrinsic::ctlz:
8929 case Intrinsic::cttz:
8932 APInt(Width, Width + 1));
8933 case Intrinsic::uadd_sat:
8939 case Intrinsic::sadd_sat:
8942 if (
C->isNegative())
8953 case Intrinsic::usub_sat:
8963 case Intrinsic::ssub_sat:
8965 if (
C->isNegative())
8975 if (
C->isNegative())
8986 case Intrinsic::umin:
8987 case Intrinsic::umax:
8988 case Intrinsic::smin:
8989 case Intrinsic::smax:
8995 case Intrinsic::umin:
8997 case Intrinsic::umax:
8999 case Intrinsic::smin:
9002 case Intrinsic::smax:
9009 case Intrinsic::abs:
9018 case Intrinsic::vscale:
9026 return ConstantRange::getFull(Width);
9031 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
9035 return ConstantRange::getFull(
BitWidth);
9058 return ConstantRange::getFull(
BitWidth);
9072 return ConstantRange::getFull(
BitWidth);
9079 unsigned BitWidth =
I->getType()->getScalarSizeInBits();
9080 if (!
I->getOperand(0)->getType()->getScalarType()->isHalfTy())
9082 if (isa<FPToSIInst>(
I) &&
BitWidth >= 17) {
9087 if (isa<FPToUIInst>(
I) &&
BitWidth >= 16) {
9098 assert(V->getType()->isIntOrIntVectorTy() &&
"Expected integer instruction");
9101 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
9106 unsigned BitWidth = V->getType()->getScalarSizeInBits();
9108 if (
auto *VC = dyn_cast<ConstantDataVector>(V)) {
9110 for (
unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem;
9112 CR = CR.
unionWith(VC->getElementAsAPInt(ElemIdx));
9118 if (
auto *BO = dyn_cast<BinaryOperator>(V)) {
9124 }
else if (
auto *II = dyn_cast<IntrinsicInst>(V))
9126 else if (
auto *SI = dyn_cast<SelectInst>(V)) {
9128 SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT,
Depth + 1);
9130 SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT,
Depth + 1);
9133 }
else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V)) {
9141 if (
auto *
I = dyn_cast<Instruction>(V))
9142 if (
auto *Range = IIQ.
getMetadata(
I, LLVMContext::MD_range))
9152 "Got assumption for the wrong function!");
9153 assert(
I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
9154 "must be an assume intrinsic");
9158 Value *Arg =
I->getArgOperand(0);
9159 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
9161 if (!Cmp || Cmp->getOperand(0) != V)
9166 UseInstrInfo, AC,
I, DT,
Depth + 1);
9179 if (isa<Argument>(V) || isa<GlobalValue>(V)) {
9181 }
else if (
auto *
I = dyn_cast<Instruction>(V)) {
9187 if (isa<Instruction>(
Op) || isa<Argument>(
Op))
9195 auto AddAffected = [&InsertAffected](
Value *V) {
9210 while (!Worklist.
empty()) {
9212 if (!Visited.
insert(V).second)
9235 AddCmpOperands(
A,
B);
9262 AddCmpOperands(
A,
B);
9272 }
else if (
match(V, m_Intrinsic<Intrinsic::is_fpclass>(
m_Value(
A),
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
Function Alias Analysis Results
This file contains the simple types necessary to represent the attributes associated with functions a...
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static MaybeAlign getAlign(Value *Ptr)
static const unsigned MaxDepth
static bool hasNoUnsignedWrap(BinaryOperator &I)
mir Rename Register Operands
Module.h This file contains the declarations for the Module class.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
static bool mayHaveSideEffects(MachineInstr &MI)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static SmallVector< VPValue *, 4 > getOperands(ArrayRef< VPValue * > Values, unsigned OperandIndex)
static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS)
static cl::opt< unsigned > DomConditionsMaxUses("dom-conditions-max-uses", cl::Hidden, cl::init(20))
static unsigned computeNumSignBitsVectorConstant(const Value *V, const APInt &DemandedElts, unsigned TyBits)
For vector constants, loop over the elements and find the constant with the minimum number of sign bi...
static bool isKnownNonZeroFromOperator(const Operator *I, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
static bool isKnownNonNullFromDominatingCondition(const Value *V, const Instruction *CtxI, const DominatorTree *DT)
static const Value * getUnderlyingObjectFromInt(const Value *V)
This is the function that does the work of looking through basic ptrtoint+arithmetic+inttoptr sequenc...
static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, const KnownBits &KnownVal)
static bool rangeMetadataExcludesValue(const MDNode *Ranges, const APInt &Value)
Does the 'Range' metadata (which must be a valid MD_range operand list) ensure that the value it's at...
static bool outputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty)
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS, const Value *RHS, const DataLayout &DL, unsigned Depth)
Return true if "icmp Pred LHS RHS" is always true.
static bool inputDenormalIsIEEE(const Function &F, const Type *Ty)
Return true if it's possible to assume IEEE treatment of input denormals in F for Val.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static void addValueAffectedByCondition(Value *V, function_ref< void(Value *)> InsertAffected)
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static std::tuple< Value *, FPClassTest, FPClassTest > exactClass(Value *V, FPClassTest M)
Return the return value for fcmpImpliesClass for a compare that produces an exact class test.
static bool haveNoCommonBitsSetSpecialCases(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
static std::optional< bool > isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred, const Value *RHSOp0, const Value *RHSOp1, const DataLayout &DL, bool LHSIsTrue, unsigned Depth)
Return true if LHS implies RHS is true.
static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower, APInt &Upper, const InstrInfoQuery &IIQ, bool PreferSignedRange)
static Value * lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, Instruction::CastOps *CastOp)
Helps to match a select pattern in case of a type mismatch.
static std::pair< Value *, bool > getDomPredecessorCondition(const Instruction *ContextI)
static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth, const SimplifyQuery &Q)
Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and the multiplication is nuw o...
static bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
Return true if the given value is known to be non-zero when defined.
static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
static bool includesPoison(UndefPoisonKind Kind)
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS)
Match clamp pattern for float types without care about NaNs or signed zeros.
static bool includesUndef(UndefPoisonKind Kind)
static std::optional< bool > isImpliedCondICmps(const ICmpInst *LHS, CmpInst::Predicate RPred, const Value *R0, const Value *R1, const DataLayout &DL, bool LHSIsTrue, unsigned Depth)
Return true if LHS implies RHS (expanded to its components as "R0 RPred R1") is true.
static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero, unsigned Depth, SimplifyQuery &Q)
Try to detect a recurrence that the value of the induction variable is always a power of two (or zero...
static ConstantRange getRangeForSelectPattern(const SelectInst &SI, const InstrInfoQuery &IIQ)
static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, FastMathFlags FMF, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, unsigned Depth)
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS, const DataLayout &DL, unsigned Depth)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred ALHS ARHS" is true.
static uint64_t GetStringLengthH(const Value *V, SmallPtrSetImpl< const PHINode * > &PHIs, unsigned CharSize)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(const Value *V, bool AllowLifetime, bool AllowDroppable)
static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, const APInt *&CLow, const APInt *&CHigh)
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1, bool NSW, bool NUW, const APInt &DemandedElts, KnownBits &KnownOut, KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q)
static void computeKnownBitsFromOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q)
static bool directlyImpliesPoison(const Value *ValAssumedPoison, const Value *V, unsigned Depth)
static void computeKnownBitsFromCmp(const Value *V, CmpInst::Predicate Pred, Value *LHS, Value *RHS, KnownBits &Known, const SimplifyQuery &Q)
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TVal, Value *FVal, unsigned Depth)
Recognize variations of: a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
static void computeKnownFPClassFromCond(const Value *V, Value *Cond, bool CondIsTrue, const Instruction *CxtI, KnownFPClass &KnownFromContext)
static std::optional< bool > isImpliedCondCommonOperandWithConstants(CmpInst::Predicate LPred, const APInt &LC, CmpInst::Predicate RPred, const APInt &RC)
Return true if "icmp LPred X, LC" implies "icmp RPred X, RC" is true.
static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper)
static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth, const SimplifyQuery &Q)
Return true if it is known that V1 != V2.
static bool isSameUnderlyingObjectInLoop(const PHINode *PN, const LoopInfo *LI)
PN defines a loop-variant pointer to an object.
static bool isNonEqualPointersWithRecursiveGEP(const Value *A, const Value *B, const SimplifyQuery &Q)
static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II, const APInt *&CLow, const APInt *&CHigh)
static void computeKnownFPClassForFPTrunc(const Operator *Op, const APInt &DemandedElts, FPClassTest InterestedClasses, KnownFPClass &Known, unsigned Depth, const SimplifyQuery &Q)
static bool handleGuaranteedWellDefinedOps(const Instruction *I, const CallableT &Handle)
Enumerates all operands of I that are guaranteed to not be undef or poison.
static void computeKnownBits(const Value *V, const APInt &DemandedElts, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q)
Determine which bits of V are known to be either zero or one and return them in the Known bit set.
static KnownFPClass computeKnownFPClassFromContext(const Value *V, const SimplifyQuery &Q)
static Value * getNotValue(Value *V)
If the input value is the result of a 'not' op, constant integer, or vector splat of a constant integ...
static bool isNonEqualSelect(const Value *V1, const Value *V2, unsigned Depth, const SimplifyQuery &Q)
static void computeKnownBitsFromCond(const Value *V, Value *Cond, KnownBits &Known, unsigned Depth, const SimplifyQuery &SQ, bool Invert)
static void computeKnownBitsFromICmpCond(const Value *V, ICmpInst *Cmp, KnownBits &Known, const SimplifyQuery &SQ, bool Invert)
static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II)
static bool isNonZeroRecurrence(const PHINode *PN)
Try to detect a recurrence that monotonically increases/decreases from a non-zero starting value.
static SelectPatternResult matchClamp(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal)
Recognize variations of: CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
static bool shiftAmountKnownInRange(const Value *ShiftAmount)
Shifts return poison if shiftwidth is larger than the bitwidth.
static bool isEphemeralValueOf(const Instruction *I, const Value *E)
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, unsigned Depth)
Match non-obvious integer minimum and maximum sequences.
static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2, unsigned Depth, const SimplifyQuery &Q)
static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth, const SimplifyQuery &Q)
Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and the shift is nuw or nsw.
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, const SimplifyQuery &Q)
Test whether a GEP's result is known to be non-null.
static bool handleGuaranteedNonPoisonOps(const Instruction *I, const CallableT &Handle)
Enumerates all operands of I that are guaranteed to not be poison.
static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y)
static std::optional< std::pair< Value *, Value * > > getInvertibleOperands(const Operator *Op1, const Operator *Op2)
If the pair of operators are the same invertible function, return the the operands of the function co...
static void computeKnownBitsFromShiftOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q, function_ref< KnownBits(const KnownBits &, const KnownBits &, bool)> KF)
Compute known bits from a shift operator, including those with a non-constant shift amount.
static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS)
static bool inputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty)
static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth, const SimplifyQuery &Q)
Return true if V2 == V1 + X, where X is known non-zero.
static KnownBits getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts, const KnownBits &KnownLHS, const KnownBits &KnownRHS, unsigned Depth, const SimplifyQuery &Q)
static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q)
static unsigned ComputeNumSignBitsImpl(const Value *V, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
Return the number of times the sign bit of the register is replicated into the other bits.
static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, bool NSW, bool NUW)
static const Instruction * safeCxtI(const Value *V, const Instruction *CxtI)
static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, const SimplifyQuery &Q)
Return true if the given value is known to have exactly one bit set when defined.
static bool isKnownNonNaN(const Value *V, FastMathFlags FMF)
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q)
static std::optional< bool > isImpliedCondMatchingOperands(CmpInst::Predicate LPred, CmpInst::Predicate RPred)
Return true if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is true.
static Value * BuildSubAggregate(Value *From, Value *To, Type *IndexedType, SmallVectorImpl< unsigned > &Idxs, unsigned IdxSkip, BasicBlock::iterator InsertBefore)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
FPClassTest classify() const
Return the FPClassTest which will return true for the value.
bool isSmallestNormalized() const
Class for arbitrary precision integers.
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
unsigned ceilLogBase2() const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
APInt reverseBits() const
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
void setAllBits()
Set every bit to 1.
bool getBoolValue() const
Convert APInt to a boolean value.
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Class to represent array types.
Type * getElementType() const
This represents the llvm.assume intrinsic.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
InstListType::const_iterator const_iterator
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
bool isFPPredicate() const
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is true when two compares have matching operands.
bool isIntPredicate() const
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is false when two compares have matching operands.
An array constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
StringRef getAsString() const
If this array is isString(), then this method returns the array as a StringRef.
uint64_t getElementAsInteger(unsigned i) const
If this is a sequential container of integers (of any size), return the specified element in the low ...
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This class represents a range of values.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
bool isAllNegative() const
Return true if all values in this range are negative.
OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
KnownBits toKnownBits() const
Return known bits for values in this range.
ConstantRange difference(const ConstantRange &CR) const
Subtract the specified range from this range (aka relative complement of the sets).
bool isEmptySet() const
Return true if this set contains no members.
APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
bool isAllNonNegative() const
Return true if all values in this range are non-negative.
static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
OverflowResult signedAddMayOverflow(const ConstantRange &Other) const
Return whether signed add of the two ranges always/never overflows.
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
This is an important base class in LLVM.
Constant * getSplatValue(bool AllowUndefs=false) const
If all elements of the vector constant have the same value, return that value.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isZeroValue() const
Return true if the value is negative zero or null value.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
unsigned getPointerTypeSizeInBits(Type *) const
Layout pointer size, in bits, based on the type.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ArrayRef< BranchInst * > conditionsFor(const Value *V) const
Access the list of branches which affect this value.
DomTreeNodeBase * getIDom() const
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
void setNoSignedZeros(bool B=true)
const BasicBlock & getEntryBlock() const
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Module * getParent()
Get the module that this global value is contained inside of...
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
Return true if this predicate is either EQ or NE.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getAggregateOperand()
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr, BasicBlock::iterator InsertBefore)
bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
bool isLifetimeStartOrEnd() const LLVM_READONLY
Return true if the instruction is a llvm.lifetime.start or llvm.lifetime.end marker.
bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
const BasicBlock * getParent() const
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
const Function * getFunction() const
Return the function this instruction belongs to.
bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
Value * getPointerOperand()
Align getAlign() const
Return the alignment of the access that is being performed.
bool isLoopHeader(const BlockT *BB) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
This is a utility class that provides an abstraction for the common functionality between Instruction...
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
iterator_range< const_block_iterator > blocks() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed.
bool isExact() const
Test whether this division is known to be exact, with zero remainder.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
This instruction constructs a fixed permutation of two input vectors.
VectorType * getType() const
Overload to return most specific vector type.
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
unsigned getNumElements() const
Random access to the elements.
Type * getElementType(unsigned N) const
Provides information about what library functions are available for the current target.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
uint64_t getArrayNumElements() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt16Ty(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
unsigned getOperandNo() const
Return the operand # of this use in its User.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
iterator_range< user_iterator > users()
const KnownBits & getKnownBits(const SimplifyQuery &Q) const
PointerType getValue() const
Represents an op.with.overflow intrinsic.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
An efficient, type-erasing, non-owning reference to a callable.
StructType * getStructTypeOrNull() const
TypeSize getSequentialElementStride(const DataLayout &DL) const
Type * getIndexedType() const
self_iterator getIterator()
A range adaptor for a pair of iterators.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
MaxMin_match< FCmpInst, LHS, RHS, ufmin_pred_ty > m_UnordFMin(const LHS &L, const RHS &R)
Match an 'unordered' floating point minimum function.
PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
apfloat_match m_APFloatAllowUndef(const APFloat *&Res)
Match APFloat while allowing undefs in splat vector constants.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
auto m_LogicalOp()
Matches either L && R or L || R where L and R are arbitrary values.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
cst_pred_ty< is_power2_or_zero > m_Power2OrZero()
Match an integer or vector of 0 or power-of-2 values.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst, FCmpInst::Predicate > m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R)
CastOperator_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate, true > m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
MaxMin_match< FCmpInst, LHS, RHS, ufmax_pred_ty > m_UnordFMax(const LHS &L, const RHS &R)
Match an 'unordered' floating point maximum function.
VScaleVal_match m_VScale()
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
MaxMin_match< FCmpInst, LHS, RHS, ofmax_pred_ty > m_OrdFMax(const LHS &L, const RHS &R)
Match an 'ordered' floating point maximum function.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
MaxMin_match< FCmpInst, LHS, RHS, ofmin_pred_ty > m_OrdFMin(const LHS &L, const RHS &R)
Match an 'ordered' floating point minimum function.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
BinOpPred_match< LHS, RHS, is_bitwiselogic_op > m_BitwiseLogic(const LHS &L, const RHS &R)
Matches bitwise logic operations.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
static unsigned decodeVSEW(unsigned VSEW)
unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul)
static constexpr unsigned RVVBitsPerBlock
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
Return true if LHS and RHS have no common bits set.
bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root, Instruction *OnPathTo, DominatorTree *DT)
Return true if undefined behavior would provable be executed on the path to OnPathTo if Root produced...
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &DL, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false)
Return true if the two given values are negation.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns call pointer argument that is considered the same by aliasing rules.
bool isAssumeLikeIntrinsic(const Instruction *I)
Return true if it is an intrinsic that cannot be speculated but also cannot trap.
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
void getGuaranteedNonPoisonOps(const Instruction *I, SmallVectorImpl< const Value * > &Ops)
Insert operands of I into Ops such that I will trigger undefined behavior if I is executed and that o...
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
bool getConstantStringInfo(const Value *V, StringRef &Str, bool TrimAtNul=true)
This function computes the length of a null-terminated C string pointed to by V.
bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater or equal than requested.
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V)
Return true if the only users of this pointer are lifetime markers or droppable instructions.
Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
bool getUnderlyingObjectsForCodeGen(const Value *V, SmallVectorImpl< Value * > &Objects)
This is a wrapper around getUnderlyingObjects and adds support for basic ptrtoint+arithmetic+inttoptr...
std::pair< Intrinsic::ID, bool > canConvertToMinOrMaxIntrinsic(ArrayRef< Value * > VL)
Check if the values in VL are select instructions that can be converted to a min or max (vector) intr...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, const Loop *L)
Return true if this function can prove that the instruction I is executed for every iteration of the ...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
bool mustSuppressSpeculation(const LoadInst &LI)
Return true if speculation of the given load must be suppressed to avoid ordering or interfering with...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
gep_type_iterator gep_type_end(const User *GEP)
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
void computeKnownBitsFromContext(const Value *V, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q)
Merge bits known from context-dependent facts into Known.
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
bool isSafeToSpeculativelyExecuteWithOpcode(unsigned Opcode, const Instruction *Inst, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
This returns the same result as isSafeToSpeculativelyExecute if Opcode is the actual opcode of Inst.
RetainedKnowledge getKnowledgeValidInContext(const Value *V, ArrayRef< Attribute::AttrKind > AttrKinds, const Instruction *CtxI, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
Return a valid Knowledge associated to the Value V if its Attribute kind is in AttrKinds and the know...
RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, unsigned Depth, const SimplifyQuery &SQ)
Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
constexpr unsigned MaxAnalysisRecursionDepth
void getGuaranteedWellDefinedOps(const Instruction *I, SmallVectorImpl< const Value * > &Ops)
Insert operands of I into Ops such that I will trigger undefined behavior if I is executed and that o...
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Compute the possible floating-point classes that LHS could be based on fcmp \Pred LHS,...
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(const CallBase *Call, bool MustPreserveNullness)
{launder,strip}.invariant.group returns pointer that aliases its argument, and it only captures point...
bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool programUndefinedIfPoison(const Instruction *Inst)
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
bool programUndefinedIfUndefOrPoison(const Instruction *Inst)
Return true if this function can prove that if Inst is executed and yields a poison value or undef bi...
FPClassTest inverse_fabs(FPClassTest Mask)
Return the test mask which returns true after fabs is applied to the value.
uint64_t GetStringLength(const Value *V, unsigned CharSize=8)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
constexpr int PoisonMaskElem
bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB, const TargetLibraryInfo *TLI)
Map a call instruction to an intrinsic ID.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
bool isKnownNegative(const Value *V, const SimplifyQuery &DL, unsigned Depth=0)
Returns true if the given value is known be negative (i.e.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
SelectPatternNaNBehavior
Behavior when a floating point min/max is given one NaN and one non-NaN as input.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, returns either NaN or non-NaN.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
DWARFExpression::Operation Op
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not ...
constexpr unsigned BitWidth
SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
gep_type_iterator gep_type_begin(const User *GEP)
std::pair< Value *, FPClassTest > fcmpToClassTest(CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
Value * isBytewiseValue(Value *V, const DataLayout &DL)
If the specified value can be set by repeating the same byte in memory, return the i8 value that it i...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
unsigned Log2(Align A)
Returns the log2 of the alignment.
std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize=8)
Returns true if the GEP is based on a pointer to a string (array of.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
Value * FindInsertedValue(Value *V, ArrayRef< unsigned > idx_range, std::optional< BasicBlock::iterator > InsertBefore=std::nullopt)
Given an aggregate and an sequence of indices, see if the scalar value indexed is already around as a...
bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known be positive (i.e.
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Get the upper bound on bit size for this Value Op as a signed integer.
bool mayHaveNonDefUseDependency(const Instruction &I)
Returns true if the result or effects of the given instructions I depend values not reachable through...
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void findValuesAffectedByCondition(Value *Cond, bool IsAssume, function_ref< void(Value *)> InsertAffected)
Call InsertAffected on all Values whose known bits / value may be affected by the condition Cond.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static unsigned int semanticsPrecision(const fltSemantics &)
static bool isRepresentableAsNormalIn(const fltSemantics &Src, const fltSemantics &Dst)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represents offset+length into a ConstantDataArray.
uint64_t Length
Length of the slice.
uint64_t Offset
Slice starts at this Offset.
const ConstantDataArray * Array
ConstantDataArray pointer.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
constexpr bool outputsAreZero() const
Return true if output denormals should be flushed to 0.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ Dynamic
Denormals have unknown treatment.
@ IEEE
IEEE-754 denormal numbers preserved.
static constexpr DenormalMode getPositiveZero()
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
static constexpr DenormalMode getIEEE()
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedZeros(const InstT *Op) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
static KnownBits sadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.sadd.sat(LHS, RHS)
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
KnownBits blsi() const
Compute known bits for X & -X, which has only the lowest bit set of X set.
void makeNonNegative()
Make this value non-negative.
static KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static KnownBits ssub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.ssub.sat(LHS, RHS)
static KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
KnownBits blsmsk() const
Compute known bits for X ^ (X - 1), which has all bits up to and including the lowest set bit of X se...
void makeNegative()
Make this value negative.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
bool hasConflict() const
Returns true if there is conflicting information.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
unsigned getBitWidth() const
Get the bit width of this value.
static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
bool isConstant() const
Returns true if we know the value of all bits.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
static KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinTrailingOnes() const
Returns the minimum number of trailing one bits.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
static KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void setAllOnes()
Make all bits known to be one and discard any previous information.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static KnownBits uadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.uadd.sat(LHS, RHS)
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
KnownBits sextOrTrunc(unsigned BitWidth) const
Return known bits for a sign extension or truncation of the value we're tracking.
const APInt & getConstant() const
Returns the value when all bits have a known value.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater tha...
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
bool isKnownNeverZero() const
Return true if it's known this can never be a zero.
void copysign(const KnownFPClass &Sign)
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
bool isKnownNeverLogicalNegZero(const Function &F, Type *Ty) const
Return true if it's know this can never be interpreted as a negative zero.
bool isKnownNeverLogicalPosZero(const Function &F, Type *Ty) const
Return true if it's know this can never be interpreted as a positive zero.
void propagateCanonicalizingSrc(const KnownFPClass &Src, const Function &F, Type *Ty)
Report known classes if Src is evaluated through a potentially canonicalizing operation.
void propagateDenormal(const KnownFPClass &Src, const Function &F, Type *Ty)
Propagate knowledge from a source value that could be a denormal or zero.
bool isKnownNeverNegInfinity() const
Return true if it's known this can never be -infinity.
bool isKnownNeverNegSubnormal() const
Return true if it's known this can never be a negative subnormal.
bool isKnownNeverPosZero() const
Return true if it's known this can never be a literal positive zero.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
bool isKnownNeverLogicalZero(const Function &F, Type *Ty) const
Return true if it's know this can never be interpreted as a zero.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
void signBitMustBeOne()
Assume the sign bit is one.
void signBitMustBeZero()
Assume the sign bit is zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
bool isKnownNeverPosSubnormal() const
Return true if it's known this can never be a positive subnormal.
Represent one information held inside an operand bundle of an llvm.assume.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if SPF is a min or a max pattern.
SimplifyQuery getWithInstruction(const Instruction *I) const
const DomConditionCache * DC