LLVM 20.0.0git
ValueTracking.h File Reference
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/WithCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include <cassert>
#include <cstdint>


Classes

struct  llvm::KnownFPClass
 
struct  llvm::ConstantDataArraySlice
 Represents offset+length into a ConstantDataArray.
 
struct  llvm::SelectPatternResult
 

Namespaces

namespace  llvm
 The top-level namespace that contains all of the LLVM APIs.
 

Enumerations

enum class  llvm::OverflowResult { llvm::AlwaysOverflowsLow , llvm::AlwaysOverflowsHigh , llvm::MayOverflow , llvm::NeverOverflows }
 
enum  llvm::SelectPatternFlavor {
  llvm::SPF_UNKNOWN = 0 , llvm::SPF_SMIN , llvm::SPF_UMIN , llvm::SPF_SMAX ,
  llvm::SPF_UMAX , llvm::SPF_FMINNUM , llvm::SPF_FMAXNUM , llvm::SPF_ABS ,
  llvm::SPF_NABS
}
 Specific patterns of select instructions we can match.
 
enum  llvm::SelectPatternNaNBehavior { llvm::SPNB_NA = 0 , llvm::SPNB_RETURNS_NAN , llvm::SPNB_RETURNS_OTHER , llvm::SPNB_RETURNS_ANY }
 Behavior when a floating point min/max is given one NaN and one non-NaN as input.
 

Functions

void llvm::computeKnownBits (const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
 Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
 
KnownBits llvm::computeKnownBits (const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
 Returns the known bits rather than passing by reference.
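As an illustration, a minimal sketch of using the value-returning overload (the helper function below is hypothetical and not part of this header):

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/Support/KnownBits.h"
    using namespace llvm;

    // Hypothetical helper: true if the low 'Bits' bits of V are known to be
    // zero, e.g. to prove that V is a multiple of (1 << Bits).
    static bool lowBitsKnownZero(const Value *V, const DataLayout &DL,
                                 unsigned Bits) {
      KnownBits Known = computeKnownBits(V, DL);
      return Known.countMinTrailingZeros() >= Bits;
    }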
 
KnownBits llvm::computeKnownBits (const Value *V, const APInt &DemandedElts, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
 Returns the known bits rather than passing by reference.
 
KnownBits llvm::computeKnownBits (const Value *V, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
 Determine which bits of V are known to be either zero or one and return them.
 
KnownBits llvm::computeKnownBits (const Value *V, unsigned Depth, const SimplifyQuery &Q)
 Determine which bits of V are known to be either zero or one and return them.
 
void llvm::computeKnownBits (const Value *V, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q)
 
void llvm::computeKnownBitsFromRangeMetadata (const MDNode &Ranges, KnownBits &Known)
 Compute known bits from the range metadata.
 
void llvm::computeKnownBitsFromContext (const Value *V, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q)
 Merge bits known from context-dependent facts into Known.
 
KnownBits llvm::analyzeKnownBitsFromAndXorOr (const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, unsigned Depth, const SimplifyQuery &SQ)
 Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
 
void llvm::adjustKnownBitsForSelectArm (KnownBits &Known, Value *Cond, Value *Arm, bool Invert, unsigned Depth, const SimplifyQuery &Q)
 Adjust Known for the given select Arm to include information from the select Cond.
 
bool llvm::haveNoCommonBitsSet (const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
 Return true if LHS and RHS have no common bits set.
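This is the classic justification for rewriting an add as an or; a sketch under the assumption that a DataLayout and context instruction are at hand (the helper name is hypothetical):

    #include "llvm/Analysis/SimplifyQuery.h"
    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // Hypothetical helper: an 'add' whose operands share no set bits computes
    // the same result as an 'or' of those operands.
    static bool addActsLikeOr(const BinaryOperator *Add, const DataLayout &DL) {
      SimplifyQuery Q(DL, /*CxtI=*/Add);
      return haveNoCommonBitsSet(Add->getOperand(0), Add->getOperand(1), Q);
    }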
 
bool llvm::isKnownToBeAPowerOfTwo (const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
 Return true if the given value is known to have exactly one bit set when defined.
 
bool llvm::isOnlyUsedInZeroComparison (const Instruction *CxtI)
 
bool llvm::isOnlyUsedInZeroEqualityComparison (const Instruction *CxtI)
 
bool llvm::isKnownNonZero (const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
 Return true if the given value is known to be non-zero when defined.
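A small sketch of how a caller might query this (the helper and its use case are illustrative only):

    #include "llvm/Analysis/SimplifyQuery.h"
    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;

    // Hypothetical helper: a udiv/urem may only be speculated if its divisor
    // is known to be non-zero at the context instruction.
    static bool divisorKnownNonZero(const Value *Divisor, const DataLayout &DL,
                                    const Instruction *CxtI) {
      return isKnownNonZero(Divisor, SimplifyQuery(DL, CxtI));
    }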
 
bool llvm::isKnownNegation (const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
 Return true if the two given values are negation.
 
bool llvm::isKnownInversion (const Value *X, const Value *Y)
 Return true iff X being poison implies Y is poison, X being true implies Y is false, and X being false implies Y is true; otherwise return false.
 
bool llvm::isKnownNonNegative (const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
 Returns true if the given value is known to be non-negative.
 
bool llvm::isKnownPositive (const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
 Returns true if the given value is known to be positive (i.e. non-negative and non-zero).
 
bool llvm::isKnownNegative (const Value *V, const SimplifyQuery &DL, unsigned Depth=0)
 Returns true if the given value is known to be negative (i.e. non-positive and non-zero).
 
bool llvm::isKnownNonEqual (const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
 Return true if the given values are known to be non-equal when defined.
 
bool llvm::MaskedValueIsZero (const Value *V, const APInt &Mask, const SimplifyQuery &DL, unsigned Depth=0)
 Return true if 'V & Mask' is known to be zero.
 
unsigned llvm::ComputeNumSignBits (const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
 Return the number of times the sign bit of the register is replicated into the other bits.
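A sketch of a typical consumer, checking whether a trunc/sext round trip would be lossless (the helper name is hypothetical):

    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;

    // Hypothetical helper: truncating V to NarrowBits and sign-extending back
    // is a no-op when the top (Wide - NarrowBits + 1) bits all replicate the
    // sign bit.
    static bool sextTruncIsNoop(const Value *V, unsigned NarrowBits,
                                const DataLayout &DL) {
      unsigned Wide = V->getType()->getScalarSizeInBits();
      return ComputeNumSignBits(V, DL) >= Wide - NarrowBits + 1;
    }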
 
unsigned llvm::ComputeMaxSignificantBits (const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
 Get the upper bound on bit size for this Value Op as a signed integer.
 
Intrinsic::ID llvm::getIntrinsicForCallSite (const CallBase &CB, const TargetLibraryInfo *TLI)
 Map a call instruction to an intrinsic ID.
 
bool llvm::isSignBitCheck (ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
 Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
 
std::pair< Value *, FPClassTest > llvm::fcmpToClassTest (CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
 Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with the given operands.
 
std::pair< Value *, FPClassTest > llvm::fcmpToClassTest (CmpInst::Predicate Pred, const Function &F, Value *LHS, const APFloat *ConstRHS, bool LookThroughSrc=true)
 
std::tuple< Value *, FPClassTest, FPClassTest > llvm::fcmpImpliesClass (CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
 Compute the possible floating-point classes that LHS could be, based on fcmp Pred LHS, RHS.
 
std::tuple< Value *, FPClassTest, FPClassTest > llvm::fcmpImpliesClass (CmpInst::Predicate Pred, const Function &F, Value *LHS, FPClassTest RHS, bool LookThroughSrc=true)
 
std::tuple< Value *, FPClassTest, FPClassTest > llvm::fcmpImpliesClass (CmpInst::Predicate Pred, const Function &F, Value *LHS, const APFloat &RHS, bool LookThroughSrc=true)
 
KnownFPClass llvm::operator| (KnownFPClass LHS, const KnownFPClass &RHS)
 
KnownFPClass llvm::operator| (const KnownFPClass &LHS, KnownFPClass &&RHS)
 
KnownFPClass llvm::computeKnownFPClass (const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
 Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
 
KnownFPClass llvm::computeKnownFPClass (const Value *V, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
 
KnownFPClass llvm::computeKnownFPClass (const Value *V, const DataLayout &DL, FPClassTest InterestedClasses=fcAllFlags, unsigned Depth=0, const TargetLibraryInfo *TLI=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
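A sketch using the DataLayout-based overload to prove a value finite (the helper name is hypothetical; assumes the usual FPClassTest bitmask operators from FloatingPointMode.h):

    #include "llvm/ADT/FloatingPointMode.h"
    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;

    // Hypothetical helper: true if V is provably neither NaN nor infinity.
    static bool provenFinite(const Value *V, const DataLayout &DL) {
      KnownFPClass Known =
          computeKnownFPClass(V, DL, /*InterestedClasses=*/fcNan | fcInf);
      return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
    }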
 
KnownFPClass llvm::computeKnownFPClass (const Value *V, const APInt &DemandedElts, FastMathFlags FMF, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
 Wrapper to account for known fast math flags at the use instruction.
 
KnownFPClass llvm::computeKnownFPClass (const Value *V, FastMathFlags FMF, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
 
bool llvm::cannotBeNegativeZero (const Value *V, unsigned Depth, const SimplifyQuery &SQ)
 Return true if we can prove that the specified FP value is never equal to -0.0.
 
bool llvm::cannotBeOrderedLessThanZero (const Value *V, unsigned Depth, const SimplifyQuery &SQ)
 Return true if we can prove that the specified FP value is either NaN or never less than -0.0.
 
bool llvm::isKnownNeverInfinity (const Value *V, unsigned Depth, const SimplifyQuery &SQ)
 Return true if the floating-point scalar value is not an infinity or if the floating-point vector value has no infinities.
 
bool llvm::isKnownNeverInfOrNaN (const Value *V, unsigned Depth, const SimplifyQuery &SQ)
 Return true if the floating-point value can never contain a NaN or infinity.
 
bool llvm::isKnownNeverNaN (const Value *V, unsigned Depth, const SimplifyQuery &SQ)
 Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has no NaN elements.
 
std::optional< bool > llvm::computeKnownFPSignBit (const Value *V, unsigned Depth, const SimplifyQuery &SQ)
 Return false if we can prove that the specified FP value's sign bit is 0, true if we can prove it is 1, and std::nullopt otherwise.
 
Value * llvm::isBytewiseValue (Value *V, const DataLayout &DL)
 If the specified value can be set by repeating the same byte in memory, return the i8 value that it is represented with.
 
Value * llvm::FindInsertedValue (Value *V, ArrayRef< unsigned > idx_range, std::optional< BasicBlock::iterator > InsertBefore=std::nullopt)
 Given an aggregate and a sequence of indices, see if the scalar value indexed is already around as a register, for example if it were inserted directly into the aggregate.
 
Value * llvm::GetPointerBaseWithConstantOffset (Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
 Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
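For example, two pointers can be tested for a constant distance by decomposing both (the helper name is hypothetical):

    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;

    // Hypothetical helper: if P1 and P2 share the same base pointer, return
    // true and set Delta to the constant byte distance P2 - P1.
    static bool constantPointerDistance(Value *P1, Value *P2,
                                        const DataLayout &DL, int64_t &Delta) {
      int64_t Off1 = 0, Off2 = 0;
      Value *Base1 = GetPointerBaseWithConstantOffset(P1, Off1, DL);
      Value *Base2 = GetPointerBaseWithConstantOffset(P2, Off2, DL);
      if (Base1 != Base2)
        return false;
      Delta = Off2 - Off1;
      return true;
    }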
 
const Value * llvm::GetPointerBaseWithConstantOffset (const Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
 
bool llvm::isGEPBasedOnPointerToString (const GEPOperator *GEP, unsigned CharSize=8)
 Returns true if the GEP is based on a pointer to a string (array of i8) and is indexing into that string.
 
bool llvm::getConstantDataArrayInfo (const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
 Returns true if the value V is a pointer into a ConstantDataArray.
 
bool llvm::getConstantStringInfo (const Value *V, StringRef &Str, bool TrimAtNul=true)
 This function computes the length of a null-terminated C string pointed to by V.
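A sketch of retrieving such a string and its length (the helper name is hypothetical):

    #include <cstdint>
    #include <optional>
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;

    // Hypothetical helper: if V points at a constant, NUL-terminated string,
    // return its length (excluding the NUL); otherwise return std::nullopt.
    static std::optional<uint64_t> constantStringLength(const Value *V) {
      StringRef Str;
      if (getConstantStringInfo(V, Str))
        return Str.size();
      return std::nullopt;
    }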
 
uint64_t llvm::GetStringLength (const Value *V, unsigned CharSize=8)
 If we can compute the length of the string pointed to by the specified pointer, return 'len+1'; if we can't, return 0.
 
const Value * llvm::getArgumentAliasingToReturnedPointer (const CallBase *Call, bool MustPreserveNullness)
 This function returns the pointer argument of the call that aliasing rules consider the same as the returned pointer.
 
Value * llvm::getArgumentAliasingToReturnedPointer (CallBase *Call, bool MustPreserveNullness)
 
bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing (const CallBase *Call, bool MustPreserveNullness)
 The {launder,strip}.invariant.group intrinsics return a pointer that aliases their argument, and they only capture the pointer by returning it.
 
const Value * llvm::getUnderlyingObject (const Value *V, unsigned MaxLookup=6)
 This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value V, returning the original object being addressed.
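A common pattern is to classify the memory a pointer refers to (the helper name is hypothetical):

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Hypothetical helper: true if Ptr provably addresses stack memory, i.e.
    // its single underlying object is an alloca.
    static bool pointsIntoAlloca(const Value *Ptr) {
      return isa<AllocaInst>(getUnderlyingObject(Ptr));
    }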
 
Value * llvm::getUnderlyingObject (Value *V, unsigned MaxLookup=6)
 
const Value * llvm::getUnderlyingObjectAggressive (const Value *V)
 Like getUnderlyingObject(), but will try harder to find a single underlying object.
 
void llvm::getUnderlyingObjects (const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
 This method is similar to getUnderlyingObject except that it can look through phi and select instructions and return multiple objects.
 
bool llvm::getUnderlyingObjectsForCodeGen (const Value *V, SmallVectorImpl< Value * > &Objects)
 This is a wrapper around getUnderlyingObjects and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
 
AllocaInst * llvm::findAllocaForValue (Value *V, bool OffsetZero=false)
 Returns the unique alloca that the value comes from, or nullptr.
 
const AllocaInst * llvm::findAllocaForValue (const Value *V, bool OffsetZero=false)
 
bool llvm::onlyUsedByLifetimeMarkers (const Value *V)
 Return true if the only users of this pointer are lifetime markers.
 
bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts (const Value *V)
 Return true if the only users of this pointer are lifetime markers or droppable instructions.
 
bool llvm::isSafeToSpeculativelyExecute (const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
 Return true if the instruction does not have any effects besides calculating the result and does not have undefined behavior.
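A sketch of a typical hoisting legality check built on this query (the combination shown is illustrative, not a complete legality test):

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    // Hypothetical helper: I may be executed unconditionally only if doing so
    // cannot trap, has no side effects, and does not depend on anything
    // outside its def-use graph.
    static bool canHoistUnconditionally(const Instruction *I) {
      return isSafeToSpeculativelyExecute(I) && !I->mayHaveSideEffects() &&
             !mayHaveNonDefUseDependency(*I);
    }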
 
bool llvm::isSafeToSpeculativelyExecute (const Instruction *I, BasicBlock::iterator CtxI, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
 
bool llvm::isSafeToSpeculativelyExecuteWithVariableReplaced (const Instruction *I)
 Like isSafeToSpeculativelyExecute, but does not use information from the instruction's non-constant operands, so the result remains valid if those operands are replaced with other values.
 
bool llvm::isSafeToSpeculativelyExecuteWithOpcode (unsigned Opcode, const Instruction *Inst, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
 This returns the same result as isSafeToSpeculativelyExecute if Opcode is the actual opcode of Inst.
 
bool llvm::mayHaveNonDefUseDependency (const Instruction &I)
 Returns true if the result or effects of the given instruction I depend on values not reachable through the def-use graph.
 
bool llvm::isAssumeLikeIntrinsic (const Instruction *I)
 Return true if it is an intrinsic that cannot be speculated but also cannot trap.
 
bool llvm::isValidAssumeForContext (const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
 Return true if it is valid to use the assumptions provided by an assume intrinsic, I, at the point in the control-flow identified by the context instruction, CxtI.
 
OverflowResult llvm::computeOverflowForUnsignedMul (const Value *LHS, const Value *RHS, const SimplifyQuery &SQ, bool IsNSW=false)
 
OverflowResult llvm::computeOverflowForSignedMul (const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
 
OverflowResult llvm::computeOverflowForUnsignedAdd (const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
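A sketch of using the overflow queries to justify adding a nuw flag (the helper name is hypothetical):

    #include "llvm/Analysis/SimplifyQuery.h"
    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;

    // Hypothetical helper: an 'add' may be marked 'nuw' when unsigned
    // overflow is provably impossible.
    static bool canAddNUW(const Value *LHS, const Value *RHS,
                          const DataLayout &DL, const Instruction *CxtI) {
      SimplifyQuery Q(DL, CxtI);
      return computeOverflowForUnsignedAdd(LHS, RHS, Q) ==
             OverflowResult::NeverOverflows;
    }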
 
OverflowResult llvm::computeOverflowForSignedAdd (const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
 
OverflowResult llvm::computeOverflowForSignedAdd (const AddOperator *Add, const SimplifyQuery &SQ)
 This version also leverages the sign bit of Add if known.
 
OverflowResult llvm::computeOverflowForUnsignedSub (const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
 
OverflowResult llvm::computeOverflowForSignedSub (const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
 
bool llvm::isOverflowIntrinsicNoWrap (const WithOverflowInst *WO, const DominatorTree &DT)
 Returns true if the arithmetic part of the WO 's result is used only along the paths control dependent on the computation not overflowing, WO being an <op>.with.overflow intrinsic.
 
ConstantRange llvm::getVScaleRange (const Function *F, unsigned BitWidth)
 Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.
 
ConstantRange llvm::computeConstantRange (const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
 Determine the possible constant range of an integer or vector of integer value.
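A sketch of a range-membership check built on this query (the helper name is hypothetical; Lo and Hi are assumed to have the same bit width as V):

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/ConstantRange.h"
    using namespace llvm;

    // Hypothetical helper: true if V is provably within [Lo, Hi) when
    // interpreted as an unsigned value.
    static bool valueInUnsignedRange(const Value *V, const APInt &Lo,
                                     const APInt &Hi) {
      ConstantRange CR = computeConstantRange(V, /*ForSigned=*/false);
      return ConstantRange::getNonEmpty(Lo, Hi).contains(CR);
    }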
 
ConstantRange llvm::computeConstantRangeIncludingKnownBits (const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
 Combine constant ranges from computeConstantRange() and computeKnownBits().
 
bool llvm::isGuaranteedToTransferExecutionToSuccessor (const Instruction *I)
 Return true if this function can prove that the instruction I will always transfer execution to one of its successors (including the next instruction that follows within a basic block).
 
bool llvm::isGuaranteedToTransferExecutionToSuccessor (const BasicBlock *BB)
 Returns true if this block does not contain a potential implicit exit.
 
bool llvm::isGuaranteedToTransferExecutionToSuccessor (BasicBlock::const_iterator Begin, BasicBlock::const_iterator End, unsigned ScanLimit=32)
 Return true if every instruction in the range (Begin, End) is guaranteed to transfer execution to its static successor.
 
bool llvm::isGuaranteedToTransferExecutionToSuccessor (iterator_range< BasicBlock::const_iterator > Range, unsigned ScanLimit=32)
 Same as previous, but with range expressed via iterator_range.
 
bool llvm::isGuaranteedToExecuteForEveryIteration (const Instruction *I, const Loop *L)
 Return true if this function can prove that the instruction I is executed for every iteration of the loop L.
 
bool llvm::propagatesPoison (const Use &PoisonOp)
 Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
 
void llvm::getGuaranteedNonPoisonOps (const Instruction *I, SmallVectorImpl< const Value * > &Ops)
 Insert operands of I into Ops such that I will trigger undefined behavior if I is executed and that operand has a poison value.
 
void llvm::getGuaranteedWellDefinedOps (const Instruction *I, SmallVectorImpl< const Value * > &Ops)
 Insert operands of I into Ops such that I will trigger undefined behavior if I is executed and that operand is not a well-defined value (i.e. has undef bits or poison).
 
bool llvm::mustTriggerUB (const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
 Return true if the given instruction must trigger undefined behavior when I is executed with any operands which appear in KnownPoison holding a poison value at the point of execution.
 
bool llvm::programUndefinedIfUndefOrPoison (const Instruction *Inst)
 Return true if this function can prove that if Inst is executed and yields a poison value or undef bits, then that will trigger undefined behavior.
 
bool llvm::programUndefinedIfPoison (const Instruction *Inst)
 
bool llvm::canCreateUndefOrPoison (const Operator *Op, bool ConsiderFlagsAndMetadata=true)
 canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison operands.
 
bool llvm::canCreatePoison (const Operator *Op, bool ConsiderFlagsAndMetadata=true)
 
bool llvm::impliesPoison (const Value *ValAssumedPoison, const Value *V)
 Return true if V is poison given that ValAssumedPoison is already poison.
 
bool llvm::isGuaranteedNotToBeUndefOrPoison (const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
 Return true if this function can prove that V does not have undef bits and is never poison.
 
bool llvm::isGuaranteedNotToBePoison (const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
 Returns true if V cannot be poison, but may be undef.
 
bool llvm::isGuaranteedNotToBePoison (const Value *V, AssumptionCache *AC, BasicBlock::iterator CtxI, const DominatorTree *DT=nullptr, unsigned Depth=0)
 
bool llvm::isGuaranteedNotToBeUndef (const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
 Returns true if V cannot be undef, but may be poison.
 
bool llvm::mustExecuteUBIfPoisonOnPathTo (Instruction *Root, Instruction *OnPathTo, DominatorTree *DT)
 Return true if undefined behavior would provably be executed on the path to OnPathTo if Root produced a poison result.
 
SelectPatternResult llvm::matchSelectPattern (Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
 Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out parameter results if we successfully match.
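For example, a caller might recognize integer min/max selects like this (the helper name is hypothetical):

    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;

    // Hypothetical helper: true if V is a select implementing a signed or
    // unsigned integer min/max over the returned LHS/RHS operands.
    static bool isIntegerMinMax(Value *V, Value *&LHS, Value *&RHS) {
      SelectPatternResult SPR = matchSelectPattern(V, LHS, RHS);
      return SPR.Flavor == SPF_SMIN || SPR.Flavor == SPF_SMAX ||
             SPR.Flavor == SPF_UMIN || SPR.Flavor == SPF_UMAX;
    }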
 
SelectPatternResult llvm::matchSelectPattern (const Value *V, const Value *&LHS, const Value *&RHS)
 
SelectPatternResult llvm::matchDecomposedSelectPattern (CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
 Determine the pattern that a select with the given compare as its predicate and given values as its true/false operands would match.
 
CmpInst::Predicate llvm::getMinMaxPred (SelectPatternFlavor SPF, bool Ordered=false)
 Return the canonical comparison predicate for the specified minimum/maximum flavor.
 
SelectPatternFlavor llvm::getInverseMinMaxFlavor (SelectPatternFlavor SPF)
 Return the inverse minimum/maximum flavor of the specified flavor.
 
Intrinsic::ID llvm::getInverseMinMaxIntrinsic (Intrinsic::ID MinMaxID)
 
APInt llvm::getMinMaxLimit (SelectPatternFlavor SPF, unsigned BitWidth)
 Return the minimum or maximum constant value for the specified integer min/max flavor and type.
 
std::pair< Intrinsic::ID, bool > llvm::canConvertToMinOrMaxIntrinsic (ArrayRef< Value * > VL)
 Check if the values in VL are select instructions that can be converted to a min or max (vector) intrinsic.
 
bool llvm::matchSimpleRecurrence (const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
 Attempt to match a simple first order recurrence cycle of the form:
   iv = phi Ty [Start, Entry], [Inc, backedge]
   inc = binop iv, step
 or:
   iv = phi Ty [Start, Entry], [Inc, backedge]
   inc = binop step, iv
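A sketch of recognizing a simple add recurrence with it (the helper name is hypothetical):

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Hypothetical helper: true if PN is a recurrence of the form
    //   iv = phi [Start, entry], [iv + Step, backedge]
    static bool isAddRecurrence(const PHINode *PN, Value *&Start, Value *&Step) {
      BinaryOperator *BO = nullptr;
      return matchSimpleRecurrence(PN, BO, Start, Step) &&
             BO->getOpcode() == Instruction::Add;
    }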
 
bool llvm::matchSimpleRecurrence (const BinaryOperator *I, PHINode *&P, Value *&Start, Value *&Step)
 Analogous to the above, but starting from the binary operator.
 
std::optional< bool > llvm::isImpliedCondition (const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
 Return true if RHS is known to be implied true by LHS; return false if RHS is known to be implied false by LHS; otherwise return std::nullopt.
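A sketch of using the implication query to drop a redundant check (the helper name is hypothetical):

    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;

    // Hypothetical helper: on a path where LHS is known to be true, a branch
    // on RHS is redundant if LHS being true forces RHS to be true.
    static bool secondCheckRedundant(const Value *LHS, const Value *RHS,
                                     const DataLayout &DL) {
      return isImpliedCondition(LHS, RHS, DL, /*LHSIsTrue=*/true).value_or(false);
    }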
 
std::optional< bool > llvm::isImpliedCondition (const Value *LHS, CmpInst::Predicate RHSPred, const Value *RHSOp0, const Value *RHSOp1, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
 
std::optional< bool > llvm::isImpliedByDomCondition (const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
 Return the boolean condition value in the context of the given instruction if it is known based on dominating conditions.
 
std::optional< bool > llvm::isImpliedByDomCondition (CmpInst::Predicate Pred, const Value *LHS, const Value *RHS, const Instruction *ContextI, const DataLayout &DL)
 
void llvm::findValuesAffectedByCondition (Value *Cond, bool IsAssume, function_ref< void(Value *)> InsertAffected)
 Call InsertAffected on all Values whose known bits / value may be affected by the condition Cond.
 

Variables

constexpr unsigned llvm::MaxAnalysisRecursionDepth = 6