bool Operator::hasPoisonGeneratingFlags() const {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl: {
    // Overflowing binary operators only generate poison with a no-wrap flag.
    auto *OBO = cast<OverflowingBinaryOperator>(this);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }
  case Instruction::Trunc: {
    if (auto *TI = dyn_cast<TruncInst>(this))
      return TI->hasNoUnsignedWrap() || TI->hasNoSignedWrap();
    return false;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    return cast<PossiblyExactOperator>(this)->isExact();
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GEPOperator>(this);
    // Note: inrange exists on constant expressions only.
    return GEP->getNoWrapFlags() != GEPNoWrapFlags::none() ||
           GEP->getInRange() != std::nullopt;
  }
  case Instruction::UIToFP:
  case Instruction::ZExt:
    if (auto *NNI = dyn_cast<PossiblyNonNegInst>(this))
      return NNI->hasNonNeg();
    return false;
  case Instruction::ICmp:
    return cast<ICmpInst>(this)->hasSameSign();
  default:
    if (const auto *FP = dyn_cast<FPMathOperator>(this))
      return FP->hasNoNaNs() || FP->hasNoInfs();
    return false;
  }
}
 
 
bool Operator::hasPoisonGeneratingAnnotations() const {
  if (hasPoisonGeneratingFlags())
    return true;
  auto *I = dyn_cast<Instruction>(this);
  return I && (I->hasPoisonGeneratingReturnAttributes() ||
               I->hasPoisonGeneratingMetadata());
}

Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}

Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}

std::optional<ConstantRange> GEPOperator::getInRange() const {
  if (auto *CE = dyn_cast<GetElementPtrConstantExpr>(this))
    return CE->getInRange();
  return std::nullopt;
}
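A minimal caller-side sketch of the two poison queries above; the helper name and the incoming Value pointer are assumptions, not part of Operator.cpp. It drops poison-generating flags from an instruction that is about to be hoisted or speculated.

#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"

// Hypothetical helper (not from Operator.cpp): strip poison-generating flags
// before moving an instruction to a context with weaker operand guarantees.
static void stripPoisonFlagsIfAny(llvm::Value *V) {
  using namespace llvm;
  auto *Op = dyn_cast<Operator>(V);
  if (!Op || !Op->hasPoisonGeneratingFlags())
    return;
  // Only an Instruction can have its flags mutated in place.
  if (auto *I = dyn_cast<Instruction>(V))
    I->dropPoisonGeneratingFlags();
}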
 
 
Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  // For every level of the GEP, compute the worst possible offset it can
  // contribute and accumulate the minimum alignment into Result.
  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    uint64_t Offset;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // If the index isn't known, use 1: the worst case for the alignment of
      // the offset.
      const uint64_t ElemCount = OpC ? OpC->getZExtValue() : 1;
      Offset = GTI.getSequentialElementStride(DL) * ElemCount;
    }
    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}
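A small usage sketch, under assumptions not stated in the listing: GEP points to a GEPOperator, DL is the module's DataLayout, and BaseAlign is the known alignment of the GEP's base pointer; the helper name is hypothetical.

#include <algorithm>
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Alignment.h"

// Hypothetical helper: the GEP result is aligned to at least the smaller of
// the base pointer's alignment and the alignment the GEP preserves.
static llvm::Align gepResultAlignment(const llvm::GEPOperator *GEP,
                                      const llvm::DataLayout &DL,
                                      llvm::Align BaseAlign) {
  llvm::Align Preserved = GEP->getMaxPreservedAlignment(DL);
  return llvm::Align(std::min(BaseAlign.value(), Preserved.value()));
}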
 
 
bool GEPOperator::accumulateConstantOffset(
    const DataLayout &DL, APInt &Offset,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  assert(Offset.getBitWidth() ==
             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");
  SmallVector<const Value *> Index(llvm::drop_begin(operand_values()));
  return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
                                               DL, Offset, ExternalAnalysis);
}

bool GEPOperator::accumulateConstantOffset(
    Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
    APInt &Offset, function_ref<bool(Value &, APInt &)> ExternalAnalysis) {
  // Fast path for the canonical "getelementptr i8" form.
  if (SourceType->isIntegerTy(8) && !Index.empty() && !ExternalAnalysis) {
    auto *CI = dyn_cast<ConstantInt>(Index.front());
    if (CI && CI->getType()->isIntegerTy()) {
      Offset += CI->getValue().sextOrTrunc(Offset.getBitWidth());
      return true;
    }
    return false;
  }

  bool UsedExternalAnalysis = false;
  auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
    Index = Index.sextOrTrunc(Offset.getBitWidth());
    APInt IndexedSize = APInt(Offset.getBitWidth(), Size);
    // For array or vector indices, scale the index by the size of the type.
    if (!UsedExternalAnalysis) {
      Offset += Index * IndexedSize;
    } else {
      // An external analysis may return an over- or under-approximation, so
      // the accumulation must detect signed overflow and bail out.
      bool Overflow = false;
      APInt OffsetPlus = Index.smul_ov(IndexedSize, Overflow);
      if (Overflow)
        return false;
      APInt OffsetCandidate = Offset.sadd_ov(OffsetPlus, Overflow);
      if (Overflow)
        return false;
      Offset = OffsetCandidate;
    }
    return true;
  };
  auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
      SourceType, Index.begin());
  auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
  for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    auto *ConstOffset = dyn_cast<ConstantInt>(V);
    if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
      if (ConstOffset->isZero())
        continue;
      // A non-zero constant index into a scalable type cannot be folded.
      if (ScalableType)
        return false;
      // A struct index adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        if (!AccumulateOffset(
                APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)),
                1))
          return false;
        continue;
      }
      if (!AccumulateOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL)))
        return false;
      continue;
    }

    // The operand is not constant; fall back to the external analysis, which
    // does not apply to struct or scalable types.
    if (!ExternalAnalysis || STy || ScalableType)
      return false;
    APInt AnalysisIndex;
    if (!ExternalAnalysis(*V, AnalysisIndex))
      return false;
    UsedExternalAnalysis = true;
    if (!AccumulateOffset(AnalysisIndex, GTI.getSequentialElementStride(DL)))
      return false;
  }
  return true;
}
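A minimal caller-side sketch of accumulateConstantOffset; the helper name and surrounding code are assumptions, not part of this file. The offset APInt must be created with the index width of the GEP's address space, as the assertion above requires.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Operator.h"

// Hypothetical helper: returns true and sets ByteOffset when every index of
// the GEP is a constant.
static bool getConstantGEPOffset(const llvm::GEPOperator *GEP,
                                 const llvm::DataLayout &DL,
                                 int64_t &ByteOffset) {
  unsigned IdxWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
  llvm::APInt Offset(IdxWidth, 0);
  if (!GEP->accumulateConstantOffset(DL, Offset))
    return false; // A variable index was found and no ExternalAnalysis given.
  ByteOffset = Offset.getSExtValue();
  return true;
}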
 
 
bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    SmallMapVector<Value *, APInt, 4> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    APInt IndexedSize = APInt(BitWidth, Size);
    ConstantOffset += Index * IndexedSize;
  };

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    auto *ConstOffset = dyn_cast<ConstantInt>(V);
    if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
      if (ConstOffset->isZero())
        continue;
      // A non-zero constant index into a scalable type cannot be folded.
      if (ScalableType)
        return false;
      // A struct index adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
                              1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL));
      continue;
    }

    // Variable index: struct and scalable types are not supported.
    if (STy || ScalableType)
      return false;
    APInt IndexedSize = APInt(BitWidth, GTI.getSequentialElementStride(DL));
    // Insert an initial offset of 0 for V if none exists already, then
    // increment it by IndexedSize.
    if (!IndexedSize.isZero()) {
      auto *It = VariableOffsets.insert({V, APInt(BitWidth, 0)}).first;
      It->second += IndexedSize;
    }
  }
  return true;
}
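A minimal caller-side sketch of collectOffset; the helper name is hypothetical. On success the GEP's byte offset decomposes into ConstantOffset plus the sum of each variable index times its multiplier in VariableOffsets.

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Operator.h"

// Hypothetical helper: decompose a GEP into a constant byte offset plus
// per-Value byte multipliers for its variable indices.
static bool decomposeGEP(const llvm::GEPOperator *GEP,
                         const llvm::DataLayout &DL,
                         llvm::SmallMapVector<llvm::Value *, llvm::APInt, 4>
                             &VariableOffsets,
                         llvm::APInt &ConstantOffset) {
  unsigned BW = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
  ConstantOffset = llvm::APInt(BW, 0);
  return GEP->collectOffset(DL, BW, VariableOffsets, ConstantOffset);
}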
 
 