// From llvm/lib/IR/Operator.cpp.
#include "llvm/IR/Operator.h"
#include "ConstantsContext.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

bool Operator::hasPoisonGeneratingFlags() const {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl: {
    auto *OBO = cast<OverflowingBinaryOperator>(this);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }
  case Instruction::Trunc: {
    if (auto *TI = dyn_cast<TruncInst>(this))
      return TI->hasNoUnsignedWrap() || TI->hasNoSignedWrap();
    return false;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    return cast<PossiblyExactOperator>(this)->isExact();
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GEPOperator>(this);
    // Note: inrange exists on constant-expression GEPs only.
    return GEP->getNoWrapFlags() != GEPNoWrapFlags::none() ||
           GEP->getInRange() != std::nullopt;
  }
  case Instruction::UIToFP:
  case Instruction::ZExt:
    if (auto *NNI = dyn_cast<PossiblyNonNegInst>(this))
      return NNI->hasNonNeg();
    return false;
  case Instruction::ICmp:
    return cast<ICmpInst>(this)->hasSameSign();
  case Instruction::Call:
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      switch (II->getIntrinsicID()) {
      // Per-intrinsic poison-flag checks (not shown in this excerpt).
      default:
        break;
      }
    }
    [[fallthrough]];
  default:
    if (const auto *FP = dyn_cast<FPMathOperator>(this))
      return FP->hasNoNaNs() || FP->hasNoInfs();
    return false;
  }
}

bool Operator::hasPoisonGeneratingAnnotations() const {
  if (hasPoisonGeneratingFlags())
    return true;
  auto *I = dyn_cast<Instruction>(this);
  return I && (I->hasPoisonGeneratingReturnAttributes() ||
               I->hasPoisonGeneratingMetadata());
}
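
// A minimal usage sketch (not part of Operator.cpp): before hoisting or
// speculating an instruction, a transform can ask whether only the flags on
// it could introduce poison and strip them instead of giving up on the
// transform. The helper name is hypothetical.
static void stripPoisonGeneratorsBeforeHoist(Instruction &I) {
  if (cast<Operator>(&I)->hasPoisonGeneratingFlags())
    I.dropPoisonGeneratingFlags(); // e.g. nuw/nsw, exact, nneg, GEP no-wrap.
}
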
Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}

Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}

std::optional<ConstantRange> GEPOperator::getInRange() const {
  if (auto *CE = dyn_cast<GetElementPtrConstantExpr>(this))
    return CE->getInRange();
  return std::nullopt;
}
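
// A minimal sketch (assumption, not from this file) of why these accessors
// matter: the GEPOperator view handles GEP instructions and GEP constant
// expressions uniformly, so callers need not branch on the concrete subclass.
static Type *gepSourceTypeOrNull(const Value *V) {
  if (auto *GEP = dyn_cast<GEPOperator>(V))
    return GEP->getSourceElementType();
  return nullptr;
}
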
Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  // Compute the worst possible offset at each level of the GEP and accumulate
  // the minimum alignment into Result.
  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    uint64_t Offset;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());

    if (StructType *STy = GTI.getStructTypeOrNull()) {
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // If the index isn't known, use 1: the value that gives the worst
      // alignment for the offset.
      const uint64_t ElemCount = OpC ? OpC->getZExtValue() : 1;
      Offset = GTI.getSequentialElementStride(DL) * ElemCount;
    }
    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}
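
// A minimal sketch (assumption, not from this file) of a typical caller:
// combine a pointer's known alignment with whatever the GEP's offsets
// preserve to bound the alignment of the result pointer.
static Align knownAlignAfterGEP(const GEPOperator &GEP, Align SrcAlign,
                                const DataLayout &DL) {
  return Align(MinAlign(SrcAlign.value(),
                        GEP.getMaxPreservedAlignment(DL).value()));
}
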
130 "The offset bit width does not match DL specification.");
140 if (SourceType->
isIntegerTy(8) && !Index.empty() && !ExternalAnalysis) {
142 if (CI && CI->getType()->isIntegerTy()) {
143 Offset += CI->getValue().sextOrTrunc(
Offset.getBitWidth());
149 bool UsedExternalAnalysis =
false;
151 Index = Index.sextOrTrunc(
Offset.getBitWidth());
156 if (!UsedExternalAnalysis) {
157 Offset += Index * IndexedSize;
161 bool Overflow =
false;
162 APInt OffsetPlus = Index.
smul_ov(IndexedSize, Overflow);
172 SourceType, Index.begin());
174 for (
auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
176 bool ScalableType = GTI.getIndexedType()->isScalableTy();
178 Value *V = GTI.getOperand();
182 if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
183 if (ConstOffset->isZero())
191 unsigned ElementIdx = ConstOffset->getZExtValue();
194 if (!AccumulateOffset(
200 if (!AccumulateOffset(ConstOffset->getValue(),
201 GTI.getSequentialElementStride(
DL)))
208 if (!ExternalAnalysis || STy || ScalableType)
211 if (!ExternalAnalysis(*V, AnalysisIndex))
213 UsedExternalAnalysis =
true;
214 if (!AccumulateOffset(AnalysisIndex, GTI.getSequentialElementStride(
DL)))
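
// A minimal usage sketch (assumption, not from this file): the APInt must be
// created at the index width of the GEP's address space, as the assert above
// requires. With no ExternalAnalysis callback, only all-constant GEPs succeed.
static std::optional<APInt> tryGetConstantGEPOffset(const GEPOperator &GEP,
                                                    const DataLayout &DL) {
  unsigned IdxWidth = DL.getIndexSizeInBits(GEP.getPointerAddressSpace());
  APInt Offset(IdxWidth, 0);
  if (GEP.accumulateConstantOffset(DL, Offset))
    return Offset;
  return std::nullopt;
}
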
bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    SmallMapVector<Value *, APInt, 4> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    APInt IndexedSize = APInt(BitWidth, Size);
    ConstantOffset += Index * IndexedSize;
  };

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle a constant index if possible.
    auto *ConstOffset = dyn_cast<ConstantInt>(V);
    if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
      if (ConstOffset->isZero())
        continue;
      // A non-zero constant index into a scalable type cannot be folded.
      if (ScalableType)
        return false;
      // A struct index adds its field offset, in bytes, to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
                              1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL));
      continue;
    }

    // Variable struct indices and scalable types cannot be decomposed.
    if (STy || ScalableType)
      return false;
    APInt IndexedSize = APInt(BitWidth, GTI.getSequentialElementStride(DL));
    // Insert an initial offset of 0 for V if none exists already, then
    // increment it by IndexedSize.
    if (!IndexedSize.isZero()) {
      auto *It = VariableOffsets.insert({V, APInt(BitWidth, 0)}).first;
      It->second += IndexedSize;
    }
  }
  return true;
}
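
// A minimal usage sketch (assumption, not from this file) of how the
// decomposition is consumed: the total offset in bytes equals ConstantOffset
// plus the sum of Multiplier * Value over VariableOffsets. Returns the number
// of variable terms, or -1 if the GEP cannot be decomposed (a non-constant
// struct index or a scalable element type).
static int countVariableGEPTerms(const GEPOperator &GEP, const DataLayout &DL) {
  unsigned IdxWidth = DL.getIndexSizeInBits(GEP.getPointerAddressSpace());
  SmallMapVector<Value *, APInt, 4> VariableOffsets;
  APInt ConstantOffset(IdxWidth, 0);
  if (!GEP.collectOffset(DL, IdxWidth, VariableOffsets, ConstantOffset))
    return -1;
  return static_cast<int>(VariableOffsets.size());
}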