// Return true if this operator has flags which may cause it to evaluate to
// poison despite having operands with non-poison values.
bool Operator::hasPoisonGeneratingFlags() const {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl: {
    auto *OBO = cast<OverflowingBinaryOperator>(this);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }
  case Instruction::Trunc: {
    if (auto *TI = dyn_cast<TruncInst>(this))
      return TI->hasNoUnsignedWrap() || TI->hasNoSignedWrap();
    return false;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    return cast<PossiblyExactOperator>(this)->isExact();
  case Instruction::Or:
    return cast<PossiblyDisjointInst>(this)->isDisjoint();
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GEPOperator>(this);
    return GEP->getNoWrapFlags() != GEPNoWrapFlags::none() ||
           GEP->getInRange() != std::nullopt;
  }
  case Instruction::UIToFP:
  case Instruction::ZExt:
    if (auto *NNI = dyn_cast<PossiblyNonNegInst>(this))
      return NNI->hasNonNeg();
    return false;
  case Instruction::ICmp:
    return cast<ICmpInst>(this)->hasSameSign();
  default:
    if (const auto *FP = dyn_cast<FPMathOperator>(this))
      return FP->hasNoNaNs() || FP->hasNoInfs();
    return false;
  }
}
// Return true if this operator has poison-generating flags, return
// attributes, or metadata.
bool Operator::hasPoisonGeneratingAnnotations() const {
  if (hasPoisonGeneratingFlags())
    return true;
  auto *I = dyn_cast<Instruction>(this);
  return I && (I->hasPoisonGeneratingReturnAttributes() ||
               I->hasPoisonGeneratingMetadata());
}
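// Caller-side sketch (not part of Operator.cpp): a transform that hoists or
// speculates an instruction can consult the queries above and, if needed,
// drop the flags that would otherwise let the result become poison. The
// helper name below is illustrative.
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"

static void stripPoisonGeneratingFlagsIfAny(llvm::Instruction &I) {
  if (llvm::cast<llvm::Operator>(&I)->hasPoisonGeneratingAnnotations()) {
    // Clears flag-based sources of poison such as nuw/nsw and exact;
    // poison-generating metadata and return attributes need separate handling.
    I.dropPoisonGeneratingFlags();
  }
}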
Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}
Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}
std::optional<ConstantRange> GEPOperator::getInRange() const {
  if (auto *CE = dyn_cast<GetElementPtrConstantExpr>(this))
    return CE->getInRange();
  return std::nullopt;
}
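// Usage sketch (assumed, not from this file): GEPOperator lets a caller treat
// a getelementptr instruction and a getelementptr constant expression
// uniformly. The function name is illustrative.
#include "llvm/IR/Operator.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void describeGEP(const Value *V, raw_ostream &OS) {
  if (auto *GEP = dyn_cast<GEPOperator>(V)) {
    OS << "GEP over " << *GEP->getSourceElementType() << " yielding "
       << *GEP->getResultElementType() << "\n";
    if (GEP->getInRange())
      OS << "  has an inrange index\n";
  }
}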
Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  // Compute the worst possible offset at each level of the GEP and accumulate
  // the minimum alignment into Result.
  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    uint64_t Offset;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());

    if (StructType *STy = GTI.getStructTypeOrNull()) {
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // If the index isn't known, use 1: it is the element count that yields
      // the worst-case alignment for the offset.
      const uint64_t ElemCount = OpC ? OpC->getZExtValue() : 1;
      Offset = GTI.getSequentialElementStride(DL) * ElemCount;
    }
    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}
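// Usage sketch (assumed): the result of getMaxPreservedAlignment() bounds the
// alignment that may be claimed for the GEP's result pointer; commonAlignment
// returns the smaller of two alignments. The helper name is illustrative.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

static Align clampToGEPPreservedAlign(Align BaseAlign, const GEPOperator &GEP,
                                      const DataLayout &DL) {
  return commonAlignment(BaseAlign, GEP.getMaxPreservedAlignment(DL));
}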
118 "The offset bit width does not match DL specification.");
129 auto *CI = dyn_cast<ConstantInt>(
Index.front());
130 if (CI && CI->getType()->isIntegerTy()) {
131 Offset += CI->getValue().sextOrTrunc(
Offset.getBitWidth());
137 bool UsedExternalAnalysis =
false;
144 if (!UsedExternalAnalysis) {
149 bool Overflow =
false;
150 APInt OffsetPlus =
Index.smul_ov(IndexedSize, Overflow);
160 SourceType,
Index.begin());
162 for (
auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
166 Value *V = GTI.getOperand();
169 auto *ConstOffset = dyn_cast<ConstantInt>(V);
170 if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
171 if (ConstOffset->isZero())
179 unsigned ElementIdx = ConstOffset->getZExtValue();
182 if (!AccumulateOffset(
188 if (!AccumulateOffset(ConstOffset->getValue(),
189 GTI.getSequentialElementStride(
DL)))
196 if (!ExternalAnalysis || STy || ScalableType)
199 if (!ExternalAnalysis(*V, AnalysisIndex))
201 UsedExternalAnalysis =
true;
202 if (!AccumulateOffset(AnalysisIndex, GTI.getSequentialElementStride(
DL)))
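// Caller-side sketch (assumed usage): accumulate a GEP's byte offset when it
// is entirely constant; returns std::nullopt otherwise. The helper name is
// illustrative.
#include <optional>
#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Operator.h"
using namespace llvm;

static std::optional<APInt> getConstantGEPOffset(const GEPOperator &GEP,
                                                 const DataLayout &DL) {
  // The offset width must match the index size for the GEP's address space.
  unsigned IdxWidth = DL.getIndexSizeInBits(GEP.getPointerAddressSpace());
  APInt Offset(IdxWidth, 0);
  if (GEP.accumulateConstantOffset(DL, Offset))
    return Offset;
  return std::nullopt;
}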
bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    SmallMapVector<Value *, APInt, 4> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    APInt IndexedSize = APInt(BitWidth, Size);
    ConstantOffset += Index * IndexedSize;
  };

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();
    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle a constant index directly.
    auto *ConstOffset = dyn_cast<ConstantInt>(V);
    if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
      if (ConstOffset->isZero())
        continue;
      // A non-zero constant index into a scalable type cannot be folded.
      if (ScalableType)
        return false;
      // A struct index adds the field's byte offset.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
                              1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL));
      continue;
    }

    // A variable index over a struct or scalable type cannot be represented.
    if (STy || ScalableType)
      return false;
    APInt IndexedSize = APInt(BitWidth, GTI.getSequentialElementStride(DL));
    // Insert an initial offset of 0 for V if none exists already, then add
    // this index's stride to its multiplier.
    if (!IndexedSize.isZero()) {
      auto It = VariableOffsets.insert({V, APInt(BitWidth, 0)}).first;
      It->second += IndexedSize;
    }
  }
  return true;
}
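// Caller-side sketch (assumed usage): collectOffset() splits a GEP into a
// constant byte offset plus a per-index-value multiplier map. Names below are
// illustrative.
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void printGEPDecomposition(const GEPOperator &GEP, const DataLayout &DL,
                                  raw_ostream &OS) {
  unsigned BW = DL.getIndexSizeInBits(GEP.getPointerAddressSpace());
  SmallMapVector<Value *, APInt, 4> VariableOffsets;
  APInt ConstantOffset(BW, 0);
  if (!GEP.collectOffset(DL, BW, VariableOffsets, ConstantOffset))
    return; // A struct-typed or scalable variable index is not decomposable.
  OS << "constant: " << ConstantOffset << "\n";
  for (const auto &[V, Scale] : VariableOffsets)
    OS << "  + " << *V << " * " << Scale << "\n";
}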