202 "disable-separate-const-offset-from-gep",
cl::init(
false),
203 cl::desc(
"Do not separate the constant offset from a GEP instruction"),
211 cl::desc(
"Verify this pass produces no dead code"),
class ConstantOffsetExtractor {
  ConstantOffsetExtractor(Instruction *InsertionPt, const DominatorTree *DT)
      : IP(InsertionPt), DL(InsertionPt->getModule()->getDataLayout()),
        DT(DT) {}

  // Rebuilds the index expression with the extracted constant removed.
  Value *rebuildWithoutConstOffset();

  Value *distributeExtsAndCloneChain(unsigned ChainIndex);

  Value *removeConstOffset(unsigned ChainIndex);

  // Returns whether we can trace into the operands of BO while keeping the
  // surrounding sign/zero extensions distributable.
  bool CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO,
                    bool NonNegative);
class SeparateConstOffsetFromGEPLegacyPass : public FunctionPass {
public:
  static char ID;

  SeparateConstOffsetFromGEPLegacyPass(bool LowerGEP = false)
      : FunctionPass(ID), LowerGEP(LowerGEP) {
    initializeSeparateConstOffsetFromGEPLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }
class SeparateConstOffsetFromGEP {
public:
  SeparateConstOffsetFromGEP(
      DominatorTree *DT, ScalarEvolution *SE, LoopInfo *LI,
      TargetLibraryInfo *TLI,
      function_ref<TargetTransformInfo &(Function &)> GetTTI, bool LowerGEP)
      : DT(DT), SE(SE), LI(LI), TLI(TLI), GetTTI(GetTTI), LowerGEP(LowerGEP) {}
  // Lowers the variadic part of a split GEP into a chain of single-index
  // GEPs, or into explicit pointer arithmetic.
  void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic,
                              int64_t AccumulativeByteOffset);

  void lowerToArithmetics(GetElementPtrInst *Variadic,
                          int64_t AccumulativeByteOffset);

  bool hasMoreThanOneUseInLoop(Value *V, Loop *L);
char SeparateConstOffsetFromGEPLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(
    SeparateConstOffsetFromGEPLegacyPass, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE",
    false, false)
INITIALIZE_PASS_END(
    SeparateConstOffsetFromGEPLegacyPass, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE",
    false, false)

FunctionPass *llvm::createSeparateConstOffsetFromGEPPass(bool LowerGEP) {
  return new SeparateConstOffsetFromGEPLegacyPass(LowerGEP);
}
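// Illustrative usage sketch (not part of the original file): a client on the
// legacy pass manager can schedule this pass through the factory above. The
// helper name is invented; it assumes llvm/IR/LegacyPassManager.h and the
// declaration of createSeparateConstOffsetFromGEPPass are in scope.
static void addSeparateConstOffsetFromGEPExample(legacy::PassManager &PM) {
  // LowerGEP = true additionally rewrites the variadic base into single-index
  // GEPs or plain pointer arithmetic, depending on the target.
  PM.add(createSeparateConstOffsetFromGEPPass(/*LowerGEP=*/true));
}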
bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended,
                                           bool ZeroExtended,
                                           BinaryOperator *BO,
                                           bool NonNegative) {
  // Only consider ADD, SUB and OR: a non-zero constant found in expressions
  // composed of these operations can be hoisted as a constant offset by
  // reassociation.
  if (BO->getOpcode() != Instruction::Add &&
      BO->getOpcode() != Instruction::Sub &&
      BO->getOpcode() != Instruction::Or)
    return false;

  Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1);
  // Do not trace into "or" unless it is equivalent to "add", i.e. the two
  // operands have no common bits set.
  if (BO->getOpcode() == Instruction::Or &&
      !haveNoCommonBitsSet(LHS, RHS, DL, /*AC=*/nullptr, BO, DT))
    return false;

  // Tracing into BO also requires that its surrounding sext/zext (if any)
  // distributes over both operands; a non-negative constant operand keeps
  // that distribution valid even without nsw/nuw flags.
  if (ConstantInt *ConstLHS = dyn_cast<ConstantInt>(LHS)) {
    if (!ConstLHS->isNegative()) {
      // ...
    }
  }
  if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
    if (!ConstRHS->isNegative()) {
      // ...
    }
  }

  if (BO->getOpcode() == Instruction::Add ||
      BO->getOpcode() == Instruction::Sub) {
    // ...
  }
APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO,
                                                   bool SignExtended,
                                                   bool ZeroExtended) {
  // Remember the chain length so it can be restored if an operand turns out
  // not to contain a constant offset.
  size_t ChainLength = UserChain.size();

  APInt ConstantOffset = find(BO->getOperand(0), SignExtended, ZeroExtended,
                              /*NonNegative=*/false);
  if (ConstantOffset != 0)
    return ConstantOffset;

  // The LHS did not pan out; reset the chain and try the RHS.
  UserChain.resize(ChainLength);

  ConstantOffset = find(BO->getOperand(1), SignExtended, ZeroExtended,
                        /*NonNegative=*/false);
  // For a sub, a constant found in the right operand contributes negatively.
  if (BO->getOpcode() == Instruction::Sub)
    ConstantOffset = -ConstantOffset;

  if (ConstantOffset == 0)
    UserChain.resize(ChainLength);
  return ConstantOffset;
}
APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
                                    bool ZeroExtended, bool NonNegative) {
  unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  // Only Users (instructions and constant expressions) can be traced into.
  User *U = dyn_cast<User>(V);
  if (U == nullptr)
    return APInt(BitWidth, 0);

  APInt ConstantOffset(BitWidth, 0);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // Found the constant.
    ConstantOffset = CI->getValue();
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
    // Trace into subexpressions for more hoisting opportunities.
    if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative))
      ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
  } else if (isa<TruncInst>(V)) {
    ConstantOffset =
        find(U->getOperand(0), SignExtended, ZeroExtended, NonNegative)
            .trunc(BitWidth);
  } else if (isa<SExtInst>(V)) {
    ConstantOffset = find(U->getOperand(0), /*SignExtended=*/true,
                          ZeroExtended, NonNegative)
                         .sext(BitWidth);
  } else if (isa<ZExtInst>(V)) {
    // sext(zext(a)) == zext(a), so SignExtended can be cleared; and
    // zext(a) >= 0 does not imply a >= 0, so NonNegative is cleared too.
    ConstantOffset =
        find(U->getOperand(0), /*SignExtended=*/false, /*ZeroExtended=*/true,
             /*NonNegative=*/false)
            .zext(BitWidth);
  }

  // If a non-zero constant was found, remember the chain of users leading to
  // it so rebuildWithoutConstOffset can strip it out later.
  if (ConstantOffset != 0)
    UserChain.push_back(U);
  return ConstantOffset;
}
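// Illustrative trace (not from the original file): for an index computed as
// "sext i32 (%a + 5) to i64", find() walks through the sext and the add,
// returns an APInt holding 5, and records the add and sext in UserChain so
// the constant can later be stripped and reapplied as a byte offset.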
Value *ConstantOffsetExtractor::applyExts(Value *V) {
  Value *Current = V;
  // ExtInsts is built in use-def order, so re-apply the casts in reverse.
  for (CastInst *I : llvm::reverse(ExtInsts)) {
    if (Constant *C = dyn_cast<Constant>(Current)) {
      Current = ConstantExpr::getCast(I->getOpcode(), C, I->getType());
    } else {
      Instruction *Ext = I->clone();
      Ext->setOperand(0, Current);
      Ext->insertBefore(IP);
      Current = Ext;
    }
  }
  return Current;
}
Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() {
  distributeExtsAndCloneChain(UserChain.size() - 1);
  // Remove all nullptrs (which used to be sext/zext casts) from UserChain.
  unsigned NewSize = 0;
  for (User *I : UserChain) {
    if (I != nullptr) {
      UserChain[NewSize] = I;
      NewSize++;
    }
  }
  UserChain.resize(NewSize);
  return removeConstOffset(UserChain.size() - 1);
}
Value *
ConstantOffsetExtractor::distributeExtsAndCloneChain(unsigned ChainIndex) {
  User *U = UserChain[ChainIndex];
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(U));
    // If U is a ConstantInt, applyExts will return a ConstantInt as well.
    return UserChain[ChainIndex] = cast<ConstantInt>(applyExts(U));
  }

  if (CastInst *Cast = dyn_cast<CastInst>(U)) {
    assert(
        (isa<SExtInst>(Cast) || isa<ZExtInst>(Cast) || isa<TruncInst>(Cast)) &&
        "Only following instructions can be traced: sext, zext & trunc");
    ExtInsts.push_back(Cast);
    UserChain[ChainIndex] = nullptr; // Removed by rebuildWithoutConstOffset.
    return distributeExtsAndCloneChain(ChainIndex - 1);
  }

  // find() only traces into BinaryOperators and CastInsts.
  BinaryOperator *BO = cast<BinaryOperator>(U);
  // OpNo tells which operand of BO is UserChain[ChainIndex - 1].
  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  Value *TheOther = applyExts(BO->getOperand(1 - OpNo));
  Value *NextInChain = distributeExtsAndCloneChain(ChainIndex - 1);

  BinaryOperator *NewBO = nullptr;
  if (OpNo == 0)
    NewBO = BinaryOperator::Create(BO->getOpcode(), NextInChain, TheOther,
                                   BO->getName(), IP);
  else
    NewBO = BinaryOperator::Create(BO->getOpcode(), TheOther, NextInChain,
                                   BO->getName(), IP);
  return UserChain[ChainIndex] = NewBO;
}
Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(UserChain[ChainIndex]));
    return ConstantInt::getNullValue(UserChain[ChainIndex]->getType());
  }

  BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
  assert((BO->use_empty() || BO->hasOneUse()) &&
         "distributeExtsAndCloneChain clones each BinaryOperator in "
         "UserChain, so no one should be used more than "
         "once");

  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]);
  Value *NextInChain = removeConstOffset(ChainIndex - 1);
  Value *TheOther = BO->getOperand(1 - OpNo);

  // If NextInChain is zero and is not the LHS of a sub, the whole
  // sub-expression simplifies to TheOther.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) {
    if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0))
      return TheOther;
  }

  BinaryOperator::BinaryOps NewOp = BO->getOpcode();
  if (BO->getOpcode() == Instruction::Or) {
    // An "or" was only traced into because it behaved like an "add", so
    // rebuild it as an add now that one operand has changed.
    NewOp = Instruction::Add;
  }
Value *ConstantOffsetExtractor::Extract(Value *Idx, GetElementPtrInst *GEP,
                                        User *&UserChainTail,
                                        const DominatorTree *DT) {
  ConstantOffsetExtractor Extractor(GEP, DT);
  // Find a non-zero constant offset first.
  APInt ConstantOffset =
      Extractor.find(Idx, /*SignExtended=*/false, /*ZeroExtended=*/false,
                     GEP->isInBounds());
  if (ConstantOffset == 0) {
    UserChainTail = nullptr;
    return nullptr;
  }
  // Separate the constant offset from the GEP index.
  Value *IdxWithoutConstOffset = Extractor.rebuildWithoutConstOffset();
  UserChainTail = Extractor.UserChain.back();
  return IdxWithoutConstOffset;
}
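// For example (illustrative, not from the original file): if Idx is
// "%i1 = add i64 %i, 5" for an inbounds GEP, Extract returns a value that
// computes just %i, hands back the chain tail for cleanup, and the caller
// turns the extracted 5 into a byte offset on a follow-up GEP.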
int64_t ConstantOffsetExtractor::Find(Value *Idx, GetElementPtrInst *GEP,
                                      const DominatorTree *DT) {
  // Only computes the constant offset; does not rebuild the index.
  return ConstantOffsetExtractor(GEP, DT)
      .find(Idx, /*SignExtended=*/false, /*ZeroExtended=*/false,
            GEP->isInBounds())
      .getSExtValue();
}
bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToIndexSize(
    GetElementPtrInst *GEP) {
  bool Changed = false;
  Type *PtrIdxTy = DL->getIndexType(GEP->getType());
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end(); I != E;
       ++I, ++GTI) {
    // Skip struct member indices, which must stay i32.
    if (GTI.isSequential()) {
      if ((*I)->getType() != PtrIdxTy) {
        *I = CastInst::CreateIntegerCast(*I, PtrIdxTy, /*isSigned=*/true,
                                         "idxprom", GEP);
        Changed = true;
      }
    }
  }
  return Changed;
}
int64_t
SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
                                                 bool &NeedsExtraction) {
  NeedsExtraction = false;
  int64_t AccumulativeByteOffset = 0;
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      // Try to extract a constant offset from this GEP index.
      int64_t ConstantOffset =
          ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
      if (ConstantOffset != 0) {
        NeedsExtraction = true;
        // A GEP may have multiple indices. Accumulate each extracted
        // constant into a single byte offset, which later offsets the
        // variadic base.
        AccumulativeByteOffset +=
            ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
      }
    } else if (LowerGEP) {
      StructType *StTy = GTI.getStructType();
      uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
      // Skip field 0, whose offset is always 0.
      if (Field != 0) {
        NeedsExtraction = true;
        AccumulativeByteOffset +=
            DL->getStructLayout(StTy)->getElementOffset(Field);
      }
    }
  }
  return AccumulativeByteOffset;
}
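// Worked example (illustrative): for
//   %p = getelementptr [10 x [20 x float]], ptr %a, i64 0, i64 %i1, i64 %j1
// where %i1 = add i64 %i, 1 and %j1 = add i64 %j, 1, the two extracted
// constants contribute 1 * 20 * 4 + 1 * 4 = 84 bytes, so the pass emits a
// variadic GEP on %i and %j plus a trailing GEP that adds 84 bytes
// (21 float elements).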
void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
    GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *PtrIndexTy = DL->getIndexType(Variadic->getType());
  Type *I8PtrTy =
      Builder.getInt8PtrTy(Variadic->getType()->getPointerAddressSpace());

  Value *ResultPtr = Variadic->getOperand(0);
  Loop *L = LI->getLoopFor(Variadic->getParent());
  // The base is a swap candidate only if it is loop invariant and not used
  // more than once inside the loop.
  bool isSwapCandidate =
      L && L->isLoopInvariant(ResultPtr) &&
      !hasMoreThanOneUseInLoop(ResultPtr, L);
  Value *FirstResult = nullptr;

  if (ResultPtr->getType() != I8PtrTy)
    ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);

  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create an ugly GEP for each sequential index; structure indices have
  // already been folded into the accumulated constant offset.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      Value *Idx = Variadic->getOperand(I);
      APInt ElementSize(PtrIndexTy->getIntegerBitWidth(),
                        DL->getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by the element size: shl for powers of two, mul
      // otherwise.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2())
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(PtrIndexTy, ElementSize.logBase2()));
        else
          Idx = Builder.CreateMul(
              Idx, ConstantInt::get(PtrIndexTy, ElementSize));
      }
      ResultPtr =
          Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Idx, "uglygep");
      if (FirstResult == nullptr)
        FirstResult = ResultPtr;
    }
  }

  // Create a GEP for the accumulated constant byte offset.
  if (AccumulativeByteOffset != 0) {
    Value *Offset = ConstantInt::get(PtrIndexTy, AccumulativeByteOffset);
    ResultPtr =
        Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Offset, "uglygep");
  } else
    isSwapCandidate = false;

  // If the base is loop invariant, swap the constant GEP to the front so
  // LICM can later hoist it out of the loop.
  auto *FirstGEP = dyn_cast_or_null<GetElementPtrInst>(FirstResult);
  auto *SecondGEP = dyn_cast<GetElementPtrInst>(ResultPtr);
  if (isSwapCandidate && isLegalToSwapOperand(FirstGEP, SecondGEP, L))
    swapGEPOperand(FirstGEP, SecondGEP);

  if (ResultPtr->getType() != Variadic->getType())
    ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType());

  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}
void SeparateConstOffsetFromGEP::lowerToArithmetics(
    GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());
  assert(IntPtrTy == DL->getIndexType(Variadic->getType()) &&
         "Pointer type must match index type for arithmetic-based lowering of "
         "split GEPs");

  Value *ResultPtr = Builder.CreatePtrToInt(Variadic->getOperand(0), IntPtrTy);
  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Lower each sequential index to explicit integer arithmetic; structure
  // indices are already folded into the accumulated constant offset.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      Value *Idx = Variadic->getOperand(I);
      // Scale the index by the element size, as in lowerToSingleIndexGEPs.
      if (ElementSize != 1) {
        // ...
      }
      ResultPtr = Builder.CreateAdd(ResultPtr, Idx);
    }
  }

  // Add the accumulated constant byte offset.
  if (AccumulativeByteOffset != 0) {
    ResultPtr = Builder.CreateAdd(
        ResultPtr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset));
  }

  ResultPtr = Builder.CreateIntToPtr(ResultPtr, Variadic->getType());
  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}
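// Illustrative sketch of the two lowerings for "gep %base, %i" with 4-byte
// elements and a 92-byte accumulated offset (value names invented):
//   lowerToSingleIndexGEPs:  %s  = shl i64 %i, 2
//                            %u  = getelementptr i8, ptr %base, i64 %s
//                            %p  = getelementptr i8, ptr %u, i64 92
//   lowerToArithmetics:      %b  = ptrtoint ptr %base to i64
//                            %s  = shl i64 %i, 2
//                            %a  = add i64 %b, %s
//                            %a2 = add i64 %a, 92
//                            %p  = inttoptr i64 %a2 to ptr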
bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
  // Skip vector GEPs.
  if (GEP->getType()->isVectorTy())
    return false;

  // The backend can already handle GEPs whose indices are all constant.
  if (GEP->hasAllConstantIndices())
    return false;

  bool Changed = canonicalizeArrayIndicesToIndexSize(GEP);

  bool NeedsExtraction;
  int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);

  if (!NeedsExtraction)
    return Changed;

  TargetTransformInfo &TTI = GetTTI(*GEP->getFunction());

  // If LowerGEP is disabled, check before splitting whether the backend
  // supports an addressing mode that folds the accumulated offset.
  if (!LowerGEP) {
    unsigned AddrSpace = GEP->getPointerAddressSpace();
    if (!TTI.isLegalAddressingMode(GEP->getResultElementType(),
                                   /*BaseGV=*/nullptr, AccumulativeByteOffset,
                                   /*HasBaseReg=*/true, /*Scale=*/0,
                                   AddrSpace)) {
      return Changed;
    }
  }

  // Remove the constant offset in each sequential index; the resulting GEP
  // computes the variadic base.
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      Value *OldIdx = GEP->getOperand(I);
      User *UserChainTail;
      Value *NewIdx =
          ConstantOffsetExtractor::Extract(OldIdx, GEP, UserChainTail, DT);
      if (NewIdx != nullptr) {
        // Switch to the index with the constant offset removed.
        GEP->setOperand(I, NewIdx);
        // Garbage-collect the expressions that used to compute the old index.
        RecursivelyDeleteTriviallyDeadInstructions(UserChainTail);
        Changed = true;
      }
    }
  }

  // Clear the inbounds attribute: the intermediate GEP that computes only the
  // variadic base may be out of bounds. It is restored on the final GEP below
  // when that is provably safe.
  bool GEPWasInBounds = GEP->isInBounds();
  GEP->setIsInBounds(false);

  // Lower the variadic part if requested. BasicAA does not look through
  // ptrtoint/inttoptr, and non-integral pointers or pointers carrying extra
  // data cannot be round-tripped through integers, so prefer single-index
  // GEPs in those cases.
  if (LowerGEP) {
    unsigned AddrSpace = GEP->getPointerAddressSpace();
    bool PointerHasExtraData = DL->getPointerSizeInBits(AddrSpace) !=
                               DL->getIndexSizeInBits(AddrSpace);
    if (TTI.useAA() || DL->isNonIntegralAddressSpace(AddrSpace) ||
        PointerHasExtraData)
      lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
    else
      lowerToArithmetics(GEP, AccumulativeByteOffset);
    return true;
  }
  // No need to create another GEP if the accumulated byte offset is zero.
  if (AccumulativeByteOffset == 0)
    return Changed;

  // Offset the variadic base with the accumulated byte offset: clone the
  // (already rewritten) GEP, add one more index covering the constant part,
  // and replace all uses of the original GEP.
  Instruction *NewGEP = GEP->clone();
  NewGEP->insertBefore(GEP);

  int64_t ElementTypeSizeOfGEP = static_cast<int64_t>(
      DL->getTypeAllocSize(GEP->getResultElementType()));
  Type *PtrIdxTy = DL->getIndexType(GEP->getType());
  if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
    // Very likely: as long as %gep is naturally aligned, the byte offset is
    // a multiple of sizeof(*%gep) and can be expressed as an index of the
    // GEP's result element type.
    int64_t Index = AccumulativeByteOffset / ElementTypeSizeOfGEP;
    NewGEP = GetElementPtrInst::Create(GEP->getResultElementType(), NewGEP,
                                       ConstantInt::get(PtrIdxTy, Index, true),
                                       GEP->getName(), GEP);
    NewGEP->copyMetadata(*GEP);
    // Inherit the inbounds attribute of the original GEP.
    cast<GetElementPtrInst>(NewGEP)->setIsInBounds(GEPWasInBounds);
  } else {
    // Unlikely but possible (e.g. packed structs): emit an "uglygep" that
    // adds the offset in bytes through an i8* view of the pointer.
    IRBuilder<> Builder(GEP);
    Type *I8PtrTy =
        Builder.getInt8Ty()->getPointerTo(GEP->getPointerAddressSpace());

    NewGEP = cast<Instruction>(Builder.CreateGEP(
        Builder.getInt8Ty(), Builder.CreateBitCast(NewGEP, I8PtrTy),
        {ConstantInt::get(PtrIdxTy, AccumulativeByteOffset, true)}, "uglygep",
        GEPWasInBounds));
    NewGEP->copyMetadata(*GEP);
    if (GEP->getType() != I8PtrTy)
      NewGEP =
          cast<Instruction>(Builder.CreateBitCast(NewGEP, GEP->getType()));
  }

  GEP->replaceAllUsesWith(NewGEP);
  GEP->eraseFromParent();

  return true;
}
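// Illustrative before/after sketch for splitGEP (value names invented):
//   before: %p = getelementptr inbounds [10 x [20 x float]], ptr %a,
//                i64 0, i64 %i1, i64 %j1     ; %i1 = %i + 1, %j1 = %j + 1
//   after:  %base = getelementptr [10 x [20 x float]], ptr %a,
//                   i64 0, i64 %i, i64 %j
//           %p    = getelementptr inbounds float, ptr %base, i64 21
// The variadic %base is now common to every GEP that differs only in its
// constant offsets, so CSE and LICM can reuse it.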
bool SeparateConstOffsetFromGEPLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto GetTTI = [this](Function &F) -> TargetTransformInfo & {
    return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  };
  SeparateConstOffsetFromGEP Impl(DT, SE, LI, TLI, GetTTI, LowerGEP);
  return Impl.run(F);
}
bool SeparateConstOffsetFromGEP::run(Function &F) {
  if (DisableSeparateConstOffsetFromGEP)
    return false;

  DL = &F.getParent()->getDataLayout();
  bool Changed = false;
  for (BasicBlock &B : F) {
    if (!DT->isReachableFromEntry(&B))
      continue;

    for (Instruction &I : llvm::make_early_inc_range(B))
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I))
        Changed |= splitGEP(GEP);
    // GEP ConstantExprs need no splitting; all their indices are constant.
  }

  Changed |= reuniteExts(F);

  if (VerifyNoDeadCode)
    verifyNoDeadCode(F);

  return Changed;
}
Instruction *SeparateConstOffsetFromGEP::findClosestMatchingDominator(
    const SCEV *Key, Instruction *Dominatee,
    DenseMap<const SCEV *, SmallVector<Instruction *, 2>> &DominatingExprs) {
  auto Pos = DominatingExprs.find(Key);
  if (Pos == DominatingExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // Basic blocks are visited in pre-order of the dominator tree, so a
  // candidate that does not dominate the current instruction cannot dominate
  // any later one either and can be popped for good.
  while (!Candidates.empty()) {
    Instruction *Candidate = Candidates.back();
    if (DT->dominates(Candidate, Dominatee))
      return Candidate;
    Candidates.pop_back();
  }
  return nullptr;
}
bool SeparateConstOffsetFromGEP::reuniteExts(Instruction *I) {
  if (!SE->isSCEVable(I->getType()))
    return false;

  //   Dom: LHS + RHS
  //   I:   sext(LHS) + sext(RHS)
  // If Dom cannot sign-overflow and dominates I, rewrite I as sext(Dom).
  Value *LHS = nullptr, *RHS = nullptr;
  if (match(I, m_Add(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS))))) {
    if (LHS->getType() == RHS->getType()) {
      const SCEV *Key =
          SE->getAddExpr(SE->getUnknown(LHS), SE->getUnknown(RHS));
      if (auto *Dom = findClosestMatchingDominator(Key, I, DominatingAdds)) {
        Instruction *NewSExt = new SExtInst(Dom, I->getType(), "", I);
        NewSExt->takeName(I);
        I->replaceAllUsesWith(NewSExt);
        RecursivelyDeleteTriviallyDeadInstructions(I);
        return true;
      }
    }
  } else if (match(I, m_Sub(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS))))) {
    if (LHS->getType() == RHS->getType()) {
      const SCEV *Key = SE->getAddExpr(
          SE->getUnknown(LHS), SE->getNegativeSCEV(SE->getUnknown(RHS)));
      if (auto *Dom = findClosestMatchingDominator(Key, I, DominatingSubs)) {
        Instruction *NewSExt = new SExtInst(Dom, I->getType(), "", I);
        NewSExt->takeName(I);
        I->replaceAllUsesWith(NewSExt);
        RecursivelyDeleteTriviallyDeadInstructions(I);
        return true;
      }
    }
  }

  // Record I as a dominating expression if it is an add/sub that cannot
  // sign-overflow.
  if (match(I, m_NSWAdd(m_Value(LHS), m_Value(RHS)))) {
    if (programUndefinedIfPoison(I)) {
      const SCEV *Key =
          SE->getAddExpr(SE->getUnknown(LHS), SE->getUnknown(RHS));
      DominatingAdds[Key].push_back(I);
    }
  } else if (match(I, m_NSWSub(m_Value(LHS), m_Value(RHS)))) {
    if (programUndefinedIfPoison(I)) {
      const SCEV *Key = SE->getAddExpr(
          SE->getUnknown(LHS), SE->getNegativeSCEV(SE->getUnknown(RHS)));
      DominatingSubs[Key].push_back(I);
    }
  }
  return false;
}
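// Illustrative example (value names invented): given
//   %d  = add nsw i32 %a, %b            ; dominates %s
//   %sa = sext i32 %a to i64
//   %sb = sext i32 %b to i64
//   %s  = add i64 %sa, %sb
// reuniteExts rewrites %s as "sext i32 %d to i64", re-uniting the extensions
// that splitGEP pushed down into the index operands.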
bool SeparateConstOffsetFromGEP::reuniteExts(Function &F) {
  bool Changed = false;
  DominatingAdds.clear();
  DominatingSubs.clear();
  // Visit basic blocks in pre-order of the dominator tree, so dominating
  // expressions are seen before their potential users.
  for (const auto Node : depth_first(DT)) {
    BasicBlock *BB = Node->getBlock();
    for (Instruction &I : llvm::make_early_inc_range(*BB))
      Changed |= reuniteExts(&I);
  }
  return Changed;
}
void SeparateConstOffsetFromGEP::verifyNoDeadCode(Function &F) {
  for (BasicBlock &B : F) {
    for (Instruction &I : B) {
      if (isInstructionTriviallyDead(&I)) {
        std::string ErrMessage;
        raw_string_ostream RSO(ErrMessage);
        RSO << "Dead instruction detected!\n" << I << "\n";
        llvm_unreachable(RSO.str().c_str());
      }
    }
  }
}
bool SeparateConstOffsetFromGEP::isLegalToSwapOperand(
    GetElementPtrInst *FirstGEP, GetElementPtrInst *SecondGEP, Loop *CurLoop) {
  if (!FirstGEP || !FirstGEP->hasOneUse())
    return false;

  if (FirstGEP == SecondGEP)
    return false;

  unsigned FirstNum = FirstGEP->getNumOperands();
  unsigned SecondNum = SecondGEP->getNumOperands();
  // Give up unless both GEPs have exactly one index.
  if (FirstNum != SecondNum || FirstNum != 2)
    return false;

  Value *FirstOffset = FirstGEP->getOperand(1);
  Instruction *FirstOffsetDef = dyn_cast<Instruction>(FirstOffset);

  // Skip a constant shift, which may have been generated when splitting GEPs.
  if (FirstOffsetDef && FirstOffsetDef->isShift() &&
      isa<ConstantInt>(FirstOffsetDef->getOperand(1)))
    FirstOffsetDef = dyn_cast<Instruction>(FirstOffsetDef->getOperand(0));

  // Give up if the first offset is an add/sub with a constant operand, since
  // the second GEP would likely be folded away anyway.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FirstOffsetDef)) {
    unsigned opc = BO->getOpcode();
    if ((opc == Instruction::Add || opc == Instruction::Sub) &&
        (isa<ConstantInt>(BO->getOperand(0)) ||
         isa<ConstantInt>(BO->getOperand(1))))
      return false;
  }
  return true;
}
bool SeparateConstOffsetFromGEP::hasMoreThanOneUseInLoop(Value *V, Loop *L) {
  int UsesInLoop = 0;
  for (User *U : V->users()) {
    if (Instruction *User = dyn_cast<Instruction>(U))
      if (L->contains(User))
        if (++UsesInLoop > 1)
          return true;
  }
  return false;
}
void SeparateConstOffsetFromGEP::swapGEPOperand(GetElementPtrInst *First,
                                                GetElementPtrInst *Second) {
  Value *Offset1 = First->getOperand(1);
  Value *Offset2 = Second->getOperand(1);
  First->setOperand(1, Offset2);
  Second->setOperand(1, Offset1);

  // We changed p+o+c to p+c+o, so p+c may no longer be inbounds.
  const DataLayout &DAL = First->getModule()->getDataLayout();
  APInt Offset(DAL.getIndexSizeInBits(
                   cast<PointerType>(First->getType())->getAddressSpace()),
               0);
  Value *NewBase =
      First->stripAndAccumulateInBoundsConstantOffsets(DAL, Offset);
  uint64_t ObjectSize;
  if (!getObjectSize(NewBase, ObjectSize, DAL, TLI) ||
      Offset.ugt(ObjectSize)) {
    First->setIsInBounds(false);
    Second->setIsInBounds(false);
  } else
    First->setIsInBounds(true);
}
PreservedAnalyses
SeparateConstOffsetFromGEPPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *LI = &AM.getResult<LoopAnalysis>(F);
  auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
  auto GetTTI = [&](Function &F) -> TargetTransformInfo & {
    return AM.getResult<TargetIRAnalysis>(F);
  };
  SeparateConstOffsetFromGEP Impl(DT, SE, LI, TLI, GetTTI, LowerGEP);
  if (!Impl.run(F))
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}
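// With the new pass manager the pass is registered under the same name, so it
// can be exercised directly (assumed invocation; the name matches the one
// used in INITIALIZE_PASS above):
//   opt -passes=separate-const-offset-from-gep -S input.ll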