#define DEBUG_TYPE "basicaa"

STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
if (const Argument *A = dyn_cast<Argument>(V))
  if (A->hasByValAttr() || A->hasNoAliasAttr())

if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
if (isa<LoadInst>(V))

bool RoundToAlign = false) {
const Value *BasicAAResult::GetLinearExpression(

if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
  assert(Scale == 0 && "Constant values don't have a scale");

if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
  switch (BOp->getOpcode()) {
    V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                            SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
  case Instruction::Sub:
    V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                            SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
  case Instruction::Mul:
    V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                            SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
  case Instruction::Shl:
    V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                            SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

  if (isa<OverflowingBinaryOperator>(BOp)) {
    NUW &= BOp->hasNoUnsignedWrap();
    NSW &= BOp->hasNoSignedWrap();
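The Add, Sub, Mul and Shl cases above each recurse into the left operand and then fold the constant right-hand side into the (Scale, Offset) pair describing the linear form Scale * V + Offset. A minimal standalone sketch of that bookkeeping, using hypothetical helper names (Linear, foldAdd, foldMul, foldShl) rather than LLVM's APInt-based code:

// Standalone illustration, not part of BasicAliasAnalysis.cpp.
#include <cstdint>
#include <cstdio>

struct Linear { int64_t Scale, Offset; };   // describes Scale*V + Offset

static Linear foldAdd(Linear L, int64_t C) { L.Offset += C; return L; }                 // V + C
static Linear foldMul(Linear L, int64_t C) { L.Scale *= C; L.Offset *= C; return L; }   // V * C
static Linear foldShl(Linear L, int64_t C) { L.Scale <<= C; L.Offset <<= C; return L; } // V << C

int main() {
  Linear L = {1, 0};      // start from the value itself: 1*V + 0
  L = foldAdd(L, 3);      // V + 3
  L = foldMul(L, 2);      // 2*V + 6
  L = foldShl(L, 1);      // (2*V + 6) << 1  ==  4*V + 12
  printf("%lld * V + %lld\n", (long long)L.Scale, (long long)L.Offset);
  return 0;
}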
if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
  Value *CastOp = cast<CastInst>(V)->getOperand(0);
  unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
  const Value *Result =
      GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                          Depth + 1, AC, DT, NSW, NUW);
  unsigned ExtendedBy = NewWidth - SmallWidth;

  if (isa<SExtInst>(V) && ZExtBits == 0) {
    unsigned OldWidth = Offset.getBitWidth();
    Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
    ZExtBits = OldZExtBits;
    SExtBits = OldSExtBits;
    SExtBits += ExtendedBy;

  ZExtBits = OldZExtBits;
  SExtBits = OldSExtBits;
  ZExtBits += ExtendedBy;
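When looking through a sext or zext, the offset accumulated so far is re-expressed at the wider type, which is what the trunc/sext/zextOrSelf chain above does on the APInt. A minimal sketch with plain fixed-width integers standing in for APInt, using illustrative widths of 8, 32 and 64 bits:

// Standalone illustration, not part of BasicAliasAnalysis.cpp.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t Offset = 0xFF;                // offset tracked at OldWidth == 64
  int8_t  Trunc = (int8_t)Offset;        // trunc to SmallWidth == 8   -> -1
  int32_t Sext  = (int32_t)Trunc;        // sext to NewWidth == 32     -> -1
  uint64_t Rewidened = (uint32_t)Sext;   // zext back to OldWidth == 64
  printf("0x%llx\n", (unsigned long long)Rewidened);   // prints 0xffffffff
  return 0;
}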
assert(PointerSize <= 64 && "Invalid PointerSize!");
unsigned ShiftBits = 64 - PointerSize;
return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
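The shift pair above sign-extends an offset from PointerSize bits up to 64 bits. A small standalone check of the trick; the wrapper name adjustToPointerSizeDemo is made up for the example:

// Standalone illustration, not part of BasicAliasAnalysis.cpp.
#include <cassert>
#include <cstdint>
#include <cstdio>

static int64_t adjustToPointerSizeDemo(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}

int main() {
  // With 32-bit pointers, an accumulated offset of 0xFFFFFFFF is really -1.
  printf("%lld\n", (long long)adjustToPointerSizeDemo(0xFFFFFFFFLL, 32));
  return 0;
}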
bool BasicAAResult::DecomposeGEPExpression(const Value *V,

Decomposed.StructOffset = 0;
Decomposed.OtherOffset = 0;
Decomposed.VarIndices.clear();

if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
  if (!GA->isInterposable()) {
    V = GA->getAliasee();

if (Op->getOpcode() == Instruction::BitCast ||
    Op->getOpcode() == Instruction::AddrSpaceCast) {

if (const Value *RV = CS.getReturnedArgOperand()) {

bool GepHasConstantOffset = true;
     I != E; ++I, ++GTI) {
  unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
  Decomposed.StructOffset +=

  if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
    Decomposed.OtherOffset +=

  GepHasConstantOffset = false;

  unsigned ZExtBits = 0, SExtBits = 0;
  if (PointerSize > Width)
    SExtBits += PointerSize - Width;

  APInt IndexScale(Width, 0), IndexOffset(Width, 0);
  bool NSW = true, NUW = true;
  Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                              SExtBits, DL, 0, AC, DT, NSW, NUW);

  Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
  Scale *= IndexScale.getSExtValue();
for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
  if (Decomposed.VarIndices[i].V == Index &&
      Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
      Decomposed.VarIndices[i].SExtBits == SExtBits) {
    Scale += Decomposed.VarIndices[i].Scale;
    Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);

VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, static_cast<int64_t>(Scale)};
Decomposed.VarIndices.push_back(Entry);

if (GepHasConstantOffset) {
  Decomposed.StructOffset =
  Decomposed.OtherOffset =

} while (--MaxLookup);

SearchLimitReached++;
assert(Visited.empty() && "Visited must be cleared after use!");
unsigned MaxLookup = 8;

if (!Visited.insert(V).second) {
if (OrLocal && isa<AllocaInst>(V))
if (!GV->isConstant()) {

if (const PHINode *PN = dyn_cast<PHINode>(V)) {
  if (PN->getNumIncomingValues() > MaxLookup) {
  for (Value *IncValue : PN->incoming_values())

} while (!Worklist.empty() && --MaxLookup);

return Worklist.empty();
F == LibFunc::memset_pattern16 && TLI.has(F))

if (const Instruction *inst = dyn_cast<Instruction>(V))
if (const Argument *arg = dyn_cast<Argument>(V))
  return arg->getParent();

return !F1 || !F2 || F1 == F2;

       "BasicAliasAnalysis doesn't support interprocedural queries.");

auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
if (CacheIt != AliasCache.end())
  return CacheIt->second;

VisitedPhiBBs.clear();

       "AliasAnalysis query involving multiple functions!");

if (isa<AllocaInst>(Object))
if (CI->isTailCall())

bool PassedAsArg = false;
unsigned OperandNo = 0;
     CI != CE; ++CI, ++OperandNo) {
  if (!(*CI)->getType()->isPointerTy() ||

       "Expected GEPs with the same pointer operand");
if (isa<SequentialType>(Ty)) {
  const uint64_t ElementSize =
  if (V1Size != ElementSize || V2Size != ElementSize)

} else if (!LastIndexedStruct || !C1 || !C2) {

auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                    uint64_t V2Off, uint64_t V2Size) {
  return V1Off < V2Off && V1Off + V1Size <= V2Off &&
         ((V2Off + V2Size <= StructSize) ||
          (V2Off + V2Size - StructSize <= V1Off));

if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
    EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
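The EltsDontOverlap lambda handles two accesses that land in successive elements of an array of structs: the first access must end before the second starts, and the second must either stay inside the struct or, if it spills into the next array element, end before the first access's offset within that element. A self-contained sketch with made-up struct size, offsets and access sizes:

// Standalone illustration, not part of BasicAliasAnalysis.cpp.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t StructSize = 16;   // hypothetical sizeof(struct) in the array
  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };
  // Fields at offsets 0 (4 bytes) and 8 (8 bytes) of the 16-byte struct.
  printf("%d\n", (int)EltsDontOverlap(0, 4, 8, 8));    // prints 1: disjoint
  // An 8-byte access at offset 12 wraps 4 bytes into the next element and
  // would collide with the field at offset 0 there.
  printf("%d\n", (int)EltsDontOverlap(0, 4, 12, 8));   // prints 0
  return 0;
}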
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      uint64_t ObjectAccessSize) {

  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  if (DecompGEP.VarIndices.empty())
    GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
                                    uint64_t V2Size, const AAMDNodes &V2AAInfo,
                                    const Value *UnderlyingV1,
                                    const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))

  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))

    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
      if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      if (GEP1BaseOffset == GEP2BaseOffset &&
          DecompGEP1.VarIndices == DecompGEP2.VarIndices)

        GEP2->getPointerOperand()->getType()) {

    if (GEP2MaxLookupReached || GEP1MaxLookupReached)

    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

                 V2AAInfo, nullptr, UnderlyingV2);

  if (GEP1MaxLookupReached)

  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())

  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if ((uint64_t)GEP1BaseOffset < V2Size)
      if (-(uint64_t)GEP1BaseOffset < V1Size)
  if (!DecompGEP1.VarIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;

      const Value *V = DecompGEP1.VarIndices[i].V;

      bool SignKnownZero, SignKnownOne;
      ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, DL,
                     0, &AC, nullptr, DT);

      bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
      SignKnownZero |= IsZExt;
      SignKnownOne &= !IsZExt;

      int64_t Scale = DecompGEP1.VarIndices[i].Scale;
          (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);

    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
        V1Size <= Modulo - ModOffset)

    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
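Modulo accumulates the OR of every variable index's scale, and Modulo ^ (Modulo & (Modulo - 1)) keeps only its lowest set bit, i.e. the largest power of two that divides all the scales; the GEP address is therefore congruent to GEP1BaseOffset modulo that stride, which is what the ModOffset check exploits. A small worked example with hypothetical scales and sizes:

// Standalone illustration, not part of BasicAliasAnalysis.cpp.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t Scales[] = {8, 12, 20};   // hypothetical GEP index scales
  uint64_t Modulo = 0;
  for (uint64_t Scale : Scales)
    Modulo |= Scale;                            // OR of scales: 0b11100 == 28
  Modulo = Modulo ^ (Modulo & (Modulo - 1));    // lowest set bit: 4
  uint64_t GEP1BaseOffset = 6;
  uint64_t ModOffset = GEP1BaseOffset & (Modulo - 1);   // 6 mod 4 == 2
  uint64_t V1Size = 2, V2Size = 2;
  // GEP1's address is always ModOffset bytes past a multiple of Modulo as
  // measured from the other pointer, so a V2Size access there and a V1Size
  // access at GEP1 cannot overlap when both fit inside the stride.
  printf("Modulo=%llu ModOffset=%llu NoAlias=%d\n",
         (unsigned long long)Modulo, (unsigned long long)ModOffset,
         (int)(V2Size <= ModOffset && V1Size <= Modulo - ModOffset));
  return 0;
}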
                                     const Value *V2, uint64_t V2Size,
                                     const Value *UnderV2) {

  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
                 SI2->getTrueValue(), V2Size, V2AAInfo);
                 SI2->getFalseValue(), V2Size, V2AAInfo);

                 SISize, SIAAInfo, UnderV2);
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
                                  uint64_t V2Size, const AAMDNodes &V2AAInfo,
                                  const Value *UnderV2) {

  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {

             "There must exist an entry for the phi node");

      AliasCache[Locs] = OrigAliasResult;

  bool isRecursive = false;
  if (isa<PHINode>(PV1))
  if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
    if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
        isa<ConstantInt>(PV1GEP->idx_begin())) {

  if (UniqueSrc.insert(PV1).second)

      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
                 PNSize, PNAAInfo, UnderV2);

  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
  if (V1Size == 0 || V2Size == 0)
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
  if (isValueEqualInPotentialCycles(V1, V2))

    if (CPN->getType()->getAddressSpace() == 0)
    if (CPN->getType()->getAddressSpace() == 0)

  std::pair<AliasCacheTy::iterator, bool> Pair =
    return Pair.first->second;

  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    return AliasCache[Locs] = Result;

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
                                  V2, V2Size, V2AAInfo, O2);
    return AliasCache[Locs] = Result;

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
    return AliasCache[Locs] = Result;

  return AliasCache[Locs] = Result;
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,

  if (VisitedPhiBBs.empty())

  for (auto *P : VisitedPhiBBs)
void BasicAAResult::GetIndexDifference(

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    int64_t Scale = Src[i].Scale;

    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)

      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;

    VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
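GetIndexDifference computes Dest -= Src symbolically: a Src index that matches a Dest index (same value and extensions) has its scale subtracted, a fully cancelled index is erased, and an unmatched Src index is appended with its scale negated. A minimal sketch of that bookkeeping on a simplified stand-in for VariableGEPIndex (names here are illustrative, not LLVM API):

// Standalone illustration, not part of BasicAliasAnalysis.cpp.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct Index { std::string V; int64_t Scale; };   // simplified VariableGEPIndex

static void getIndexDifference(std::vector<Index> &Dest,
                               const std::vector<Index> &Src) {
  for (const Index &S : Src) {
    bool Found = false;
    for (size_t j = 0; j != Dest.size(); ++j) {
      if (Dest[j].V != S.V)
        continue;
      if (Dest[j].Scale != S.Scale)
        Dest[j].Scale -= S.Scale;        // partial cancellation
      else
        Dest.erase(Dest.begin() + j);    // full cancellation: drop the entry
      Found = true;
      break;
    }
    if (!Found)
      Dest.push_back({S.V, -S.Scale});   // carry the Src index, negated
  }
}

int main() {
  std::vector<Index> Dest = {{"i", 8}, {"j", 4}};
  std::vector<Index> Src = {{"i", 4}, {"j", 4}, {"k", 2}};
  getIndexDifference(Dest, Src);
  for (const Index &I : Dest)
    printf("%s: %lld\n", I.V.c_str(), (long long)I.Scale);   // i: 4, k: -2
  return 0;
}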
bool BasicAAResult::constantOffsetHeuristic(

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))

  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;

  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
         V2Size + std::abs(BaseOffset) <= MinDiffBytes;
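constantOffsetHeuristic targets two variable indices of the form (V + C0) and (V + C1) with opposite scales: whatever V is, the two addresses differ by a fixed number of bytes derived from |C0 - C1| (with wraparound handled by taking the unsigned minimum of MinDiff and Wrapped), so accesses that, together with the remaining constant base offset, fit inside that gap cannot alias. A worked example with made-up numbers:

// Standalone illustration, not part of BasicAliasAnalysis.cpp.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
  // Indices (V + 1) and (V - 3), scales +4 and -4 (e.g. i32 array elements).
  int64_t V0Offset = 1, V1Offset = -3, Scale = 4;
  // 4; the real code also takes the unsigned min with the wrapped difference.
  uint64_t MinDiff = (uint64_t)(V0Offset - V1Offset);
  uint64_t MinDiffBytes = MinDiff * (uint64_t)std::llabs(Scale);   // 16 bytes apart
  uint64_t V1Size = 4, V2Size = 4;
  int64_t BaseOffset = 8;                    // remaining constant base offset
  bool NoAlias = V1Size + (uint64_t)std::llabs(BaseOffset) <= MinDiffBytes &&
                 V2Size + (uint64_t)std::llabs(BaseOffset) <= MinDiffBytes;
  printf("%d\n", (int)NoAlias);              // prints 1
  return 0;
}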
void BasicAAWrapperPass::anchor() {}

                      "Basic Alias Analysis (stateless AA impl)", true, true)

  return new BasicAAWrapperPass();

  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();

                               ACT.getAssumptionCache(F), &DTWP.getDomTree(),
                               LIWP ? &LIWP->getLoopInfo() : nullptr));