#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true> VectorizationFactor(
    "force-vector-width", cl::Hidden,
    cl::desc("Sets the SIMD width. Zero is autoselect."),
    cl::location(VectorizerParams::VectorizationFactor));

static cl::opt<unsigned, true> VectorizationInterleave(
    "force-vector-interleave", cl::Hidden,
    cl::desc("Sets the vectorization interleave count. "
             "Zero is autoselect."),
    cl::location(VectorizerParams::VectorizationInterleave));

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

static cl::opt<bool, true> HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc(
        "Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true));
bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

const SCEV *llvm::replaceSymbolicStrideSCEV(
    PredicatedScalarEvolution &PSE,
    const DenseMap<Value *, const SCEV *> &PtrToStride, Value *Ptr) {
  // ...
  const SCEV *StrideSCEV = PtrToStride.lookup(Ptr);
  // ...
  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr
                    << "\n");
  // ...
}
RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, const RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      // ...
static bool evaluatePtrAddRecAtMaxBTCWillNotWrap(
    const SCEVAddRecExpr *AR, const SCEV *MaxBTC, const SCEV *EltSize,
    ScalarEvolution &SE, const DataLayout &DL, DominatorTree *DT,
    AssumptionCache *AC,
    std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
  // ...
  bool CheckForNonNull, CheckForFreed;
  Value *StartPtrV = StartPtr->getValue();
  uint64_t DerefBytes = StartPtrV->getPointerDereferenceableBytes(
      DL, CheckForNonNull, CheckForFreed);
  if (DerefBytes && (CheckForNonNull || CheckForFreed))
    // ...

  Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt();
  if (BasicBlock *LoopPred = L->getLoopPredecessor())
    CtxI = LoopPred->getTerminator();
  // ...
  DerefRK = std::max(DerefRK, RK);
  // ...
  if (DerefBytesSCEV->isZero())
    return false;
  // ...
  const SCEV *OffsetAtLastIter = // ...
  if (!OffsetAtLastIter) {
    // Re-try with loop guards applied.
    // ...
    if (!OffsetAtLastIter)
      return false;
  }
  // ...
  if (IsKnownNonNegative) {
    // ...
  }
  // ...
}
static std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
    const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC,
    const SCEV *MaxBTC, ScalarEvolution *SE,
    DenseMap<std::pair<const SCEV *, Type *>,
             std::pair<const SCEV *, const SCEV *>> *PointerBounds,
    DominatorTree *DT, AssumptionCache *AC,
    std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
  std::pair<const SCEV *, const SCEV *> *PtrBoundsPair;
  if (PointerBounds) {
    auto [Iter, Ins] = PointerBounds->try_emplace(
        {PtrExpr, AccessTy},
        std::make_pair(SE->getCouldNotCompute(), SE->getCouldNotCompute()));
    if (!Ins)
      return Iter->second;
    PtrBoundsPair = &Iter->second;
  }

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(PtrExpr, Lp)) {
    ScStart = ScEnd = PtrExpr;
  } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
    ScStart = AR->getStart();
    if (!isa<SCEVCouldNotCompute>(BTC))
      ScEnd = AR->evaluateAtIteration(BTC, *SE);
    else {
      // ...
      if (evaluatePtrAddRecAtMaxBTCWillNotWrap(AR, MaxBTC, EltSize, *SE, DL,
                                               DT, AC, LoopGuards)) {
        ScEnd = AR->evaluateAtIteration(MaxBTC, *SE);
      } else {
        // ...
      }
    }

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    const SCEV *Step = AR->getStepRecurrence(*SE);
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    }
    // ...
  }
  // ...
  std::pair<const SCEV *, const SCEV *> Res = {ScStart, ScEnd};
  if (PointerBounds)
    *PtrBoundsPair = Res;
  return Res;
}
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
  // ...
  const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
      Lp, PtrExpr, AccessTy, BTC, SymbolicMaxBTC, PSE.getSE(),
      &DC.getPointerBounds(), DC.getDT(), DC.getAC(), LoopGuards);
  assert(!isa<SCEVCouldNotCompute>(ScStart) &&
         !isa<SCEVCouldNotCompute>(ScEnd) &&
         "must be able to compute both start and end expressions");
  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
                        NeedsFreeze);
}
bool RuntimePointerChecking::tryToCreateDiffCheck(
    const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
  // ...
  if (AccSrc.size() != 1 || AccSink.size() != 1)
    return false;

  // If the sink is accessed before src, swap src/sink.
  if (AccSink[0] < AccSrc[0])
    std::swap(Src, Sink);

  const SCEV *SrcStart;
  const SCEV *SinkStart;
  if (!match(Src->Expr, /* affine AddRec on the inner loop */ /* ... */))
    return false;
  // ...
  unsigned AllocSize =
      std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
  // ...
  const Loop *StartARLoop = SrcStartAR->getLoop();
  if (StartARLoop == SinkStartAR->getLoop() &&
      // ...
      SrcStartAR->getStepRecurrence(*SE) !=
          SinkStartAR->getStepRecurrence(*SE)) {
    LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
                         "cannot be hoisted out of the outer loop\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
                    << "SrcStart: " << *SrcStartInt << '\n'
                    << "SinkStartInt: " << *SinkStartInt << '\n');
  DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
                          Src->NeedsFreeze || Sink->NeedsFreeze);
  return true;
}
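// Illustrative sketch (simplified, not a verbatim description of the emitted
// IR): a "diff check" replaces the usual pair of [Low, High) range
// comparisons with a single subtraction. For
//   for (i) A[i] = B[i];
// it suffices to test the runtime value (AStart - BStart) against the access
// size scaled by the vectorization factor, which is cheaper than comparing
// two full ranges.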
SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
  SmallVector<RuntimePointerCheck, 4> Checks;
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
      if (needsChecking(CGI, CGJ)) {
        CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
        Checks.emplace_back(&CGI, &CGJ);
      }
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}
bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (const auto &I : M.Members)
    for (const auto &J : N.Members)
      if (needsChecking(I, J))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  std::optional<APInt> Diff = SE->computeConstantDifference(J, I);
  if (!Diff)
    return nullptr;
  return Diff->isNegative() ? J : I;
}
bool RuntimeCheckingPtrGroup::addPointer(
    unsigned Index, const RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
                                         bool NeedsFreeze,
                                         ScalarEvolution &SE) {
  assert(AddressSpace == AS &&
         "all pointers in a checking group must be in the same address space");
  // ...
}
void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // ...
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.emplace_back(I, *this);
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue].push_back(Index);

  // Go through the equivalence classes and try to merge pointers into
  // checking groups.
  // ...
      auto PointerI = PositionMap.find(M.getPointer());
      if (PointerI == PositionMap.end())
        continue;
      for (unsigned Pointer : PointerI->second) {
        bool Merged = false;
        // ...
        if (Group.addPointer(Pointer, *this)) {
          Merged = true;
          break;
        }
        // ...
        if (!Merged)
          // We couldn't add this pointer to any existing set or the threshold
          // on the number of comparisons has been reached; create a new group
          // to hold the current pointer.
          Groups.emplace_back(Pointer, *this);
      }
  // ...
}
bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}
/// Assign each RuntimeCheckingPtrGroup pointer an index for stable UTC output.
static DenseMap<const RuntimeCheckingPtrGroup *, unsigned>
getPtrToIdxMap(ArrayRef<RuntimeCheckingPtrGroup> CheckingGroups) {
  DenseMap<const RuntimeCheckingPtrGroup *, unsigned> PtrIndices;
  for (const auto &[Idx, CG] : enumerate(CheckingGroups))
    PtrIndices[&CG] = Idx;
  return PtrIndices;
}
void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  const auto &PtrIndices = getPtrToIdxMap(CheckingGroups);
  unsigned N = 0;
  for (const auto &[Check1, Check2] : Checks) {
    const auto &First = Check1->Members, &Second = Check2->Members;
    OS.indent(Depth) << "Check " << N++ << ":\n";
    OS.indent(Depth + 2) << "Comparing group GRP" << PtrIndices.at(Check1)
                         << ":\n";
    for (unsigned K : First)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
    OS.indent(Depth + 2) << "Against group GRP" << PtrIndices.at(Check2)
                         << ":\n";
    for (unsigned K : Second)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  const auto &PtrIndices = getPtrToIdxMap(CheckingGroups);
  for (const auto &CG : CheckingGroups) {
    OS.indent(Depth + 2) << "Group GRP" << PtrIndices.at(&CG) << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned Member : CG.Members) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
    }
  }
}
/// Analyses memory accesses in a loop.
///
/// Checks whether run-time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(const Loop *TheLoop, AAResults *AA, const LoopInfo *LI,
                 const DominatorTree &DT, MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE,
                 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
      : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DT(DT), DepCands(DA),
        PSE(PSE), LoopAliasScopes(LoopAliasScopes) {
    // We're analyzing dependences across loop iterations.
    BAA.enableCrossIterationMode();
  }

  /// Register a load and whether it is only read from.
  void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(adjustLoc(Loc));
    Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(const MemoryLocation &Loc, Type *AccessTy) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(adjustLoc(Loc));
    Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access, Type *AccessTy,
                            const DenseMap<Value *, const SCEV *> &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, Loop *TheLoop,
                       const DenseMap<Value *, const SCEV *> &Strides,
                       Value *&UncomputablePtr, bool AllowPartial);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }

  void resetDepChecks(MemoryDepChecker &DepChecker) {
    // ...
  }

  const MemAccessInfoList &getDependenciesToCheck() const { return CheckDeps; }

private:
  typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;

  /// Adjust the MemoryLocation so that it represents accesses to this
  /// location across all iterations, rather than a single one.
  MemoryLocation adjustLoc(MemoryLocation Loc) const {
    // ...
  }

  /// Drop alias scopes that are only valid within a single loop iteration.
  MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
    // ...
    return LoopAliasScopes.contains(cast<MDNode>(Scope));
    // ...
  }

  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value *, 16> ReadOnlyPtr;

  // ...
  bool IsRTCheckAnalysisNeeded = false;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;

  DenseMap<Value *, SmallVector<const Value *, 16>> UnderlyingObjects;

  /// Alias scopes that are declared inside the loop, and as such not valid
  /// across iterations.
  SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
};
/// Try to compute a constant stride for \p AR, in units of the access size.
static std::optional<int64_t>
getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
                    Value *Ptr, PredicatedScalarEvolution &PSE) {
  if (isa<ScalableVectorType>(AccessTy)) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
                      << "\n");
    return std::nullopt;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG({
      dbgs() << "LAA: Bad stride - Not striding over innermost loop ";
      if (Ptr)
        dbgs() << *Ptr << " ";
      dbgs() << "SCEV: " << *AR << "\n";
    });
    return std::nullopt;
  }

  // Check that the pointer stride is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
  const APInt *APStepVal;
  if (!match(Step, m_scev_APInt(APStepVal))) {
    LLVM_DEBUG({
      dbgs() << "LAA: Bad stride - Not a constant strided ";
      if (Ptr)
        dbgs() << *Ptr << " ";
      dbgs() << "SCEV: " << *AR << "\n";
    });
    return std::nullopt;
  }

  const auto &DL = Lp->getHeader()->getDataLayout();
  TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
  int64_t Size = AllocSize.getFixedValue();

  // Huge step value - give up.
  std::optional<int64_t> StepVal = APStepVal->trySExtValue();
  if (!StepVal)
    return std::nullopt;

  // Strided access.
  return *StepVal % Size ? std::nullopt : std::make_optional(*StepVal / Size);
}
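// Worked example (illustrative): for a loop over 'int A[N]' accessing
// A[3 * i], the AddRec step is 12 bytes and the i32 alloc size is 4, so the
// returned element stride is 12 / 4 = 3. A byte step of 10 would return
// std::nullopt, because 10 % 4 != 0 means the access is not a whole number
// of elements apart per iteration.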
/// Check whether \p AR is a non-wrapping AddRec, or if \p Ptr is a GEP whose
/// flags let us prove the same without adding a run-time predicate.
static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
                     Value *Ptr, Type *AccessTy, const Loop *L, bool Assume,
                     const DominatorTree &DT,
                     std::optional<int64_t> Stride = std::nullopt) {
  // ...
  if (GEP && GEP->hasNoUnsignedSignedWrap()) {
    // For the nusw flag to apply across iterations, the GEP must be
    // dereferenced in every iteration.
    if (L->getHeader() == L->getLoopLatch() ||
        any_of(GEP->users(), [&](User *U) {
          if (getLoadStorePointerOperand(U) != GEP)
            return false;
          BasicBlock *UserBB = cast<Instruction>(U)->getParent();
          if (!L->contains(UserBB))
            return false;
          return !LoopAccessInfo::blockNeedsPredication(UserBB, L, &DT);
        }))
      return true;
  }

  // A unit-stride access cannot wrap without exhausting the address space.
  if (/* ... */
      (Stride == 1 || Stride == -1))
    return true;

  if (Ptr && Assume) {
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
                      << "LAA: Pointer: " << *Ptr << "\n"
                      << "LAA: SCEV: " << *AR << "\n"
                      << "LAA: Added an overflow assumption\n");
    return true;
  }

  return false;
}
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
                          function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
  WorkList.push_back(StartPtr);

  while (!WorkList.empty()) {
    Value *Ptr = WorkList.pop_back_val();
    if (!Visited.insert(Ptr).second)
      continue;
    auto *PN = dyn_cast<PHINode>(Ptr);
    // SCEV does not look through non-header PHIs inside the loop. Such phis
    // can be analyzed by adding separate accesses for each incoming pointer
    // value.
    if (PN && InnermostLoop.contains(PN->getParent()) &&
        PN->getParent() != InnermostLoop.getHeader()) {
      for (const Use &Inc : PN->incoming_values())
        WorkList.push_back(Inc);
    } else
      AddPointer(Ptr);
  }
}
static void findForkedSCEVs(
    ScalarEvolution *SE, const Loop *L, Value *Ptr,
    SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
    unsigned Depth) {
  // ...
  auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
    switch (Opcode) {
    case Instruction::Add:
      return SE->getAddExpr(L, R);
    case Instruction::Sub:
      return SE->getMinusSCEV(L, R);
    default:
      llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
    }
  };

  Instruction *I = cast<Instruction>(Ptr);
  unsigned Opcode = I->getOpcode();
  switch (Opcode) {
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GetElementPtrInst>(I);
    Type *SourceTy = GEP->getSourceElementType();
    // We only handle base + single offset GEPs here for now.
    if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }
    // ...
    bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
                       any_of(OffsetScevs, UndefPoisonCheck);

    // Check that we only have a single fork, on either the base or the offset.
    // Copy the SCEV across for the one without a fork in order to generate
    // the full SCEV for both sides of the GEP.
    if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
      BaseScevs.push_back(BaseScevs[0]);
    else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
      OffsetScevs.push_back(OffsetScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }
    // ...
    for (auto [B, O] : zip(BaseScevs, OffsetScevs)) {
      // ...
    }
    break;
  }
  case Instruction::Select: {
    // A select means we've found a forked pointer, but we currently only
    // support a single select per pointer, so bail out on nested forks.
    // ...
    if (ChildScevs.size() == 2)
      ScevList.append(ChildScevs.begin(), ChildScevs.end());
    // ...
    break;
  }
  case Instruction::PHI: {
    // A phi means we've found a forked pointer; only two-operand phis are
    // supported.
    if (I->getNumOperands() == 2) {
      // ...
    }
    if (ChildScevs.size() == 2)
      ScevList.append(ChildScevs.begin(), ChildScevs.end());
    // ...
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    // ...
    bool NeedsFreeze =
        any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);

    // Check that we only have a single fork, on either the left or right side.
    if (LScevs.size() == 2 && RScevs.size() == 1)
      RScevs.push_back(RScevs[0]);
    else if (RScevs.size() == 2 && LScevs.size() == 1)
      LScevs.push_back(LScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }

    for (auto [L, R] : zip(LScevs, RScevs))
      ScevList.emplace_back(GetBinOpExpr(Opcode, get<0>(L), get<0>(R)),
                            NeedsFreeze);
    break;
  }
  default:
    // Just return the current SCEV if we haven't handled the instruction yet.
    LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
    ScevList.emplace_back(Scev, NeedsFreeze);
    break;
  }
}
bool AccessAnalysis::createCheckForAccess(
    RuntimePointerChecking &RtCheck, MemAccessInfo Access, Type *AccessTy,
    const DenseMap<Value *, const SCEV *> &StridesMap,
    DenseMap<Value *, unsigned> &DepSetId, Loop *TheLoop,
    unsigned &RunningDepId, unsigned ASId, bool Assume) {
  Value *Ptr = Access.getPointer();
  // ...
  assert(!RTCheckPtrs.empty() &&
         "Must have some runtime-check pointer candidates");

  auto IsLoopInvariantOrAR =
      [&](const PointerIntPair<const SCEV *, 1, bool> &P) {
        // ...
      };

  if (RTCheckPtrs.size() == 2 && all_of(RTCheckPtrs, IsLoopInvariantOrAR)) {
    LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n";
               for (const auto &[Idx, Q] : enumerate(RTCheckPtrs))
                 dbgs() << "\t(" << Idx << ") " << *Q.getPointer() << "\n");
  }
  // ...
  for (auto &P : RTCheckPtrs) {
    // ...
  }

  // When we run after a failing dependency check we have to make sure
  // we don't have wrapping pointers.
  if (RTCheckPtrs.size() == 1) {
    // ...
    if (!isNoWrap(PSE, AR, RTCheckPtrs.size() == 1 ? Ptr : nullptr, AccessTy,
                  TheLoop, Assume, DT))
      return false;
  }

  for (const auto &[PtrExpr, NeedsFreeze] : RTCheckPtrs) {
    // The id of the dependence set.
    unsigned DepId;

    if (isDependencyCheckNeeded()) {
      Value *Leader = DepCands.getLeaderValue(Access).getPointer();
      unsigned &LeaderId = DepSetId[Leader];
      if (!LeaderId)
        LeaderId = RunningDepId++;
      DepId = LeaderId;
    } else
      // Each access has its own dependence set.
      DepId = RunningDepId++;

    bool IsWrite = Access.getInt();
    RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
                   NeedsFreeze);
    LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
  }

  return true;
}
bool AccessAnalysis::canCheckPtrAtRT(
    RuntimePointerChecking &RtCheck, Loop *TheLoop,
    const DenseMap<Value *, const SCEV *> &StridesMap,
    Value *&UncomputablePtr, bool AllowPartial) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded)
    return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to access from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 0;
  for (const auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;
    ++ASId;
    auto ASPointers = AS.getPointers();

    // We assign consecutive IDs to accesses from different dependence sets.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;

    // First, count how many write and read accesses are in the alias set.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (const Value *ConstPtr : ASPointers) {
      Value *Ptr = const_cast<Value *>(ConstPtr);
      bool IsWrite = Accesses.contains(MemAccessInfo(Ptr, true));
      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;
      AccessInfos.emplace_back(Ptr, IsWrite);
    }

    // We do not need runtime checks for this alias set, if there are no writes
    // or a single write and no reads.
    if (NumWritePtrChecks == 0 ||
        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
      assert((ASPointers.size() <= 1 ||
              all_of(ASPointers,
                     [this](const Value *Ptr) {
                       MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
                                                 true);
                       return !DepCands.contains(AccessWrite);
                     })) &&
             "Can only skip updating CanDoRT below, if all entries in AS "
             "are reads or there is at most 1 entry");
      continue;
    }

    for (auto &Access : AccessInfos) {
      for (const auto &AccessTy : Accesses[Access]) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  /*Assume=*/false)) {
          LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
                            << *Access.getPointer() << '\n');
          Retries.emplace_back(Access, AccessTy);
          CanDoAliasSetRT = false;
        }
      }
    }

    // Note that this function computes CanDoRT and MayNeedRTCheck
    // independently: a set with no runtime checks may still have pointers
    // with uncomputable bounds, and vice versa.
    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (const auto &[Access, AccessTy] : Retries) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          UncomputablePtr = Access.getPointer();
          if (!AllowPartial)
            break;
        }
      }
    }

    CanDoRT &= CanDoAliasSetRT;
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable. We also have
  // to assume they could overlap.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // ...
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (MayNeedRTCheck && (CanDoRT || AllowPartial))
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  // ...
  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
  assert(CanDoRTIfNeeded == (CanDoRT || !MayNeedRTCheck) &&
         "CanDoRTIfNeeded depends on RtCheck.Need");
  if (!CanDoRTIfNeeded && !AllowPartial)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}
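// Illustrative note (a summary, not code from this file): pointers that land
// in the same dependence set (same DepSetId) are covered by the dependence
// analysis and never checked against each other at runtime; comparisons are
// only emitted between different dependence sets within the same alias set
// (ASId). Pointers in different alias sets are already known not to alias.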
void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers.
  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG({
    for (const auto &[A, _] : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write"
                            : (ReadOnlyPtr.contains(A.getPointer())
                                   ? "read-only"
                                   : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap.
  for (const auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves used
    // ordered collections internally and so the iteration order here is
    // deterministic.
    auto ASPointers = AS.getPointers();

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<const Value *, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of access to check after all writes have been processed.
    PtrAccessMap DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;

      for (const Value *ConstPtr : ASPointers) {
        Value *Ptr = const_cast<Value *>(ConstPtr);

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (const auto &[AC, _] : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.contains(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessMap, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.contains(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          // ...

          // Defer memory accesses that we know are read-only until after all
          // writes of the alias set have been processed.
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert({Access, {}});
            continue;
          }

          // If this is a write - check other reads and writes for conflicts.
          // If this is a read only check other writes for conflicts (but only
          // if there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared underlying object.
          // ...
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (const Value *UnderlyingObj : UOs) {
            // ...
            auto [It, Inserted] = ObjToLastAccess.try_emplace(UnderlyingObj,
                                                              Access);
            if (!Inserted) {
              DepCands.unionSets(Access, It->second);
              It->second = Access;
            }
          }
        }
      }
    }
  }
}
std::optional<int64_t>
llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
                   const Loop *Lp, const DominatorTree &DT,
                   const DenseMap<Value *, const SCEV *> &StridesMap,
                   bool Assume, bool ShouldCheckWrap) {
  // ...
  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return std::nullopt;
  }

  std::optional<int64_t> Stride =
      getStrideFromAddRec(AR, Lp, AccessTy, Ptr, PSE);
  if (!ShouldCheckWrap || !Stride)
    return Stride;

  if (isNoWrap(PSE, AR, Ptr, AccessTy, Lp, Assume, DT, Stride))
    return Stride;

  LLVM_DEBUG(
      dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
             << *Ptr << " SCEV: " << *AR << "\n");
  return std::nullopt;
}
std::optional<int64_t> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
                                             Type *ElemTyB, Value *PtrB,
                                             const DataLayout &DL,
                                             ScalarEvolution &SE,
                                             bool StrictCheck, bool CheckType) {
  assert(PtrA && PtrB && "Expected non-nullptr pointers.");

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return 0;

  // Make sure that the element types are the same if required.
  if (CheckType && ElemTyA != ElemTyB)
    return std::nullopt;

  // Check that the address spaces match.
  if (ASA != ASB)
    return std::nullopt;
  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  // ...
  std::optional<int64_t> Val;
  if (PtrA1 == PtrB1) {
    // Retrieve the address space again as pointer stripping now tracks
    // through `addrspacecast`.
    // ...
    if (ASA != ASB)
      return std::nullopt;

    IdxWidth = DL.getIndexSizeInBits(ASA);
    OffsetA = OffsetA.sextOrTrunc(IdxWidth);
    OffsetB = OffsetB.sextOrTrunc(IdxWidth);
    // ...
  } else {
    // Otherwise compute the distance with SCEV between the base pointers.
    std::optional<APInt> Diff =
        SE.computeConstantDifference(PtrSCEVB, PtrSCEVA);
    if (!Diff)
      return std::nullopt;
    Val = Diff->trySExtValue();
  }
  if (!Val)
    return std::nullopt;

  int64_t Size = DL.getTypeStoreSize(ElemTyA);
  int64_t Dist = *Val / Size;

  // Ensure that the calculated distance matches the type-based one after all
  // the bitcasts removal in the provided pointers.
  if (!StrictCheck || Dist * Size == Val)
    return Dist;
  return std::nullopt;
}
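// Worked example (illustrative): with ElemTy = i32 (Size = 4) and PtrB
// exactly 8 bytes past PtrA, Val = 8 and the result is Dist = 2 elements.
// With Val = 6 under StrictCheck, Dist = 1 but Dist * Size == 4 != 6, so the
// distance is not a whole number of elements and std::nullopt is returned.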
bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
                           const DataLayout &DL, ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
  // Walk over the pointers, and map each of them to an offset relative to
  // first pointer in the array.
  Value *Ptr0 = VL[0];

  using DistOrdPair = std::pair<int64_t, unsigned>;
  auto Compare = llvm::less_first();
  std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
  Offsets.emplace(0, 0);
  bool IsConsecutive = true;
  for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
    std::optional<int64_t> Diff =
        getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
                        /*StrictCheck=*/true);
    if (!Diff)
      return false;

    // Check if the pointer with the same offset is found.
    int64_t Offset = *Diff;
    auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
    if (!IsInserted)
      return false;
    // Consecutive order if the inserted element is the last one.
    IsConsecutive &= std::next(It) == Offsets.end();
  }
  SortedIndices.clear();
  if (!IsConsecutive) {
    // Fill SortedIndices array only if it is non-consecutive.
    SortedIndices.resize(VL.size());
    for (auto [Idx, Off] : enumerate(Offsets))
      SortedIndices[Idx] = Off.second;
  }
  return true;
}
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  // ...
  std::optional<int64_t> Diff =
      getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
                      /*StrictCheck=*/true, CheckType);
  return Diff == 1;
}

void MemoryDepChecker::addAccess(StoreInst *SI) {
  visitPointers(SI->getPointerOperand(), *InnermostLoop,
                [this, SI](Value *Ptr) {
                  Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
                  InstMap.push_back(SI);
                });
  ++AccessIdx;
}

void MemoryDepChecker::addAccess(LoadInst *LI) {
  visitPointers(LI->getPointerOperand(), *InnermostLoop,
                [this, LI](Value *Ptr) {
                  Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
                  InstMap.push_back(LI);
                });
  ++AccessIdx;
}
bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize,
                                                    unsigned CommonStride) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // ...
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssuesPowerOf2 =
      std::min(VectorizerParams::MaxVectorWidth * TypeByteSize,
               MaxStoreLoadForwardSafeDistanceInBits);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize;
       VF <= MaxVFWithoutSLForwardIssuesPowerOf2; VF *= 2) {
    // If the number of vector iterations between the store and the load are
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssuesPowerOf2 = (VF >> 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssuesPowerOf2 < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (CommonStride &&
      MaxVFWithoutSLForwardIssuesPowerOf2 <
          MaxStoreLoadForwardSafeDistanceInBits &&
      MaxVFWithoutSLForwardIssuesPowerOf2 !=
          VectorizerParams::MaxVectorWidth * TypeByteSize) {
    uint64_t MaxVF =
        bit_floor(MaxVFWithoutSLForwardIssuesPowerOf2 / CommonStride);
    uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
    MaxStoreLoadForwardSafeDistanceInBits =
        std::min(MaxStoreLoadForwardSafeDistanceInBits, MaxVFInBits);
  }
  return false;
}
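// Worked example (illustrative): TypeByteSize = 4, Distance = 24. The loop
// starts at VF = 8: 24 % 8 == 0, so no conflict at that width. At VF = 16:
// 24 % 16 == 8 and 24 / 16 == 1 < 32 iterations, so the maximum safe width
// is halved to 8 bytes, i.e. two 4-byte elements per vector. Since 8 >= 2 *
// TypeByteSize, the dependence is still vectorizable at the reduced width.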
/// Given a dependence-distance \p Dist between two memory accesses, that have
/// strides in the same direction whose absolute value of the maximum stride
/// is given in \p MaxStride, check if \p Dist is always larger than the
/// distance the accesses can cover within the loop (bounded by \p MaxBTC),
/// in which case there is no dependence.
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &MaxBTC, const SCEV &Dist,
                                     uint64_t MaxStride) {
  // ...
  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
  uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());

  // The dependence distance can be positive/negative, so we sign extend Dist;
  // The multiplication of the absolute stride in bytes and the
  // backedgeTakenCount is non-negative, so we zero extend Product.
  if (DistTypeSizeBits > ProductTypeSizeBits)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  else
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
  // ...
}
/// Check the dependence for two accesses with the same (byte-scaled) stride
/// \p Stride. \p Distance is the positive byte distance and \p TypeByteSize
/// is the type size in bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not multiple of type byte size.
  if (Distance % TypeByteSize)
    return false;

  // No dependence if the distance is not a multiple of the stride, as the
  // two address streams then never touch the same location.
  return Distance % Stride;
}
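// Worked example (illustrative): for 4-byte elements accessed as A[3*i] and
// A[3*i + 2], the byte stride is 12 and the byte distance is 8. 8 % 4 == 0
// (a whole number of elements) and 8 % 12 != 0, so the address streams never
// collide and the accesses are independent. A distance of 24 bytes, a
// multiple of the stride, would revisit the same addresses two iterations
// later and the function would report a dependence.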
bool MemoryDepChecker::areAccessesCompletelyBeforeOrAfter(const SCEV *Src,
                                                          Type *SrcTy,
                                                          const SCEV *Sink,
                                                          Type *SinkTy) {
  const SCEV *BTC = PSE.getBackedgeTakenCount();
  const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
  ScalarEvolution &SE = *PSE.getSE();
  const auto &[SrcStart_, SrcEnd_] =
      getStartAndEndForAccess(InnermostLoop, Src, SrcTy, BTC, SymbolicMaxBTC,
                              &SE, &PointerBounds, DT, AC, LoopGuards);
  if (isa<SCEVCouldNotCompute>(SrcStart_) || isa<SCEVCouldNotCompute>(SrcEnd_))
    return false;

  const auto &[SinkStart_, SinkEnd_] =
      getStartAndEndForAccess(InnermostLoop, Sink, SinkTy, BTC, SymbolicMaxBTC,
                              &SE, &PointerBounds, DT, AC, LoopGuards);
  if (isa<SCEVCouldNotCompute>(SinkStart_) ||
      isa<SCEVCouldNotCompute>(SinkEnd_))
    return false;
  // ...
}
std::variant<MemoryDepChecker::Dependence::DepType,
             MemoryDepChecker::DepDistanceStrideAndSizeInfo>
MemoryDepChecker::getDependenceDistanceStrideAndSize(
    const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
    const AccessAnalysis::MemAccessInfo &B, Instruction *BInst) {
  const auto &DL = InnermostLoop->getHeader()->getDataLayout();
  auto &SE = *PSE.getSE();
  const auto &[APtr, AIsWrite] = A;
  const auto &[BPtr, BIsWrite] = B;

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  Type *ATy = getLoadStoreType(AInst);
  Type *BTy = getLoadStoreType(BInst);

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;

  std::optional<int64_t> StrideAPtr = getPtrStride(
      PSE, ATy, APtr, InnermostLoop, *DT, SymbolicStrides, true, true);
  std::optional<int64_t> StrideBPtr = getPtrStride(
      PSE, BTy, BPtr, InnermostLoop, *DT, SymbolicStrides, true, true);

  const SCEV *Src = PSE.getSCEV(APtr);
  const SCEV *Sink = PSE.getSCEV(BPtr);

  // If the induction step is negative we have to invert source and sink of
  // the dependence when measuring the distance between them. We should not
  // swap AIsWrite with BIsWrite, as their uses expect them in program order.
  if (StrideAPtr && *StrideAPtr < 0) {
    std::swap(Src, Sink);
    std::swap(AInst, BInst);
    std::swap(ATy, BTy);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = SE.getMinusSCEV(Sink, Src);

  LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
                    << "\n");
  LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
                    << ": " << *Dist << "\n");

  // Need accesses with constant strides and the same direction for further
  // dependence analysis. We don't want to vectorize "A[B[i]] += ..." and
  // similar code or pointer arithmetic that could wrap in the address space.
  if (!StrideAPtr || !StrideBPtr) {
    LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return MemoryDepChecker::Dependence::IndirectUnsafe;
  }

  int64_t StrideAPtrInt = *StrideAPtr;
  int64_t StrideBPtrInt = *StrideBPtr;
  LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt
                    << " Sink induction step: " << StrideBPtrInt << "\n");

  // At least Src or Sink are loop invariant and the other is strided or
  // invariant. We can generate a runtime check to disambiguate the accesses.
  if (!StrideAPtrInt || !StrideBPtrInt)
    return MemoryDepChecker::Dependence::Unknown;

  // Both Src and Sink have a constant stride, check if they are in the same
  // direction.
  if ((StrideAPtrInt > 0) != (StrideBPtrInt > 0)) {
    LLVM_DEBUG(
        dbgs() << "Pointer access with strides in different directions\n");
    return MemoryDepChecker::Dependence::Unknown;
  }

  TypeSize AStoreSz = DL.getTypeStoreSize(ATy);
  TypeSize BStoreSz = DL.getTypeStoreSize(BTy);

  // If store sizes are not the same, set TypeByteSize to zero, so we can
  // check it in the caller isDependent.
  uint64_t ASz = DL.getTypeAllocSize(ATy);
  uint64_t BSz = DL.getTypeAllocSize(BTy);
  uint64_t TypeByteSize = (AStoreSz == BStoreSz) ? BSz : 0;

  uint64_t StrideAScaled = std::abs(StrideAPtrInt) * ASz;
  uint64_t StrideBScaled = std::abs(StrideBPtrInt) * BSz;

  uint64_t MaxStride = std::max(StrideAScaled, StrideBScaled);

  std::optional<uint64_t> CommonStride;
  if (StrideAScaled == StrideBScaled)
    CommonStride = StrideAScaled;

  // TODO: Historically, we didn't retry with runtime checks when (unscaled)
  // strides were different but there is no inherent reason to.
  ShouldRetryWithRuntimeChecks |= StrideAPtrInt == StrideBPtrInt;

  return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride,
                                      TypeByteSize, AIsWrite, BIsWrite);
}
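// Worked example (illustrative): A[2*i] with i16 elements and B[i] with i32
// elements give StrideAScaled = 2 * 2 = 4 and StrideBScaled = 1 * 4 = 4
// bytes, so the two accesses share CommonStride = 4 even though their
// element strides differ (TypeByteSize is 0 here, since the store sizes
// differ, and the caller handles that case separately).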
MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  // Check if the accesses are completely before or after each other, in which
  // case there is no dependence.
  auto CheckCompletelyBeforeOrAfter = [&]() {
    auto *APtr = A.getPointer();
    auto *BPtr = B.getPointer();
    Type *ATy = getLoadStoreType(InstMap[AIdx]);
    Type *BTy = getLoadStoreType(InstMap[BIdx]);
    const SCEV *Src = PSE.getSCEV(APtr);
    const SCEV *Sink = PSE.getSCEV(BPtr);
    return areAccessesCompletelyBeforeOrAfter(Src, ATy, Sink, BTy);
  };

  // Get the dependence distance, stride, type size and what access writes for
  // the dependence between A and B.
  auto Res =
      getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);
  if (std::holds_alternative<Dependence::DepType>(Res)) {
    if (std::get<Dependence::DepType>(Res) == Dependence::Unknown &&
        CheckCompletelyBeforeOrAfter())
      return Dependence::NoDep;
    return std::get<Dependence::DepType>(Res);
  }

  auto &[Dist, MaxStride, CommonStride, TypeByteSize, AIsWrite, BIsWrite] =
      std::get<DepDistanceStrideAndSizeInfo>(Res);
  bool HasSameSize = TypeByteSize > 0;

  ScalarEvolution &SE = *PSE.getSE();
  auto &DL = InnermostLoop->getHeader()->getDataLayout();

  // If the distance between the accesses is larger than their maximum
  // absolute stride multiplied by the symbolic maximum backedge taken count
  // (an upper bound of the number of iterations), the accesses are
  // independent.
  if (HasSameSize &&
      isSafeDependenceDistance(
          DL, SE, *(PSE.getSymbolicMaxBackedgeTakenCount()), *Dist, MaxStride))
    return Dependence::NoDep;

  const APInt *APDist = nullptr;
  uint64_t ConstDist =
      match(Dist, m_scev_APInt(APDist)) ? APDist->abs().getZExtValue() : 0;

  // Attempt to prove strided accesses independent.
  if (ConstDist > 0 && CommonStride && CommonStride > 1 && HasSameSize &&
      areStridedAccessesIndependent(ConstDist, *CommonStride, TypeByteSize)) {
    LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }

  // Negative distances are not plausible dependencies.
  if (SE.isKnownNonPositive(Dist)) {
    if (SE.isKnownNonNegative(Dist)) {
      if (HasSameSize)
        // Write to the same location with the same size.
        return Dependence::Forward;
      LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
                           "different type sizes\n");
      return Dependence::Unknown;
    }

    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    // Check if the first access writes to a location that is read in a later
    // iteration, where the distance between them is not a multiple of a
    // vector factor and relatively small.
    // ...
    if (IsTrueDataDependence && EnableForwardingConflictDetection &&
        ConstDist &&
        couldPreventStoreLoadForward(ConstDist, TypeByteSize)) {
      LLVM_DEBUG(
          dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
      return Dependence::ForwardButPreventsForwarding;
    }
    // ...
    return Dependence::Forward;
  }

  int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
  // ...
  if (MinDistance <= 0) {
    if (CheckCompletelyBeforeOrAfter())
      return Dependence::NoDep;
    // ...
  }

  if (!HasSameSize) {
    if (CheckCompletelyBeforeOrAfter())
      return Dependence::NoDep;
    LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
                         "different type sizes\n");
    return Dependence::Unknown;
  }

  // ...
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
  // ...
  uint64_t MinDistanceNeeded = MaxStride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
    // ...
    LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
                      << MinDistance << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than the smallest
  // dependence distance seen so far.
  if (MinDistanceNeeded > MinDepDistBytes) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
                      << MinDistanceNeeded << " size in bytes\n");
    return Dependence::Backward;
  }

  MinDepDistBytes =
      std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      CommonStride &&
      couldPreventStoreLoadForward(MinDistance, TypeByteSize, *CommonStride))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  uint64_t MaxVF = MinDepDistBytes / MaxStride;
  LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
                    << " with max VF = " << MaxVF << '\n');

  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
  if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) {
    // For non-constant distances, we checked the lower bound of the distance,
    // which may be larger at runtime (and safe for vectorization). Classify
    // as Unknown so we re-try with runtime checks, unless the accesses
    // provably cannot overlap.
    if (CheckCompletelyBeforeOrAfter())
      return Dependence::NoDep;
    return Dependence::Unknown;
  }

  MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
  return Dependence::BackwardVectorizable;
}
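// Worked example (illustrative): with MaxStride = 8 bytes, TypeByteSize = 4
// and MinNumIter = 2, MinDistanceNeeded = 8 * 1 + 4 = 12 bytes. A constant
// dependence distance of 32 bytes then yields MaxVF = 32 / 8 = 4 elements,
// i.e. MaxVFInBits = 4 * 4 * 8 = 128, a 128-bit vector of i32.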
bool MemoryDepChecker::areDepsSafe(const DepCandidates &AccessSets,
                                   const MemAccessInfoList &CheckDeps) {
  MinDepDistBytes = -1;
  SmallPtrSet<MemAccessInfo, 8> Visited;
  for (MemAccessInfo CurAccess : CheckDeps) {
    if (Visited.contains(CurAccess))
      continue;

    // Check accesses within this set.
    // ...
    bool AIIsWrite = AI->getInt();
    // Check loads only against next equivalent class, but stores also against
    // other stores in the same equivalence class - to the same address.
    EquivalenceClasses<MemAccessInfo>::member_iterator OI =
        (AIIsWrite ? AI : std::next(AI));
    while (OI != AE) {
      // Check every access pair.
      // ...
      auto &Acc = Accesses[*AI];
      for (std::vector<unsigned>::iterator I1 = Acc.begin(), I1E = Acc.end();
           I1 != I1E; ++I1) {
        // Scan all accesses of another equivalence class, but only the next
        // accesses of the same equivalent class.
        for (std::vector<unsigned>::iterator
                 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
                 I2E = (OI == AI ? I1E : Accesses[*OI].end());
             I2 != I2E; ++I2) {
          auto A = std::make_pair(&*AI, *I1);
          auto B = std::make_pair(&*OI, *I2);

          assert(*I1 != *I2);
          if (*I1 > *I2)
            std::swap(A, B);

          Dependence::DepType Type =
              isDependent(*A.first, A.second, *B.first, B.second);
          mergeInStatus(Dependence::isSafeForVectorization(Type));

          // Gather dependences unless we accumulated MaxDependences
          // dependences. In that case return as soon as we find the first
          // unsafe dependence. This puts a limit on this quadratic algorithm.
          if (RecordDependences) {
            if (Type != Dependence::NoDep)
              Dependences.emplace_back(A.second, B.second, Type);

            if (Dependences.size() >= MaxDependences) {
              RecordDependences = false;
              Dependences.clear();
              LLVM_DEBUG(dbgs()
                         << "Too many dependences, stopped recording\n");
            }
          }
          if (!RecordDependences && !isSafeForVectorization())
            return false;
        }
      }
      ++OI;
    }
    // ...
  }

  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return isSafeForVectorization();
}
SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
  MemAccessInfo Access(Ptr, IsWrite);
  auto I = Accesses.find(Access);
  SmallVector<Instruction *, 4> Insts;
  if (I != Accesses.end()) {
    transform(I->second, std::back_inserter(Insts),
              [&](unsigned Idx) { return this->InstMap[Idx]; });
  }
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep",
    "Unknown",
    "IndirectUnsafe",
    "Forward",
    "ForwardButPreventsForwarding",
    "Backward",
    "BackwardVectorizable",
    "BackwardVectorizableButPreventsForwarding"};
bool LoopAccessInfo::canAnalyzeLoop() {
  // ...
  // We can only analyze innermost loops.
  if (!TheLoop->isInnermost()) {
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the symbolic max backedge taken
  // count, which is needed for further analysis.
  if (isa<SCEVCouldNotCompute>(PSE->getSymbolicMaxBackedgeTakenCount())) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}
bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;
  SmallPtrSet<MDNode *, 8> LoopAliasScopes;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  bool HasComplexMemInst = false;

  // A runtime check is only legal to insert if there are no convergent calls.
  HasConvergentOp = false;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  const bool EnableMemAccessVersioningOfLoop =
      EnableMemAccessVersioning &&
      !TheLoop->getHeader()->getParent()->hasOptSize();

  // Traverse blocks in fixed RPOT order, regardless of their storage in the
  // loop info, as it may be arbitrary.
  LoopBlocksRPO RPOT(TheLoop);
  RPOT.perform(LI);
  for (BasicBlock *BB : RPOT) {
    // Scan the BB and collect legal loads and stores. Also detect any
    // convergent instructions.
    for (Instruction &I : *BB) {
      if (auto *Call = dyn_cast<CallBase>(&I))
        if (Call->isConvergent())
          HasConvergentOp = true;

      // With both a non-vectorizable memory instruction and a convergent
      // operation, found in this loop, no reason to continue the search.
      if (HasComplexMemInst && HasConvergentOp)
        return false;

      // Avoid hitting recordAnalysis multiple times.
      if (HasComplexMemInst)
        continue;

      // Record alias scopes defined inside the loop.
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        for (Metadata *Op : Decl->getScopeList()->operands())
          LoopAliasScopes.insert(cast<MDNode>(Op));

      // ...

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, we only allow it if it's a call to a function with
      // a vector mapping and no pointer arguments.
      if (I.mayReadFromMemory()) {
        auto hasPointerArgs = [](CallBase *CB) {
          return any_of(CB->args(), [](Value const *Arg) {
            return Arg->getType()->isPointerTy();
          });
        };
        // ...
        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld) {
          recordAnalysis("CantVectorizeInstruction", Ld)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!Ld->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          HasComplexMemInst = true;
          continue;
        }
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          HasComplexMemInst = true;
          continue;
        }
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  if (HasComplexMemInst)
    return false;

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    return true;
  }

  MemoryDepChecker::DepCandidates DepCands;
  AccessAnalysis Accesses(TheLoop, AA, LI, *DT, DepCands, *PSE,
                          LoopAliasScopes);

  // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list).
  SmallSet<std::pair<Value *, Type *>, 16> Seen;

  // Record uniform store addresses to identify if we have multiple stores
  // to the same address.
  SmallPtrSet<Value *, 16> UniformStores;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();

    if (isInvariant(Ptr)) {
      // Record store instructions to loop invariant addresses.
      StoresToInvariantAddresses.push_back(ST);
      HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
          !UniformStores.insert(Ptr).second;
    }

    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    Type *AccessTy = getLoadStoreType(ST);
    if (Seen.insert({Ptr, AccessTy}).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
                    [&Accesses, AccessTy, Loc](Value *Ptr) {
                      MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
                      Accesses.addStore(NewLoc, AccessTy);
                    });
    }
  }

  if (IsAnnotatedParallel) {
    LLVM_DEBUG(
        dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
               << "checks.\n");
    return true;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the read list. If
    // we *did* see it before, then it is already in the read-write list. This
    // allows us to vectorize expressions such as A[i] += x, because the
    // address of A[i] is a read-write pointer. This only works if the stride
    // of A[i] is consecutive.
    bool IsReadOnlyPtr = false;
    Type *AccessTy = getLoadStoreType(LD);
    if (Seen.insert({Ptr, AccessTy}).second ||
        !getPtrStride(*PSE, AccessTy, Ptr, TheLoop, *DT, SymbolicStrides,
                      false)
             .value_or(0)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    // See if there is an unsafe dependency between a load to a uniform
    // address and store to the same uniform address.
    if (UniformStores.contains(Ptr)) {
      LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                           "load and uniform store to the same address!\n");
      HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
                  [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
                    MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
                    Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
                  });
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then is it safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    return true;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this
  // information to place a runtime bound check.
  Value *UncomputablePtr = nullptr;
  HasCompletePtrRtChecking = Accesses.canCheckPtrAtRT(
      *PtrRtChecking, TheLoop, SymbolicStrides, UncomputablePtr, AllowPartial);
  if (!HasCompletePtrRtChecking) {
    const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
    recordAnalysis("CantIdentifyArrayBounds", I)
        << "cannot identify array bounds";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                      << "the array bounds.\n");
    return false;
  }

  LLVM_DEBUG(
      dbgs()
      << "LAA: May be able to perform a memory runtime check if needed.\n");

  bool DepsAreSafe = true;
  if (Accesses.isDependencyCheckNeeded()) {
    LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    DepsAreSafe =
        DepChecker->areDepsSafe(DepCands, Accesses.getDependenciesToCheck());

    if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeChecks()) {
      LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      UncomputablePtr = nullptr;
      HasCompletePtrRtChecking =
          Accesses.canCheckPtrAtRT(*PtrRtChecking, TheLoop, SymbolicStrides,
                                   UncomputablePtr, AllowPartial);

      // Check that we found the bounds for the pointer.
      if (!HasCompletePtrRtChecking) {
        auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
        recordAnalysis("CantCheckMemDepsAtRunTime", I)
            << "cannot check memory dependencies at runtime";
        LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        return false;
      }
      DepsAreSafe = true;
    }
  }

  if (HasConvergentOp) {
    recordAnalysis("CantInsertRuntimeCheckWithConvergent")
        << "cannot add control dependency to convergent operation";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
                         "would be needed with a convergent operation\n");
    return false;
  }

  if (DepsAreSafe) {
    LLVM_DEBUG(
        dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
               << (PtrRtChecking->Need ? "" : " don't")
               << " need runtime memory checks.\n");
    return true;
  }

  emitUnsafeDependenceRemark();
  return false;
}
void LoopAccessInfo::emitUnsafeDependenceRemark() {
  const auto *Deps = getDepChecker().getDependences();
  if (!Deps)
    return;
  const auto *Found =
      llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
        return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
               MemoryDepChecker::VectorizationSafetyStatus::Safe;
      });
  if (Found == Deps->end())
    return;
  MemoryDepChecker::Dependence Dep = *Found;

  LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");

  // Emit remark for first unsafe dependence
  bool HasForcedDistribution = false;
  std::optional<const MDOperand *> Value =
      findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
  if (Value) {
    // ...
  }

  const std::string Info =
      HasForcedDistribution
          ? "unsafe dependent memory operations in loop."
          : "unsafe dependent memory operations in loop. Use "
            "#pragma clang loop distribute(enable) to allow loop distribution "
            "to attempt to isolate the offending operations into a separate "
            "loop";
  OptimizationRemarkAnalysis &R =
      recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;

  switch (Dep.Type) {
  case MemoryDepChecker::Dependence::Backward:
    R << "\nBackward loop carried data dependence.";
    break;
  case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
    R << "\nForward loop carried data dependence that prevents "
         "store-to-load forwarding.";
    break;
  case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
    R << "\nBackward loop carried data dependence that prevents "
         "store-to-load forwarding.";
    break;
  case MemoryDepChecker::Dependence::IndirectUnsafe:
    R << "\nUnsafe indirect dependence.";
    break;
  case MemoryDepChecker::Dependence::Unknown:
    R << "\nUnknown data dependence.";
    break;
  // ...
  }

  if (Instruction *I = Dep.getSource(getDepChecker())) {
    DebugLoc SourceLoc = I->getDebugLoc();
    if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
      SourceLoc = DD->getDebugLoc();
    if (SourceLoc)
      R << " Memory location is the same as accessed at "
        << ore::NV("Location", SourceLoc);
  }
}
bool LoopAccessInfo::blockNeedsPredication(const BasicBlock *BB,
                                           const Loop *TheLoop,
                                           const DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  const BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}
OptimizationRemarkAnalysis &
LoopAccessInfo::recordAnalysis(StringRef RemarkName, const Instruction *I) {
  assert(!Report && "Multiple reports generated");

  const Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
                                                        DL, CodeRegion);
  return *Report;
}
bool LoopAccessInfo::isInvariant(Value *V) const {
  auto *SE = PSE->getSE();
  if (TheLoop->isLoopInvariant(V))
    return true;
  // ...
}

/// If \p Ptr is a GEP, which has a loop-variant operand, return that operand.
static Value *getLoopVariantGEPOperand(Value *Ptr, ScalarEvolution *SE,
                                       Loop *Lp) {
  // ...
  for (const Use &U : GEP->operands()) {
    // ...
  }
  // ...
}

/// Get the stride of a pointer access in a loop. Looks for symbolic strides
/// "a[i*stride]".
static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
                                        Loop *Lp) {
  // ...
  Value *OrigPtr = Ptr;
  // ...
  // Strip off integer casts.
  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
    V = C->getOperand();
  // ...
}
void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  // ...
  const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!StrideExpr)
    return;

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *StrideExpr
                    << "\n");

  if (!SpeculateUnitStride) {
    LLVM_DEBUG(dbgs() << "  Chose not to due to -laa-speculate-unit-stride\n");
    return;
  }

  // Avoid adding the "Stride == 1" predicate when we know that
  // Stride >= Trip-Count. ...
  const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();

  // Match the types so we can compare the stride and the MaxBTC.
  // The Stride can be positive/negative, so we sign extend Stride;
  // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
  const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
  uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
  uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = MaxBTC;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSizeBits >= StrideTypeSizeBits)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
  const SCEV *StrideMinusBETaken =
      SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking
  // "Stride >= TripCount" is equivalent to checking
  // "Stride - MaxBTC > 0".
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  // Strip back off the integer cast and use the raw stride expression.
  const SCEV *StrideBase = StrideExpr;
  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
    StrideBase = C->getOperand();
  SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
}
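// Illustrative example (not code from this file): for
//   for (i = 0; i < n; i++) A[i * Stride] = B[i * Stride];
// LAA can record the symbolic stride and let users version the loop on the
// predicate 'Stride == 1', analyzing the versioned copy as unit-strided.
// This is the behavior toggled by -enable-mem-access-versioning above.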
LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetTransformInfo *TTI,
                               const TargetLibraryInfo *TLI, AAResults *AA,
                               DominatorTree *DT, LoopInfo *LI,
                               AssumptionCache *AC, bool AllowPartial)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(nullptr), TheLoop(L), AllowPartial(AllowPartial) {
  unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
  if (TTI && !TTI->enableScalableVectorization())
    // Scale the vector width by 2 as rough estimate to also consider
    // interleaving.
    MaxTargetVectorWidthInBits =
        TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) *
        2;

  DepChecker = std::make_unique<MemoryDepChecker>(
      *PSE, AC, DT, L, SymbolicStrides, MaxTargetVectorWidthInBits,
      LoopGuards);
  PtrRtChecking =
      std::make_unique<RuntimePointerChecking>(*DepChecker, SE, LoopGuards);
  if (canAnalyzeLoop())
    CanVecMem = analyzeLoop(AA, LI, TLI, DT);
}
void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    if (!DepChecker->isSafeForAnyVectorWidth())
      OS << " with a maximum safe vector width of "
         << DepChecker->getMaxSafeVectorWidthInBits() << " bits";
    if (uint64_t SLDist = DepChecker->getStoreLoadForwardSafeDistanceInBits();
        !DepChecker->isSafeForAnyStoreLoadForwardDistances())
      OS << ", with a maximum safe store-load forward width of " << SLDist
         << " bits";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (const auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  if (PtrRtChecking->Need && !HasCompletePtrRtChecking)
    OS.indent(Depth) << "Generated run-time checks are incomplete\n";
  OS << "\n";

  OS.indent(Depth)
      << "Non vectorizable stores to invariant address were "
      << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
                  HasLoadStoreDependenceInvolvingLoopInvariantAddress
              ? ""
              : "not ")
      << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}
const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L,
                                                     bool AllowPartial) {
  const auto &[It, Inserted] = LoopAccessInfoMap.try_emplace(&L);

  // We need to create the LoopAccessInfo if either we don't already have one,
  // or if it was created with a different value of AllowPartial.
  if (Inserted || It->second->hasAllowPartial() != AllowPartial)
    It->second = std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT,
                                                  &LI, AC, AllowPartial);

  return *It->second;
}

void LoopAccessInfoManager::clear() {
  // Collect LoopAccessInfo entries that may keep references to IR outside the
  // analyzed loop or SCEVs that may have been modified or invalidated. At the
  // moment, that is loops requiring memory or SCEV runtime checks, as those
  // cache SCEVs, e.g. for pointer expressions.
  SmallVector<Loop *> ToRemove;
  for (const auto &[L, LAI] : LoopAccessInfoMap) {
    if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
        LAI->getPSE().getPredicate().isAlwaysTrue())
      continue;
    ToRemove.push_back(L);
  }
  for (Loop *L : ToRemove)
    LoopAccessInfoMap.erase(L);
}

bool LoopAccessInfoManager::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // ...
}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Forward Handle Accesses
This file defines the DenseMap class.
Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...
This header defines various interfaces for pass management in LLVM.
static cl::opt< unsigned > MaxDependences("max-dependences", cl::Hidden, cl::desc("Maximum number of dependences collected by " "loop-access analysis (default = 100)"), cl::init(100))
We collect dependences up to this threshold.
static cl::opt< bool > EnableForwardingConflictDetection("store-to-load-forwarding-conflict-detection", cl::Hidden, cl::desc("Enable conflict detection in loop-access analysis"), cl::init(true))
Enable store-to-load forwarding conflict detection.
static void findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr, SmallVectorImpl< PointerIntPair< const SCEV *, 1, bool > > &ScevList, unsigned Depth)
static cl::opt< unsigned > MemoryCheckMergeThreshold("memory-check-merge-threshold", cl::Hidden, cl::desc("Maximum number of comparisons done when trying to merge " "runtime memory checks. (default = 100)"), cl::init(100))
The maximum iterations used to merge memory checks.
static const SCEV * getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
Get the stride of a pointer access in a loop.
static bool evaluatePtrAddRecAtMaxBTCWillNotWrap(const SCEVAddRecExpr *AR, const SCEV *MaxBTC, const SCEV *EltSize, ScalarEvolution &SE, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC, std::optional< ScalarEvolution::LoopGuards > &LoopGuards)
Return true, if evaluating AR at MaxBTC cannot wrap, because AR at MaxBTC is guaranteed inbounds of t...
static std::optional< int64_t > getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy, Value *Ptr, PredicatedScalarEvolution &PSE)
Try to compute a constant stride for AR.
static cl::opt< unsigned, true > VectorizationInterleave("force-vector-interleave", cl::Hidden, cl::desc("Sets the vectorization interleave count. " "Zero is autoselect."), cl::location(VectorizerParams::VectorizationInterleave))
static cl::opt< bool, true > HoistRuntimeChecks("hoist-runtime-checks", cl::Hidden, cl::desc("Hoist inner loop runtime memory checks to outer loop if possible"), cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true))
static DenseMap< const RuntimeCheckingPtrGroup *, unsigned > getPtrToIdxMap(ArrayRef< RuntimeCheckingPtrGroup > CheckingGroups)
Assign each RuntimeCheckingPtrGroup pointer an index for stable UTC output.
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
static cl::opt< unsigned, true > RuntimeMemoryCheckThreshold("runtime-memory-check-threshold", cl::Hidden, cl::desc("When performing memory disambiguation checks at runtime do not " "generate more than this number of comparisons (default = 8)."), cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8))
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, function_ref< void(Value *)> AddPointer)
static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR, Value *Ptr, Type *AccessTy, const Loop *L, bool Assume, const DominatorTree &DT, std::optional< int64_t > Stride=std::nullopt)
Check whether AR is a non-wrapping AddRec.
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE, const SCEV &MaxBTC, const SCEV &Dist, uint64_t MaxStride)
Given a dependence-distance Dist between two memory accesses, that have strides in the same direction...
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
static const SCEV * mulSCEVOverflow(const SCEV *A, const SCEV *B, ScalarEvolution &SE)
Returns A * B, if it is guaranteed not to unsigned wrap.
static Value * getLoopVariantGEPOperand(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
If Ptr is a GEP, which has a loop-variant operand, return that operand.
static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))
static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the foll...
static const SCEV * addSCEVNoOverflow(const SCEV *A, const SCEV *B, ScalarEvolution &SE)
Returns A + B, if it is guaranteed not to unsigned wrap.
This header provides classes for managing per-loop analyses.
This file provides utility analysis objects describing memory locations.
FunctionAnalysisManager FAM
This file defines the PointerIntPair class.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
static const X86InstrFMA3Group Groups[]
A manager for alias analyses.
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
APInt abs() const
Get the absolute value.
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
std::optional< int64_t > trySExtValue() const
Get sign extended value if possible.
int64_t getSExtValue() const
Get sign extended value.
This templated class represents "all analyses that operate over <aparticular IR unit>" (e....
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool isConvergent() const
Determine if the invoke is convergent.
@ ICMP_UGE
unsigned greater or equal
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
A parsed version of the target data layout string in and methods for querying it.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
Analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
iterator_range< member_iterator > members(const ECValue &ECV) const
bool contains(const ElemTy &V) const
Returns true if V is contained an equivalence class.
const ECValue & insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
member_iterator findLeader(const ElemTy &V) const
findLeader - Given a value in the set, return a member iterator for the equivalence class it is in.
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
PointerType * getType() const
Global values are always pointers.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
An instruction for reading from memory.
Value * getPointerOperand()
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
LLVM_ABI Result run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const LoopAccessInfo & getInfo(Loop &L, bool AllowPartial=false)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
the Memory Dependence Checker which can determine the loop-independent and loop-carried dependences b...
LLVM_ABI bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
static LLVM_ABI bool blockNeedsPredication(const BasicBlock *BB, const Loop *TheLoop, const DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
LLVM_ABI LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI, AssumptionCache *AC, bool AllowPartial=false)
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Represents a single loop in the control flow graph.
std::string getLocStr() const
Return a string containing the debug location of the loop (file name + line number if present,...
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
ArrayRef< MDOperand > operands() const
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool isSafeForAnyStoreLoadForwardDistances() const
Return true if there are no store-load forwarding dependencies.
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
LLVM_ABI bool areDepsSafe(const DepCandidates &AccessSets, const MemAccessInfoList &CheckDeps)
Check whether the dependencies between the accesses are safe, and records the dependence information ...
EquivalenceClasses< MemAccessInfo > DepCandidates
Set of potential dependent memory accesses.
bool shouldRetryWithRuntimeChecks() const
In same cases when the dependency check fails we can still vectorize the loop with a dynamic array ac...
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
SmallVector< MemAccessInfo, 8 > MemAccessInfoList
LLVM_ABI SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
@ PossiblySafeWithRtChecks
LLVM_ABI void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
uint64_t getStoreLoadForwardSafeDistanceInBits() const
Return safe power-of-2 number of elements, which do not prevent store-load forwarding,...
Representation for a specific memory location.
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
const Value * Ptr
The address of the start of the location.
PointerIntPair - This class implements a pair of a pointer and small integer.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
LLVM_ABI void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
LLVM_ABI bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth=0) const
Print the list run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
LLVM_ABI void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store it.
friend struct RuntimeCheckingPtrGroup
static LLVM_ABI bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
LLVM_ABI void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStart() const
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
const Loop * getLoop() const
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
The main scalar evolution driver.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Is operation BinOp between LHS and RHS provably does not have a signed/unsigned overflow (Signed)?
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N).
bool contains(const T &V) const
Check if the SmallSet contains the given element.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
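The insert idiom shared by SmallPtrSet and SmallSet, as a small runnable sketch: the bool in the returned pair distinguishes first-time insertion from an already-present element.

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Returns true exactly once per distinct value, a common de-duplication
// pattern in analysis worklists.
static bool visitOnce(SmallPtrSet<const Value *, 8> &Visited, const Value *V) {
  return Visited.insert(V).second; // .second is false if V was present.
}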
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI bool canBeFreed() const
Return true if the memory object referred to by V can be freed in the scope for which the SSA value defining the allocation is statically defined.
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
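A hedged sketch (illustrative helper): decompose a pointer into a base plus accumulated constant byte offset; the APInt must be sized to the pointer's index width before the call.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Strips constant-offset GEPs and returns the underlying base; Offset is
// left holding the total byte offset of Ptr relative to that base.
static const Value *getBaseAndOffset(const Value *Ptr, const DataLayout &DL,
                                     APInt &Offset) {
  Offset = APInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  return Ptr->stripAndAccumulateConstantOffsets(DL, Offset,
                                                /*AllowNonInbounds=*/false);
}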
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
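A conservative usage sketch (hypothetical helper): the byte count is only unconditionally usable when the pointer is also known non-null and the underlying object cannot be freed.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Returns the dereferenceable byte count, or 0 when the fact is qualified
// by possible null-ness or by the allocation being freeable.
static uint64_t getUnconditionalDerefBytes(const Value *Ptr,
                                           const DataLayout &DL) {
  bool CanBeNull, CanBeFreed;
  uint64_t Bytes =
      Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  return (CanBeNull || CanBeFreed) ? 0 : Bytes;
}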
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr ScalarTy getFixedValue() const
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
@ C
The default llvm calling convention, compatible with C.
bool match(Val *V, const Pattern &P)
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
is_undef_or_poison m_scev_UndefOrPoison()
Match an SCEVUnknown wrapping undef or poison.
class_match< const SCEVConstant > m_SCEVConstant()
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
class_match< const SCEV > m_SCEV()
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, bool > hasa(Y &&MD)
Check whether Metadata has a Value.
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
DiagnosticInfoOptimizationBase::Argument NV
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI bool willNotFreeBetween(const Instruction *Assume, const Instruction *CtxI)
Returns true if no instruction between Assume and CtxI may free memory and the function is marked as nosync.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI RetainedKnowledge getKnowledgeForValue(const Value *V, ArrayRef< Attribute::AttrKind > AttrKinds, AssumptionCache &AC, function_ref< bool(RetainedKnowledge, Instruction *, const CallBase::BundleOpInfo *)> Filter=[](auto...) { return true;})
Return a valid Knowledge associated to the Value V if its Attribute kind is in AttrKinds and it matches the Filter.
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), such that A is the 0-based index of the item in the sequence, and B, C, ... are the values from the original input ranges.
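A small runnable sketch covering the range helpers listed here (all_of, zip, drop_begin, enumerate) over plain vectors; the values are made up for illustration.

#include "llvm/ADT/STLExtras.h"
#include <vector>

static bool demoRangeHelpers() {
  std::vector<int> A = {1, 2, 3}, B = {4, 5, 6};
  bool AllPos = llvm::all_of(A, [](int X) { return X > 0; });
  int PairSum = 0;
  for (auto [X, Y] : llvm::zip(A, B)) // (1,4), (2,5), (3,6)
    PairSum += X + Y;
  int Weighted = 0;
  for (auto [Idx, V] : llvm::enumerate(llvm::drop_begin(A)))
    Weighted += static_cast<int>(Idx) * V; // Skips A[0]; Idx restarts at 0.
  return AllPos && PairSum == 21 && Weighted == 3;
}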
unsigned getPointerAddressSpace(const Type *T)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case of optionals) value is accepted.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
auto dyn_cast_or_null(const Y &Val)
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI std::optional< int64_t > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to calculate the distance between them.
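A hedged sketch (illustrative helper): with StrictCheck, a distance of exactly one element means the second load reads the slot immediately after the first.

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Returns true if L1 accesses the element immediately after L0.
static bool areAdjacentLoads(LoadInst *L0, LoadInst *L1, const DataLayout &DL,
                             ScalarEvolution &SE) {
  std::optional<int64_t> Diff =
      getPointersDiff(L0->getType(), L0->getPointerOperand(), L1->getType(),
                      L1->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
  return Diff && *Diff == 1;
}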
LLVM_ABI bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices, if reordering is required.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one, assuming the SCEV predicate associated with PSE is true.
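A sketch of the intended use (hypothetical helper): after specializing a symbolic stride to one, the pointer commonly becomes an affine AddRec that downstream dependence logic can reason about.

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
using namespace llvm;

// Version the stride to 1 (recording the predicate in PSE) and report the
// resulting AddRec, or null if the pointer still is not affine.
static const SCEVAddRecExpr *
getVersionedAddRec(PredicatedScalarEvolution &PSE,
                   const DenseMap<Value *, const SCEV *> &Strides,
                   Value *Ptr) {
  return dyn_cast<SCEVAddRecExpr>(replaceSymbolicStrideSCEV(PSE, Strides, Ptr));
}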
LLVM_ABI bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
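A minimal sketch (hypothetical helper) of the freeze decision this predicate supports: a value that cannot be proven undef/poison-free must be frozen before it is used in a branch such as a runtime check.

#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

// Returns true if V may be undef or poison and therefore needs a freeze
// before feeding branch conditions.
static bool needsFreeze(const Value *V, AssumptionCache *AC,
                        const Instruction *CtxI, const DominatorTree *DT) {
  return !isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT);
}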
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
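A tiny runnable example: bit_floor rounds down to a power of two, the usual way a candidate factor is clamped to a power-of-two value.

#include "llvm/ADT/bit.h"

// bit_floor(6) == 4, bit_floor(8) == 8, bit_floor(0) == 0.
static unsigned clampToPowerOfTwo(unsigned N) { return llvm::bit_floor(N); }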
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instructions and return multiple objects.
LLVM_ABI std::pair< const SCEV *, const SCEV * > getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC, const SCEV *MaxBTC, ScalarEvolution *SE, DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > *PointerBounds, DominatorTree *DT, AssumptionCache *AC, std::optional< ScalarEvolution::LoopGuards > &LoopGuards)
Calculate Start and End points of memory access using the exact backedge taken count BTC if computable, or the maximum backedge taken count MaxBTC otherwise.
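A hedged wrapper sketch matching the signature above (no PointerBounds caching; guards computed lazily on first use):

#include "llvm/Analysis/LoopAccessAnalysis.h"
using namespace llvm;

// Compute the [Start, End) bounds the access touches across the loop.
static std::pair<const SCEV *, const SCEV *>
computeAccessBounds(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
                    const SCEV *BTC, const SCEV *MaxBTC, ScalarEvolution *SE,
                    DominatorTree *DT, AssumptionCache *AC) {
  std::optional<ScalarEvolution::LoopGuards> Guards; // Populated on demand.
  return getStartAndEndForAccess(Lp, PtrExpr, AccessTy, BTC, MaxBTC, SE,
                                 /*PointerBounds=*/nullptr, DT, AC, Guards);
}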
LLVM_ABI std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DominatorTree &DT, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
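A short sketch (hypothetical helper): consecutive forward or reverse accesses are those with a stride of exactly +1 or -1 access-type elements per iteration.

#include "llvm/Analysis/LoopAccessAnalysis.h"
using namespace llvm;

// Returns true if Ptr advances by exactly one AccessTy element (in either
// direction) each iteration of Lp.
static bool isUnitStrided(PredicatedScalarEvolution &PSE, Type *AccessTy,
                          Value *Ptr, const Loop *Lp,
                          const DominatorTree &DT) {
  std::optional<int64_t> Stride = getPtrStride(PSE, AccessTy, Ptr, Lp, DT);
  return Stride && (*Stride == 1 || *Stride == -1);
}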
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
IR Values for the lower and upper bounds of a pointer evolution.
MDNode * Scope
The tag for alias scope specification (used with noalias).
MDNode * TBAA
The tag for type-based alias analysis.
MDNode * NoAlias
The tag specifying the noalias scope.
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
Instruction * getDestination(const MemoryDepChecker &DepChecker) const
Return the destination instruction of the dependence.
DepType Type
The type of the dependence.
unsigned Destination
Index of the destination of the dependence in the InstMap vector.
LLVM_ABI bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
Instruction * getSource(const MemoryDepChecker &DepChecker) const
Return the source instruction of the dependence.
LLVM_ABI bool isForward() const
Lexically forward dependence.
LLVM_ABI bool isBackward() const
Lexically backward dependence.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
unsigned Source
Index of the source of the dependence in the InstMap vector.
DepType
The type of the dependence.
@ BackwardVectorizableButPreventsForwarding
@ ForwardButPreventsForwarding
static LLVM_ABI const char * DepName[]
String version of the types.
static LLVM_ABI VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
Represents one piece of information held inside an operand bundle of an llvm.assume.
unsigned AddressSpace
Address space of the involved pointers.
LLVM_ABI bool addPointer(unsigned Index, const RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g. because it may be poison outside the loop.
LLVM_ABI RuntimeCheckingPtrGroup(unsigned Index, const RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
bool IsWritePtr
Holds whether this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
static LLVM_ABI const unsigned MaxVectorWidth
Maximum SIMD width.
static LLVM_ABI unsigned VectorizationFactor
VF as overridden by the user.
static LLVM_ABI unsigned RuntimeMemoryCheckThreshold
When performing memory disambiguation checks at runtime do not make more than this number of comparisons.
static LLVM_ABI bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static LLVM_ABI unsigned VectorizationInterleave
Interleave factor as overridden by the user.
static LLVM_ABI bool HoistRuntimeChecks
Function object to check whether the first component of a container supported by std::get (like std::pair and std::tuple) compares less than the first component of another container.