#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true> VectorizationFactor(
    "force-vector-width", cl::Hidden,
    cl::desc("Sets the SIMD width. Zero is autoselect."),
    cl::location(VectorizerParams::VectorizationFactor));

static cl::opt<unsigned, true> VectorizationInterleave(
    "force-vector-interleave", cl::Hidden,
    cl::desc("Sets the vectorization interleave count. "
             "Zero is autoselect."),
    cl::location(VectorizerParams::VectorizationInterleave));

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// We collect dependences up to this threshold.
static cl::opt<unsigned> MaxDependences(
    "max-dependences", cl::Hidden,
    cl::desc("Maximum number of dependences collected by "
             "loop-access analysis (default = 100)"),
    cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

static cl::opt<bool, true> HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc(
        "Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(false));
bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}
const SCEV *llvm::replaceSymbolicStrideSCEV(
    PredicatedScalarEvolution &PSE,
    const DenseMap<Value *, const SCEV *> &PtrToStride, Value *Ptr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
  if (SI == PtrToStride.end())
    // For a non-symbolic stride, just return the original expression.
    return OrigSCEV;

  const SCEV *StrideSCEV = SI->second;
  // Note: This assert is both overly strong and overly weak.  The actual
  // invariant here is that StrideSCEV should be loop invariant.
  assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
  // ...
  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
}
RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()),
      NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
  Members.push_back(Index);
}
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(PtrExpr, Lp)) {
    ScStart = ScEnd = PtrExpr;
  } else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
    assert(AR && "Invalid addrec expression");
    // ...
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    }
    // ...
  }
  // Add the size of the pointed element to ScEnd.
  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  Type *IdxTy = DL.getIndexType(Ptr->getType());
  const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
                        NeedsFreeze);
}
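// Illustrative bounds (sketch, not part of the original file): for an access
// {%base,+,4} over a loop with backedge-taken count BTC, the recorded range is
// roughly [ %base, %base + 4 * BTC + sizeof(elt) ): Start is the addrec's
// start and End is one element past the last address accessed.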
void RuntimePointerChecking::tryToCreateDiffCheck(
    const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
  if (!CanUseDiffCheck)
    return;

  // If either group contains multiple different pointers, bail out.
  if (CGI.Members.size() != 1 || CGJ.Members.size() != 1) {
    CanUseDiffCheck = false;
    return;
  }
  // ...
  // If either pointer is read and written, multiple checks may be needed.
  // Bail out.
  if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
      !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty()) {
    CanUseDiffCheck = false;
    return;
  }
  // If either pointer is accessed multiple times, there may not be a clear
  // src/sink relation. Bail out for now.
  if (AccSrc.size() != 1 || AccSink.size() != 1) {
    CanUseDiffCheck = false;
    return;
  }
  // If the sink is accessed before src, swap src/sink.
  if (AccSink[0] < AccSrc[0])
    std::swap(Src, Sink);

  auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
  auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
  if (!SrcAR || !SinkAR || SrcAR->getLoop() != SinkAR->getLoop()) {
    CanUseDiffCheck = false;
    return;
  }
  // ...
  if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy)) {
    CanUseDiffCheck = false;
    return;
  }
  const DataLayout &DL =
      SinkAR->getLoop()->getHeader()->getModule()->getDataLayout();
  unsigned AllocSize =
      std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));

  // Only matching constant steps matching the AllocSize are supported at the
  // moment. This simplifies the difference computation.
  auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
  if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
      Step->getAPInt().abs() != AllocSize) {
    CanUseDiffCheck = false;
    return;
  }
  // ...
  // When counting down, the dependence distance needs to be swapped.
  if (Step->getValue()->isNegative())
    std::swap(SinkAR, SrcAR);
  // ...
  if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
      isa<SCEVCouldNotCompute>(SrcStartInt)) {
    CanUseDiffCheck = false;
    return;
  }

  const Loop *InnerLoop = SrcAR->getLoop();
  // If the start values for both Src and Sink also vary according to an outer
  // loop, then it's probably better to avoid creating diff checks because
  // they may not be hoisted out of the outer loop.
  if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
      isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
    auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
    auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
    const Loop *StartARLoop = SrcStartAR->getLoop();
    if (StartARLoop == SinkStartAR->getLoop() &&
        StartARLoop == InnerLoop->getParentLoop()) {
      LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
                           "cannot be hoisted out of the outer loop\n");
      CanUseDiffCheck = false;
      return;
    }
  }

  LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
                    << "SrcStart: " << *SrcStartInt << '\n'
                    << "SinkStartInt: " << *SinkStartInt << '\n');
  DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
                          Src->NeedsFreeze || Sink->NeedsFreeze);
}
        tryToCreateDiffCheck(CGI, CGJ);
        Checks.push_back(std::make_pair(&CGI, &CGJ));
void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}
bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}
/// Compare \p I and \p J and return the minimum.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  // ...
  if (C->getValue()->isNegative())
    return J;
  return I;
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
                                         RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
                                         bool NeedsFreeze,
                                         ScalarEvolution &SE) {
  assert(AddressSpace == AS &&
         "all pointers in a checking group must be in the same address space");
  // ...
}
void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // ...
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
    auto Iter = PositionMap.insert({Pointers[Index].PointerValue, {}});
    Iter.first->second.push_back(Index);
  }
  // ...
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      auto PointerI = PositionMap.find(MI->getPointer());
      assert(PointerI != PositionMap.end() &&
             "pointer in equivalence class not found in PositionMap");
      for (unsigned Pointer : PointerI->second) {
        bool Merged = false;
        Seen.insert(Pointer);

        // Go through all the existing sets and see if we can find one
        // which can include this pointer.
        for (RuntimeCheckingPtrGroup &Group : Groups) {
          // Don't perform more than a certain amount of comparisons.
          if (TotalComparisons > MemoryCheckMergeThreshold)
            break;

          TotalComparisons++;

          if (Group.addPointer(Pointer, *this)) {
            Merged = true;
            break;
          }
        }

        if (!Merged)
          Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
      }
    }
  // ...
}
bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}
void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";
    // ...
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
    // ...
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
  // ...
  for (const auto &CG : CheckingGroups) {
    // ...
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}
class AccessAnalysis {
public:
  AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE) {
    // We're analyzing dependences across loop iterations.
    BAA.enableCrossIterationMode();
  }

  /// Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    // ...
    Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(MemoryLocation &Loc, Type *AccessTy) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    // ...
    Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access, Type *AccessTy,
                            const DenseMap<Value *, const SCEV *> &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool ShouldCheckStride, bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop,
                       const DenseMap<Value *, const SCEV *> &Strides,
                       Value *&UncomputablePtr, bool ShouldCheckWrap = false);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

  const DenseMap<Value *, SmallVector<const Value *, 16>> &
  getUnderlyingObjects() {
    return UnderlyingObjects;
  }

private:
  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Map of all accesses. Values are the types used to access memory
  /// pointed to by the pointer.
  PtrAccessMap Accesses;

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  // ...

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks.
  bool IsRTCheckAnalysisNeeded = false;
};
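// How this class is driven (sketch of the flow implemented below, not a new
// API): analyzeLoop() registers every pointer via addLoad()/addStore(), then
// calls buildDependenceSets() to partition accesses into alias sets and
// dependence candidates, and finally canCheckPtrAtRT() to decide whether the
// remaining may-alias pairs can be disambiguated with runtime checks.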
/// Check whether a pointer can participate in a runtime bounds check.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
                                const SCEV *PtrScev, Loop *L, bool Assume) {
  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);
  if (!AR)
    return false;

  return AR->isAffine();
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr,
                     Type *AccessTy, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
  if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  return false;
}
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
                          function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
  WorkList.push_back(StartPtr);

  while (!WorkList.empty()) {
    Value *Ptr = WorkList.pop_back_val();
    if (!Visited.insert(Ptr).second)
      continue;
    auto *PN = dyn_cast<PHINode>(Ptr);
    // SCEV does not look through non-header PHIs inside the loop. Such phis
    // can be analyzed by adding separate accesses for each incoming pointer
    // value.
    if (PN && InnermostLoop.contains(PN->getParent()) &&
        PN->getParent() != InnermostLoop.getHeader()) {
      for (const Use &Inc : PN->incoming_values())
        WorkList.push_back(Inc);
    } else
      AddPointer(Ptr);
  }
}

static void findForkedSCEVs(
    ScalarEvolution *SE, const Loop *L, Value *Ptr,
    SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
    unsigned Depth) {
  // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
  // we've exceeded our limit on recursion, just return whatever we have
  // regardless of whether it can be used for a forked pointer or not, along
  // with an indication of whether it might be a poison or undef value.
  const SCEV *Scev = SE->getSCEV(Ptr);
  if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
      !isa<Instruction>(Ptr) || Depth == 0) {
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    return;
  }
  Depth--;

  auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
    return get<1>(S);
  };

  auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
    switch (Opcode) {
    case Instruction::Add:
      return SE->getAddExpr(L, R);
    case Instruction::Sub:
      return SE->getMinusSCEV(L, R);
    default:
      llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
    }
  };

  Instruction *I = cast<Instruction>(Ptr);
  unsigned Opcode = I->getOpcode();
  switch (Opcode) {
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    Type *SourceTy = GEP->getSourceElementType();
    // We only handle base + single offset GEPs here for now.
    if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
      break;
    }
    // ...
    bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
                       any_of(OffsetScevs, UndefPoisonCheck);

    // If we have a mix of forked and unforked operands, duplicate the
    // unforked one so both lists have two entries; otherwise give up.
    if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
      BaseScevs.push_back(BaseScevs[0]);
    else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
      OffsetScevs.push_back(OffsetScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }
    // ...
    ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
                          NeedsFreeze);
    ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
                          NeedsFreeze);
    break;
  }
  case Instruction::Select: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A select means we've found a forked pointer, but we currently only
    // support a single select per pointer, so we may have to bail out.
    // ...
    if (ChildScevs.size() == 2) {
      ScevList.push_back(ChildScevs[0]);
      ScevList.push_back(ChildScevs[1]);
    } else
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
  case Instruction::PHI: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A phi means we've found a forked pointer, but we currently only
    // support a single phi per pointer, so we may have to bail out.
    if (I->getNumOperands() == 2) {
      // ...
    }
    if (ChildScevs.size() == 2) {
      ScevList.push_back(ChildScevs[0]);
      ScevList.push_back(ChildScevs[1]);
    } else
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    // ...
    bool NeedsFreeze =
        any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);

    // See above for reasoning: duplicate the unforked operand, or give up.
    if (LScevs.size() == 2 && RScevs.size() == 1)
      RScevs.push_back(RScevs[0]);
    else if (RScevs.size() == 2 && LScevs.size() == 1)
      LScevs.push_back(LScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }

    ScevList.emplace_back(
        GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
        NeedsFreeze);
    ScevList.emplace_back(
        GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
        NeedsFreeze);
    break;
  }
  default:
    // Just return the current SCEV if we haven't handled the instruction yet.
    LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
}
static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
findForkedPointer(PredicatedScalarEvolution &PSE,
                  const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
                  const Loop *L) {
  ScalarEvolution *SE = PSE.getSE();
  SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
  findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);

  // For now, we will only accept a forked pointer with two possible SCEVs
  // that are either SCEVAddRecExprs or loop invariant.
  if (Scevs.size() == 2 &&
      (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
       SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
      (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
       SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
    LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
    return Scevs;
  }

  return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
}
bool AccessAnalysis::createCheckForAccess(
    RuntimePointerChecking &RtCheck, MemAccessInfo Access, Type *AccessTy,
    const DenseMap<Value *, const SCEV *> &StridesMap,
    DenseMap<Value *, unsigned> &DepSetId, Loop *TheLoop,
    unsigned &RunningDepId, unsigned ASId, bool ShouldCheckWrap, bool Assume) {
  Value *Ptr = Access.getPointer();

  SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
      findForkedPointer(PSE, StridesMap, Ptr, TheLoop);

  for (auto &P : TranslatedPtrs) {
    const SCEV *PtrExpr = get<0>(P);
    if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
      return false;

    // When we run after a failing dependency check we have to make sure
    // we don't have wrapping pointers.
    if (ShouldCheckWrap) {
      // Skip wrap checking when translating pointers.
      if (TranslatedPtrs.size() > 1)
        return false;

      if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
        const SCEV *Expr = PSE.getSCEV(Ptr);
        if (!Assume || !isa<SCEVAddRecExpr>(Expr))
          return false;
        PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      }
    }
    // If there's only one option for Ptr, look it up after bounds and wrap
    // checking, because assumptions might have been added to PSE.
    if (TranslatedPtrs.size() == 1)
      TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
                           false};
  }

  for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
    // The id of the dependence set.
    unsigned DepId;

    if (isDependencyCheckNeeded()) {
      Value *Leader = DepCands.getLeaderValue(Access).getPointer();
      unsigned &LeaderId = DepSetId[Leader];
      if (!LeaderId)
        LeaderId = RunningDepId++;
      DepId = LeaderId;
    } else
      // Each access has its own dependence set.
      DepId = RunningDepId++;

    bool IsWrite = Access.getInt();
    RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
                   NeedsFreeze);
    LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
  }

  return true;
}
bool AccessAnalysis::canCheckPtrAtRT(
    RuntimePointerChecking &RtCheck, ScalarEvolution *SE, Loop *TheLoop,
    const DenseMap<Value *, const SCEV *> &StridesMap,
    Value *&UncomputablePtr, bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded)
    return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to access from different alias sets.
  // Accesses between different groups doesn't need to be checked.
  unsigned ASId = 0;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;
    ++ASId;

    // We assign consecutive id to access from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;

    // First, count how many write and read accesses are in the alias set. Also
    // collect MemAccessInfos for later.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (const auto &A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;
      AccessInfos.emplace_back(Ptr, IsWrite);
    }

    // We do not need runtime checks for this alias set, if there are no writes
    // or a single write and no reads.
    if (NumWritePtrChecks == 0 ||
        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
      assert((AS.size() <= 1 ||
              all_of(AS,
                     [this](auto AC) {
                       MemAccessInfo AccessWrite(AC.getValue(), true);
                       return DepCands.findValue(AccessWrite) == DepCands.end();
                     })) &&
             "Can only skip updating CanDoRT below, if all entries in AS "
             "are reads or there is at most 1 entry");
      continue;
    }

    for (auto &Access : AccessInfos) {
      for (const auto &AccessTy : Accesses[Access]) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, false)) {
          LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
                            << *Access.getPointer() << '\n');
          Retries.push_back({Access, AccessTy});
          CanDoAliasSetRT = false;
        }
      }
    }

    // Note that this function computes CanDoRT and MayNeedRTCheck
    // independently. For example CanDoRT=false, MayNeedRTCheck=false means
    // that we have a pointer for which we couldn't find the bounds but we
    // don't actually need to emit any checks, so it does not matter.
    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (auto Retry : Retries) {
        MemAccessInfo Access = Retry.first;
        Type *AccessTy = Retry.second;
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          UncomputablePtr = Access.getPointer();
          break;
        }
      }
    }

    CanDoRT &= CanDoAliasSetRT;
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // ...
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (MayNeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  // If we can do run-time checks, but there are no checks, no runtime checks
  // are needed. This can happen when all pointers point to the same underlying
  // object for example.
  RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;

  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}
void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
  LLVM_DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.first.getPointer() << " ("
             << (A.first.getInt()
                     ? "write"
                     : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"
                                                                : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (const auto &AS : AST) {
    // ...
    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessMap DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;

      for (const auto &AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (const auto &AC : S) {
          if (AC.first.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.first.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers).
          if (!UseDeferred && IsReadOnlyPtr) {
            // We only use the pointer keys, the types vector values don't
            // matter.
            DeferredAccesses.insert({Access, {}});
            continue;
          }

          // If this is a write - check other reads and writes for conflicts.
          // If this is a read only check other writes for conflicts (but only
          // if there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<const Value *, 16> ValueVector;
          ValueVector TempObjects;

          UnderlyingObjects[Ptr] = {};
          SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
          ::getUnderlyingObjects(Ptr, UOs, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (const Value *UnderlyingObj : UOs) {
            // nullptr never alias, don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}
/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // ...
  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : GEP->indices())
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume constant for other the operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}
std::optional<int64_t>
llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
                   const Loop *Lp,
                   const DenseMap<Value *, const SCEV *> &StridesMap,
                   bool Assume, bool ShouldCheckWrap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  if (isa<ScalableVectorType>(AccessTy)) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
                      << "\n");
    return std::nullopt;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return std::nullopt;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                      << *Ptr << " SCEV: " << *AR << "\n");
    return std::nullopt;
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                      << " SCEV: " << *AR << "\n");
    return std::nullopt;
  }

  const auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
  int64_t Size = AllocSize.getFixedValue();
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return std::nullopt;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return std::nullopt;

  if (!ShouldCheckWrap)
    return Stride;

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
    return Stride;

  // An inbounds getelementptr that is an AddRec with a unit stride cannot
  // wrap per definition.  If it did, the result would be poison and any
  // memory access dependent on it would be immediate UB when executed.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
      GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
    return Stride;

  // If the null pointer is undefined, then a access sequence which would
  // otherwise access it can be assumed not to unsigned wrap.
  unsigned AddrSpace = Ty->getPointerAddressSpace();
  if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
      (Stride == 1 || Stride == -1))
    return Stride;

  if (Assume) {
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
                      << "LAA: Pointer: " << *Ptr << "\n"
                      << "LAA: SCEV: " << *AR << "\n"
                      << "LAA: Added an overflow assumption\n");
    return Stride;
  }
  LLVM_DEBUG(
      dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
             << *Ptr << " SCEV: " << *AR << "\n");
  return std::nullopt;
}
std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
                                         Type *ElemTyB, Value *PtrB,
                                         const DataLayout &DL,
                                         ScalarEvolution &SE, bool StrictCheck,
                                         bool CheckType) {
  assert(PtrA && PtrB && "Expected non-nullptr pointers.");

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return 0;

  // Make sure that the element types are the same if required.
  if (CheckType && ElemTyA != ElemTyB)
    return std::nullopt;

  unsigned ASA = PtrA->getType()->getPointerAddressSpace();
  unsigned ASB = PtrB->getType()->getPointerAddressSpace();

  // Check that the address spaces match.
  if (ASA != ASB)
    return std::nullopt;
  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  int Val;
  if (PtrA1 == PtrB1) {
    // Retrieve the address space again as pointer stripping now tracks through
    // `addrspacecast`.
    ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
    ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
    // Check that the address spaces match and that the pointers are valid.
    if (ASA != ASB)
      return std::nullopt;

    IdxWidth = DL.getIndexSizeInBits(ASA);
    OffsetA = OffsetA.sextOrTrunc(IdxWidth);
    OffsetB = OffsetB.sextOrTrunc(IdxWidth);

    OffsetB -= OffsetA;
    Val = OffsetB.getSExtValue();
  } else {
    // Otherwise compute the distance with SCEV between the base pointers.
    const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
    const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
    const auto *Diff =
        dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
    if (!Diff)
      return std::nullopt;
    Val = Diff->getAPInt().getSExtValue();
  }
  int Size = DL.getTypeStoreSize(ElemTyA);
  int Dist = Val / Size;

  // Ensure that the calculated distance matches the type-based one after all
  // the bitcasts removal in the provided pointers.
  if (!StrictCheck || Dist * Size == Val)
    return Dist;
  return std::nullopt;
}
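// Illustrative use (sketch; assumes i32 elements and a DataLayout/SE in
// scope): for two GEPs %a = &A[0] and %b = &A[3], getPointersDiff(i32, %a,
// i32, %b, DL, SE) returns 3 -- the distance in units of the element size,
// not in bytes.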
bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
                           const DataLayout &DL, ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
  // Walk over the pointers, and map each of them to an offset relative to
  // first pointer in the array.
  Value *Ptr0 = VL[0];

  using DistOrdPair = std::pair<int64_t, int>;
  auto Compare = llvm::less_first();
  std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
  Offsets.emplace(0, 0);
  int Cnt = 1;
  bool IsConsecutive = true;
  for (auto *Ptr : VL.drop_front()) {
    std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
                                              /*StrictCheck=*/true);
    if (!Diff)
      return false;

    // Check if the pointer with the same offset is found.
    int64_t Offset = *Diff;
    auto Res = Offsets.emplace(Offset, Cnt);
    if (!Res.second)
      return false;
    // Consecutive order if the inserted element is the last one.
    IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
    ++Cnt;
  }
  SortedIndices.clear();
  if (!IsConsecutive) {
    // Fill SortedIndices with the position of each pointer in offset order.
    SortedIndices.resize(VL.size());
    Cnt = 0;
    for (const std::pair<int64_t, int> &Pair : Offsets) {
      SortedIndices[Cnt] = Pair.second;
      ++Cnt;
    }
  }
  return true;
}
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  // ...
  std::optional<int> Diff =
      getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
                      /*StrictCheck=*/true, CheckType);
  return Diff && *Diff == 1;
}
void MemoryDepChecker::addAccess(StoreInst *SI) {
  visitPointers(SI->getPointerOperand(), *InnermostLoop,
                [this, SI](Value *Ptr) {
                  Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
                  InstMap.push_back(SI);
                  ++AccessIdx;
                });
}

void MemoryDepChecker::addAccess(LoadInst *LI) {
  visitPointers(LI->getPointerOperand(), *InnermostLoop,
                [this, LI](Value *Ptr) {
                  Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
                  InstMap.push_back(LI);
                  ++AccessIdx;
                });
}
bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
  case IndirectUnsafe:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
  case IndirectUnsafe:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}
bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // ...
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load are
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >> 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MinDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}
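// Worked example (illustrative numbers): with Distance = 24 bytes and
// TypeByteSize = 4, NumItersForStoreLoadThroughMemory is 32 and the candidate
// byte-VFs are 8, 16, ...; 24 % 8 == 0 passes, but 24 % 16 != 0 with
// 24 / 16 < 32 trips the check, capping MaxVFWithoutSLForwardIssues at 8
// bytes (two i32 lanes).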
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &BackedgeTakenCount,
                                     const SCEV &Dist, uint64_t Stride,
                                     uint64_t TypeByteSize) {
  // ...
  const uint64_t ByteStride = Stride * TypeByteSize;
  const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
  const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);

  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
  uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());

  // The dependence distance can be positive/negative, so we sign extend Dist;
  // the product of the absolute stride in bytes and the backedge-taken count
  // is non-negative, so we zero extend Product.
  if (DistTypeSizeBits > ProductTypeSizeBits)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  else
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
  // ...
}

static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not multiple of type byte size.
  if (Distance % TypeByteSize)
    return false;

  uint64_t ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not multiple of the stride.
  return ScaledDist % Stride;
}
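// Worked example (illustrative): for
//   for (i = 0; i < 1024; i += 4)
//     A[i + 2] = A[i] + 1;
// the scaled distance is 2 and the stride is 4; since 2 % 4 != 0, the two
// access streams interleave without ever touching the same element, so the
// accesses are independent.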
static bool isLoopVariantIndirectAddress(
    ArrayRef<const Value *> UnderlyingObjects, ScalarEvolution &SE,
    const Loop *L) {
  return any_of(UnderlyingObjects, [&SE, L](const Value *UO) {
    return !SE.isLoopInvariant(SE.getSCEV(const_cast<Value *>(UO)), L);
  });
}
static std::variant<MemoryDepChecker::Dependence::DepType,
                    std::tuple<const SCEV *, uint64_t, uint64_t, bool, bool>>
getDependenceDistanceStrideAndSize(
    const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
    const AccessAnalysis::MemAccessInfo &B, Instruction *BInst,
    const DenseMap<Value *, const SCEV *> &Strides,
    const DenseMap<Value *, SmallVector<const Value *, 16>> &UnderlyingObjects,
    PredicatedScalarEvolution &PSE, const Loop *InnermostLoop) {
  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  auto &SE = *PSE.getSE();
  auto [APtr, AIsWrite] = A;
  auto [BPtr, BIsWrite] = B;

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return MemoryDepChecker::Dependence::NoDep;

  Type *ATy = getLoadStoreType(AInst);
  Type *BTy = getLoadStoreType(BInst);

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return MemoryDepChecker::Dependence::Unknown;

  int64_t StrideAPtr =
      getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true).value_or(0);
  int64_t StrideBPtr =
      getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true).value_or(0);

  const SCEV *Src = PSE.getSCEV(APtr);
  const SCEV *Sink = PSE.getSCEV(BPtr);

  // If the induction step is negative we have to invert source and sink of
  // the dependence.
  if (StrideAPtr < 0) {
    std::swap(APtr, BPtr);
    std::swap(ATy, BTy);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AInst, BInst);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = SE.getMinusSCEV(Sink, Src);

  LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
                    << "(Induction step: " << StrideAPtr << ")\n");
  LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
                    << ": " << *Dist << "\n");

  // ...

  // Need accesses with constant strides and the same direction. We don't want
  // to vectorize "A[B[i]] += ..." and similar code or pointer arithmetic that
  // could wrap in the address space.
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
    LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return MemoryDepChecker::Dependence::Unknown;
  }

  uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
  bool HasSameSize =
      DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
  if (!HasSameSize)
    TypeByteSize = 0;
  uint64_t Stride = std::abs(StrideAPtr);
  return std::make_tuple(Dist, Stride, TypeByteSize, AIsWrite, BIsWrite);
}
MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(
    const MemAccessInfo &A, unsigned AIdx, const MemAccessInfo &B,
    unsigned BIdx, const DenseMap<Value *, const SCEV *> &Strides,
    const DenseMap<Value *, SmallVector<const Value *, 16>>
        &UnderlyingObjects) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  // Get the dependence distance, stride, type size and what access writes for
  // the dependence between A and B.
  auto Res = getDependenceDistanceStrideAndSize(
      A, InstMap[AIdx], B, InstMap[BIdx], Strides, UnderlyingObjects, PSE,
      InnermostLoop);
  if (std::holds_alternative<Dependence::DepType>(Res))
    return std::get<Dependence::DepType>(Res);

  const auto &[Dist, Stride, TypeByteSize, AIsWrite, BIsWrite] =
      std::get<std::tuple<const SCEV *, uint64_t, uint64_t, bool, bool>>(Res);
  bool HasSameSize = TypeByteSize > 0;

  ScalarEvolution &SE = *PSE.getSE();
  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  // If the distance between the accesses is larger than their absolute stride
  // multiplied by the backedge taken count, the accesses are independent.
  if (!isa<SCEVCouldNotCompute>(Dist) && HasSameSize &&
      isSafeDependenceDistance(DL, SE, *(PSE.getBackedgeTakenCount()), *Dist,
                               Stride, TypeByteSize))
    return Dependence::NoDep;

  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    FoundNonConstantDistanceDependence = true;
    return Dependence::Unknown;
  }

  const APInt &Val = C->getAPInt();
  int64_t Distance = Val.getSExtValue();

  // If the distance between accesses and their strides are known constants,
  // check whether the accesses interlace each other.
  if (std::abs(Distance) > 0 && Stride > 1 && HasSameSize &&
      areStridedAccessesIndependent(std::abs(Distance), Stride,
                                    TypeByteSize)) {
    LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }

  // Negative distances are not plausible dependencies.
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    if (IsTrueDataDependence && EnableForwardingConflictDetection &&
        (!HasSameSize || couldPreventStoreLoadForward(Val.abs().getZExtValue(),
                                                      TypeByteSize))) {
      LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
      return Dependence::ForwardButPreventsForwarding;
    }

    LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
    return Dependence::Forward;
  }

  // Write to the same location with the same size.
  if (Val == 0) {
    if (HasSameSize)
      return Dependence::Forward;
    LLVM_DEBUG(
        dbgs() << "LAA: Zero dependence difference but different type sizes\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (!HasSameSize) {
    LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
                         "different type sizes\n");
    return Dependence::Unknown;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

  // ...
  uint64_t MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
                      << Distance << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than the smallest
  // dependence distance seen so far.
  if (MinDistanceNeeded > MinDepDistBytes) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
                      << MinDistanceNeeded << " size in bytes\n");
    return Dependence::Backward;
  }

  MinDepDistBytes =
      std::min(static_cast<uint64_t>(Distance), MinDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  uint64_t MinDepDistBytesOld = MinDepDistBytes;
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      couldPreventStoreLoadForward(Distance, TypeByteSize)) {
    // Sanity check that we didn't update MinDepDistBytes when calling
    // couldPreventStoreLoadForward above.
    assert(MinDepDistBytes == MinDepDistBytesOld &&
           "An update to MinDepDistBytes requires an update to "
           "MaxSafeVectorWidthInBits");
    (void)MinDepDistBytesOld;
    return Dependence::BackwardVectorizableButPreventsForwarding;
  }

  // An update to MinDepDistBytes requires an update to
  // MaxSafeVectorWidthInBits.
  uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * Stride);
  LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
                    << " with max VF = " << MaxVF << '\n');
  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
  MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
  return Dependence::BackwardVectorizable;
}
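// Worked example (illustrative): for i32 accesses (TypeByteSize = 4) with
// unit stride and a forced VF*UF of 4, MinNumIter = 4 and
// MinDistanceNeeded = 4 * 1 * 3 + 4 = 16 bytes. A backward dependence with a
// 12-byte distance is therefore rejected, while a 16-byte distance admits
// vectorization with MaxVF = 16 / 4 = 4 lanes.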
bool MemoryDepChecker::areDepsSafe(
    DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
    const DenseMap<Value *, const SCEV *> &Strides,
    const DenseMap<Value *, SmallVector<const Value *, 16>>
        &UnderlyingObjects) {

  MinDepDistBytes = -1;
  SmallPtrSet<MemAccessInfo, 8> Visited;
  for (MemAccessInfo CurAccess : CheckDeps) {
    if (Visited.count(CurAccess))
      continue;
    // ...
    while (AI != AE) {
      Visited.insert(*AI);
      bool AIIsWrite = AI->getInt();
      // Check loads only against next equivalent class, but stores also
      // against other stores in the same equivalence class - to the same
      // address.
      EquivalenceClasses<MemAccessInfo>::member_iterator OI =
          (AIIsWrite ? AI : std::next(AI));
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          // Scan all accesses of another equivalence class, but only the next
          // accesses of the same equivalent class.
          for (std::vector<unsigned>::iterator
                   I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
                   I2E = (OI == AI ? I1E : Accesses[*OI].end());
               I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides,
                            UnderlyingObjects);
            mergeInStatus(Dependence::isSafeForVectorization(Type));

            // Gather dependences unless we accumulated MaxDependences
            // dependences.  In that case return as soon as we find the first
            // unsafe dependence.  This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                LLVM_DEBUG(dbgs()
                           << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !isSafeForVectorization())
              return false;
          }
        ++OI;
      }
      ++AI;
    }
  }

  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return isSafeForVectorization();
}
SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  transform(IndexVector,
            std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep",
    "Unknown",
    "IndirectUnsafe",
    "Forward",
    "ForwardButPreventsForwarding",
    "Backward",
    "BackwardVectorizable",
    "BackwardVectorizableButPreventsForwarding"};
bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
                    << TheLoop->getHeader()->getParent()->getName() << ": "
                    << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->isInnermost()) {
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE->getBackedgeTakenCount();
  if (isa<SCEVCouldNotCompute>(ExitCount)) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}
void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  bool HasComplexMemInst = false;

  // A runtime check is only legal to insert if there are no convergent calls.
  HasConvergentOp = false;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  const bool EnableMemAccessVersioningOfLoop =
      EnableMemAccessVersioning &&
      !TheLoop->getHeader()->getParent()->hasOptSize();

  // Traverse blocks in fixed RPOT order, regardless of their storage in the
  // loop info, as it may be arbitrary.
  LoopBlocksRPO RPOT(TheLoop);
  RPOT.perform(LI);
  for (BasicBlock *BB : RPOT) {
    // Scan the BB and collect legal loads and stores. Also detect any
    // convergent instructions.
    for (Instruction &I : *BB) {
      if (auto *Call = dyn_cast<CallBase>(&I)) {
        if (Call->isConvergent())
          HasConvergentOp = true;
      }

      // With both a non-vectorizable memory instruction and a convergent
      // operation, found in this loop, no reason to continue the search.
      if (HasComplexMemInst && HasConvergentOp) {
        CanVecMem = false;
        return;
      }

      // Avoid hitting recordAnalysis multiple times.
      if (HasComplexMemInst)
        continue;

      // Many math library functions read the rounding mode. We will only
      // vectorize a loop if it contains known function calls that don't set
      // the flag. Therefore, it is safe to ignore this read from memory.
      auto *Call = dyn_cast<CallInst>(&I);
      if (Call && getVectorIntrinsicIDForCall(Call, TLI))
        continue;

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, we quit. Notice that we don't handle function
      // calls that read or write.
      if (I.mayReadFromMemory()) {
        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            !VFDatabase::getMappings(*Call).empty())
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld) {
          recordAnalysis("CantVectorizeInstruction", Ld)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!Ld->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          HasComplexMemInst = true;
          continue;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          HasComplexMemInst = true;
          continue;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  if (HasComplexMemInst) {
    CanVecMem = false;
    return;
  }

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE);

  // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list).
  SmallSet<std::pair<Value *, Type *>, 16> Seen;

  // Record uniform store addresses to identify if we have multiple stores
  // to the same address.
  SmallPtrSet<Value *, 16> UniformStores;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();

    if (isInvariant(Ptr)) {
      // Record store instructions to loop invariant addresses.
      StoresToInvariantAddresses.push_back(ST);
      HasDependenceInvolvingLoopInvariantAddress |=
          !UniformStores.insert(Ptr).second;
    }

    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    Type *AccessTy = getLoadStoreType(ST);
    if (Seen.insert({Ptr, AccessTy}).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
                    [&Accesses, AccessTy, Loc](Value *Ptr) {
                      MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
                      Accesses.addStore(NewLoc, AccessTy);
                    });
    }
  }

  if (IsAnnotatedParallel) {
    LLVM_DEBUG(
        dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
               << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x; because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    bool IsReadOnlyPtr = false;
    Type *AccessTy = getLoadStoreType(LD);
    if (Seen.insert({Ptr, AccessTy}).second ||
        !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides)
             .value_or(0)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    // See if there is an unsafe dependency between a load to a uniform
    // address and store to the same uniform address.
    if (UniformStores.count(Ptr)) {
      LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                           "load and uniform store to the same address!\n");
      HasDependenceInvolvingLoopInvariantAddress = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
                  [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
                    MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
                    Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
                  });
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then is it safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this
  // information to place a runtime bound check.
  Value *UncomputablePtr = nullptr;
  bool CanDoRTIfNeeded =
      Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
                               SymbolicStrides, UncomputablePtr, false);
  if (!CanDoRTIfNeeded) {
    auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
    recordAnalysis("CantIdentifyArrayBounds", I)
        << "cannot identify array bounds";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                      << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  LLVM_DEBUG(
      dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker->areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides,
        Accesses.getUnderlyingObjects());

    if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
      LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      UncomputablePtr = nullptr;
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
          *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
        recordAnalysis("CantCheckMemDepsAtRunTime", I)
            << "cannot check memory dependencies at runtime";
        LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (HasConvergentOp) {
    recordAnalysis("CantInsertRuntimeCheckWithConvergent")
        << "cannot add control dependency to convergent operation";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
                         "would be needed with a convergent operation\n");
    CanVecMem = false;
    return;
  }

  if (CanVecMem)
    LLVM_DEBUG(
        dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
               << (PtrRtChecking->Need ? "" : " don't")
               << " need runtime memory checks.\n");
  else
    emitUnsafeDependenceRemark();
}
void LoopAccessInfo::emitUnsafeDependenceRemark() {
  auto Deps = getDepChecker().getDependences();
  if (!Deps)
    return;
  auto Found = llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
    return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
           MemoryDepChecker::VectorizationSafetyStatus::Safe;
  });
  if (Found == Deps->end())
    return;
  MemoryDepChecker::Dependence Dep = *Found;

  LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");

  // Emit remark for first unsafe dependence.
  bool HasForcedDistribution = false;
  std::optional<const MDOperand *> Value =
      findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
  if (Value) {
    const MDOperand *Op = *Value;
    assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
    HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
  }

  const std::string Info =
      HasForcedDistribution
          ? "unsafe dependent memory operations in loop."
          : "unsafe dependent memory operations in loop. Use "
            "#pragma clang loop distribute(enable) to allow loop distribution "
            "to attempt to isolate the offending operations into a separate "
            "loop";
  OptimizationRemarkAnalysis &R =
      recordAnalysis("UnsafeDep", Dep.getDestination(*this)) << Info;

  switch (Dep.Type) {
  case MemoryDepChecker::Dependence::NoDep:
  case MemoryDepChecker::Dependence::Forward:
  case MemoryDepChecker::Dependence::BackwardVectorizable:
    llvm_unreachable("Unexpected dependence");
  case MemoryDepChecker::Dependence::Backward:
    R << "\nBackward loop carried data dependence.";
    break;
  case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
    R << "\nForward loop carried data dependence that prevents "
         "store-to-load forwarding.";
    break;
  case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
    R << "\nBackward loop carried data dependence that prevents "
         "store-to-load forwarding.";
    break;
  case MemoryDepChecker::Dependence::IndirectUnsafe:
    R << "\nUnsafe indirect dependence.";
    break;
  case MemoryDepChecker::Dependence::Unknown:
    R << "\nUnknown data dependence.";
    break;
  }

  if (Instruction *I = Dep.getSource(*this)) {
    DebugLoc SourceLoc = I->getDebugLoc();
    if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
      SourceLoc = DD->getDebugLoc();
    if (SourceLoc)
      R << " Memory location is the same as accessed at "
        << ore::NV("Location", SourceLoc);
  }
}
OptimizationRemarkAnalysis &
LoopAccessInfo::recordAnalysis(StringRef RemarkName, Instruction *I) {
  assert(!Report && "Multiple reports generated");

  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
                                                        DL, CodeRegion);
  return *Report;
}

bool LoopAccessInfo::isInvariant(Value *V) const {
  auto *SE = PSE->getSE();
  if (!SE->isSCEVable(V->getType()))
    return false;
  const SCEV *S = SE->getSCEV(V);
  return SE->isLoopInvariant(S, TheLoop);
}
/// Find the operand of the GEP that should be checked for consecutive stores.
static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP
    // we can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, return the operand identified by
/// getGEPInductionOperand; otherwise return the original pointer.
static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}
/// If a value has only one user that is a CastInst, return it.
static Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}
/// Get the stride of a pointer access in a loop. Looks for symbolic strides
/// "a[i*stride]". Returns the symbolic stride, or nullptr otherwise.
static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
                                        Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually index at
  // this point) easier to analyze. If OrigPtr is equal to Ptr we are
  // analyzing the pointer, otherwise, we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;
  // ...
  V = S->getStepRecurrence(*SE);

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
      // ...
      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U) {
    const auto *C = dyn_cast<SCEVIntegralCastExpr>(V);
    if (!C)
      return nullptr;
    U = dyn_cast<SCEVUnknown>(C->getOperand());
    if (!U)
      return nullptr;
    // ...
  }

  return V;
}
void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = getLoadStorePointerOperand(MemAccess);
  if (!Ptr)
    return;

  // Note: getStrideFromPointer is a *profitability* heuristic; only pointers
  // whose stride computation it recognizes are worth versioning.
  const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!StrideExpr)
    return;

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");

  if (!SpeculateUnitStride) {
    LLVM_DEBUG(dbgs() << "  Chose not to due to -laa-speculate-unit-stride\n");
    return;
  }

  // ...
  // Match the types so we can compare the stride and the BETakenCount.
  // The Stride can be positive/negative, so we sign extend Stride;
  // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = BETakenCount;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSizeBits >= StrideTypeSizeBits)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
  const SCEV *StrideMinusBETaken =
      SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking
  // "Stride >= TripCount" is equivalent to checking
  // "Stride - BETakenCount > 0".
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  // Strip back off the integer cast, and check that our result is a
  // SCEVUnknown as we expect.
  const SCEV *StrideBase = StrideExpr;
  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
    StrideBase = C->getOperand();
  SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
}
LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetLibraryInfo *TLI, AAResults *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(nullptr),
      DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L) {
  PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
  if (canAnalyzeLoop()) {
    analyzeLoop(AA, LI, TLI, DT);
  }
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    // ...
      OS << " with a maximum safe vector width of "
         << DepChecker->getMaxSafeVectorWidthInBits() << " bits";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (const auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Non vectorizable stores to invariant address were "
                   << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}
const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
  auto I = LoopAccessInfoMap.insert({&L, nullptr});

  if (I.second)
    I.first->second =
        std::make_unique<LoopAccessInfo>(&L, &SE, TLI, &AA, &DT, &LI);

  return *I.first->second;
}