#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true> VectorizationFactor(
    "force-vector-width", cl::Hidden,
    cl::desc("Sets the SIMD width. Zero is autoselect."),
    cl::location(VectorizerParams::VectorizationFactor));

static cl::opt<unsigned, true> VectorizationInterleave(
    "force-vector-interleave", cl::Hidden,
    cl::desc("Sets the vectorization interleave count. "
             "Zero is autoselect."),
    cl::location(VectorizerParams::VectorizationInterleave));

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// We collect dependences up to this threshold.
static cl::opt<unsigned> MaxDependences(
    "max-dependences", cl::Hidden,
    cl::desc("Maximum number of dependences collected by "
             "loop-access analysis (default = 100)"),
    cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses.
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

static cl::opt<bool, true> HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc(
        "Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true));
bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}
// replaceSymbolicStrideSCEV (fragment): rewrite a symbolic stride to 1 under
// a SCEV predicate.
  if (SI == PtrToStride.end())
    return OrigSCEV;

  const SCEV *StrideSCEV = SI->second;
  // Note: This assert is only correct because StrideSCEV was computed from
  // the pointer itself.
  assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
  // ...
  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
// RuntimeCheckingPtrGroup constructor (fragment):
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
/// Calculate Start and End points of memory access.
static std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
    const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
    PredicatedScalarEvolution &PSE,
    DenseMap<std::pair<const SCEV *, Type *>,
             std::pair<const SCEV *, const SCEV *>> &PointerBounds) {
  ScalarEvolution *SE = PSE.getSE();
  auto [Iter, Ins] = PointerBounds.insert(
      {{PtrExpr, AccessTy},
       {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
  if (!Ins)
    return Iter->second;
  // ...
    ScStart = ScEnd = PtrExpr;
  } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    }
  }
  // ...
  Iter->second = {ScStart, ScEnd};
  return Iter->second;
}
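
// Worked example (illustrative, not from the original file): for the add-rec
// pointer {%base,+,4} in a loop whose symbolic max backedge-taken count is 7,
// ScStart is %base and ScEnd is the value at iteration 7, i.e. %base + 28;
// the end is then extended by the access size so the half-open range
// [ScStart, ScEnd) covers the final load/store as well.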
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
  const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
      Lp, PtrExpr, AccessTy, PSE, DC.getPointerBounds());
  assert(!isa<SCEVCouldNotCompute>(ScStart) &&
         !isa<SCEVCouldNotCompute>(ScEnd) &&
         "must be able to compute both start and end expressions");
  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
                        NeedsFreeze);
}
bool RuntimePointerChecking::tryToCreateDiffCheck(
    const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
  // ...
  // If either group contains multiple different pointers, bail out.
  if (AccSrc.size() != 1 || AccSink.size() != 1)
    return false;

  // If the sink is accessed before src, swap src/sink.
  if (AccSink[0] < AccSrc[0])
    std::swap(Src, Sink);

  auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
  auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
  // ...
  if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
    return false;

  const DataLayout &DL =
      SinkAR->getLoop()->getHeader()->getDataLayout();
  unsigned AllocSize =
      std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));

  // Only matching constant steps matching the AllocSize are supported at the
  // moment.
  auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
  if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
      Step->getAPInt().abs() != AllocSize)
    return false;
  // ...
  // When counting down, the dependence distance needs to be swapped.
  if (Step->getValue()->isNegative())
    std::swap(SinkAR, SrcAR);
  // ...
  if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
      isa<SCEVCouldNotCompute>(SrcStartInt))
    return false;

  const Loop *InnerLoop = SrcAR->getLoop();
  // If the start values for both Src and Sink also vary according to an outer
  // loop, then it's probably better to avoid creating diff checks because
  // they may not be hoisted.
  if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
      isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
    auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
    auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
    const Loop *StartARLoop = SrcStartAR->getLoop();
    if (StartARLoop == SinkStartAR->getLoop() &&
        StartARLoop == InnerLoop->getParentLoop() &&
        // If the diff checks would already be loop invariant (due to the
        // recurrences being the same), then we prefer to keep them.
        SrcStartAR->getStepRecurrence(*SE) !=
            SinkStartAR->getStepRecurrence(*SE)) {
      LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
                           "cannot be hoisted out of the outer loop\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
                    << "SrcStart: " << *SrcStartInt << '\n'
                    << "SinkStartInt: " << *SinkStartInt << '\n');
  DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
                          Src->NeedsFreeze || Sink->NeedsFreeze);
  return true;
}
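
// Sketch of the idea (hedged; the expansion itself lives elsewhere): instead
// of the usual two-comparison overlap test
//   Src.Start < Sink.End && Sink.Start < Src.End
// a single subtraction-based check over (SinkStartInt - SrcStartInt) suffices
// for two equal-step, equal-size accesses: the pair is only unsafe when that
// difference is a small positive distance that a vectorized iteration could
// step over, so one comparison against VF * AllocSize disambiguates them.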
// generateChecks() (fragment): for each pair of groups that needs checking,
// also try the cheaper pointer-difference check.
      if (needsChecking(CGI, CGJ)) {
        CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
        Checks.emplace_back(&CGI, &CGJ);
      }
// ...

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (const auto &I : M.Members)
    for (const auto &J : N.Members)
      if (needsChecking(I, J))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  std::optional<APInt> Diff = SE->computeConstantDifference(J, I);
  if (!Diff)
    return nullptr;
  return Diff->isNegative() ? J : I;
}
bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
                                         RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
}
// ...
  assert(AddressSpace == AS &&
         "all pointers in a checking group must be in the same address space");
void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // ...
  if (!UseDependencies) {
    // Without dependence information, each pointer gets its own group.
    // ...
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue].push_back(Index);
  // ...
      auto PointerI = PositionMap.find(MI->getPointer());
      assert(PointerI != PositionMap.end() &&
             "pointer in equivalence class not found in PositionMap");
      for (unsigned Pointer : PointerI->second) {
        // ...
        // Go through all the existing sets and see if we can find one
        // which can include this pointer.
        if (Group.addPointer(Pointer, *this)) {
          // ...
        }
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached; create a new group
        // to hold the current pointer.
        Groups.emplace_back(Pointer, *this);
      }
// ...
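
// Illustrative effect (not from the original file): with N ungrouped pointers
// the runtime would need on the order of N*(N-1)/2 comparisons; merging
// pointers connected through the dependence equivalence classes into
// RuntimeCheckingPtrGroups with one shared [Low, High) range per group cuts
// this to roughly G*(G-1)/2 for G groups, and -memory-check-merge-threshold
// caps how many merge attempts are made.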
bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}
void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &[Check1, Check2] : Checks) {
    const auto &First = Check1->Members, &Second = Check2->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check1 << "):\n";
    for (unsigned K : First)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check2 << "):\n";
    for (unsigned K : Second)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
  }
}

// RuntimePointerChecking::print (fragment): dump the grouped accesses.
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned Member : CG.Members) {
      // ...
    }
/// Analyses memory accesses in a loop.
///
/// Checks whether run-time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  // ...
  AccessAnalysis(const Loop *TheLoop, AAResults *AA, const LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE,
                 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
      : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
        LoopAliasScopes(LoopAliasScopes) {
    // We're analyzing dependences across loop iterations.
    BAA.enableCrossIterationMode();
  }

  /// Register a load and whether it is only read from.
  void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(adjustLoc(Loc));
    Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(const MemoryLocation &Loc, Type *AccessTy) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(adjustLoc(Loc));
    Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access, Type *AccessTy,
                            const DenseMap<Value *, const SCEV *> &Strides,
                            DenseMap<const SCEV *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool ShouldCheckStride, bool Assume);

  /// Check whether we can check the pointers at runtime for non-intersection.
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop,
                       const DenseMap<Value *, const SCEV *> &Strides,
                       Value *&UncomputablePtr, bool ShouldCheckWrap = false);

  /// Goes over all memory accesses, checks whether a RT check is needed and
  /// builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }

  const MemAccessInfoList &getDependenciesToCheck() const { return CheckDeps; }

private:
  // ... (alias-scope metadata declared inside the loop is filtered out via:)
  //       return LoopAliasScopes.contains(cast<MDNode>(Scope));

  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Map of all accesses. Values are the types used to access memory at the
  /// location.
  PtrAccessMap Accesses;
  // ...
  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;
  // ...
  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  bool IsRTCheckAnalysisNeeded = false;
};
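
// Typical driver sequence (an illustrative sketch; the real one is in
// LoopAccessInfo::analyzeLoop further below):
//   AccessAnalysis Accesses(TheLoop, AA, LI, DepCands, PSE, LoopAliasScopes);
//   Accesses.addStore(StoreLoc, AccessTy);            // per write pointer
//   Accesses.addLoad(LoadLoc, AccessTy, IsReadOnly);  // per read pointer
//   Accesses.buildDependenceSets();
//   bool OK = Accesses.canCheckPtrAtRT(RtCheck, SE, TheLoop, Strides,
//                                      UncomputablePtr);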
/// Check whether a pointer can participate in a runtime bounds check.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
                                const SCEV *PtrScev, Loop *L, bool Assume) {
  // The bounds of a loop-invariant pointer are trivial.
  // ...
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr,
                     Type *AccessTy, Loop *L, bool Assume) {
  // ...
  return getPtrStride(PSE, AccessTy, Ptr, L, Strides, Assume).has_value() ||
         PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
}
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
                          function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
  WorkList.push_back(StartPtr);

  while (!WorkList.empty()) {
    Value *Ptr = WorkList.pop_back_val();
    if (!Visited.insert(Ptr).second)
      continue;
    auto *PN = dyn_cast<PHINode>(Ptr);
    // SCEV does not look through non-header PHIs inside the loop. Such phis
    // can be analyzed by adding separate accesses for each incoming pointer
    // value.
    if (PN && InnermostLoop.contains(PN->getParent()) &&
        PN->getParent() != InnermostLoop.getHeader()) {
      for (const Use &Inc : PN->incoming_values())
        WorkList.push_back(Inc);
    } else
      AddPointer(Ptr);
  }
}
// findForkedSCEVs (fragments, rejoined):
  // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
  // we've exceeded the depth limit, add it to the list of SCEVs and return.
  if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
      !isa<Instruction>(Ptr) || Depth == 0) {
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    return;
  }
  // ...
  auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
    switch (Opcode) {
    case Instruction::Add:
      return SE->getAddExpr(L, R);
    case Instruction::Sub:
      return SE->getMinusSCEV(L, R);
    default:
      llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
    }
  };

  Instruction *I = cast<Instruction>(Ptr);
  unsigned Opcode = I->getOpcode();
  switch (Opcode) {
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GetElementPtrInst>(I);
    Type *SourceTy = GEP->getSourceElementType();
    // We only handle base + single offset GEPs here for now.
    if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
      break;
    }
    // ...
    // See if we need to freeze our fork.
    bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
                       any_of(OffsetScevs, UndefPoisonCheck);

    // Check that we only have a single fork, on either the base or the offset.
    if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
      BaseScevs.push_back(BaseScevs[0]);
    else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
      OffsetScevs.push_back(OffsetScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }
    // ...
    ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
                          NeedsFreeze);
    ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
                          NeedsFreeze);
    break;
  }
  case Instruction::Select: {
    // A select means we've found a forked pointer; recurse into both arms.
    // ...
    if (ChildScevs.size() == 2) {
      ScevList.push_back(ChildScevs[0]);
      ScevList.push_back(ChildScevs[1]);
    }
    // ...
    break;
  }
  case Instruction::PHI: {
    // A two-entry phi is the other supported fork shape.
    if (I->getNumOperands() == 2) {
      // ...
    }
    if (ChildScevs.size() == 2) {
      ScevList.push_back(ChildScevs[0]);
      ScevList.push_back(ChildScevs[1]);
    }
    // ...
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    // ...
    bool NeedsFreeze =
        any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);

    // Check that we only have a single fork, on either the left or right side.
    if (LScevs.size() == 2 && RScevs.size() == 1)
      RScevs.push_back(RScevs[0]);
    else if (RScevs.size() == 2 && LScevs.size() == 1)
      LScevs.push_back(LScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }

    ScevList.emplace_back(
        GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
        NeedsFreeze);
    ScevList.emplace_back(
        GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
        NeedsFreeze);
    break;
  }
  default:
    // Just return the current SCEV if we haven't handled the instruction yet.
    LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }

// findForkedPointer (fragment): only accept a fork of exactly two SCEVs that
// are add-recs or loop invariant.
  if (Scevs.size() == 2 &&
      (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
       SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
      (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
       SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
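
// Illustrative input (not from the original file): a "forked" pointer such as
//   %p = select i1 %c, ptr %a, ptr %b
//   store i32 0, ptr %p
// has no single add-rec SCEV, so findForkedSCEVs returns two candidates, one
// per arm; each fork is then bounds-checked separately, with recursion
// through GEP/select/phi/add/sub capped by -max-forked-scev-depth.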
bool AccessAnalysis::createCheckForAccess(
    RuntimePointerChecking &RtCheck, MemAccessInfo Access, Type *AccessTy,
    const DenseMap<Value *, const SCEV *> &StridesMap,
    DenseMap<const SCEV *, unsigned> &DepSetId, Loop *TheLoop,
    unsigned &RunningDepId, unsigned ASId, bool ShouldCheckWrap, bool Assume) {
  // ...
  for (const auto &P : TranslatedPtrs) {
    const SCEV *PtrExpr = get<0>(P);
    // ...
    // When we run after a failing dependency check we have to make sure
    // we don't have wrapping pointers.
    if (ShouldCheckWrap) {
      // Skip wrap checking when translating pointers.
      if (TranslatedPtrs.size() > 1)
        return false;

      if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop, Assume))
        return false;
    }
    // If there's only one option for Ptr, look it up after bounds and wrap
    // checking, because assumptions might have been added to PSE.
    if (TranslatedPtrs.size() == 1)
      // ...
  }

  for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
    // The id of the dependence set.
    unsigned DepId;

    if (isDependencyCheckNeeded()) {
      // ...
      unsigned &LeaderId = DepSetId[Leader];
      if (!LeaderId)
        LeaderId = RunningDepId++;
      DepId = LeaderId;
    } else
      // Each access has its own dependence set.
      DepId = RunningDepId++;

    bool IsWrite = Access.getInt();
    RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
                   NeedsFreeze);
    LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
  }

  return true;
}
bool AccessAnalysis::canCheckPtrAtRT(
    RuntimePointerChecking &RtCheck, ScalarEvolution *SE, Loop *TheLoop,
    const DenseMap<Value *, const SCEV *> &StridesMap,
    Value *&UncomputablePtr, bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded)
    return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to access from different alias sets.
  // Accesses between different groups doesn't need to be checked.
  unsigned ASId = 0;
  for (const auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;
    ++ASId;
    auto ASPointers = AS.getPointers();

    // We assign consecutive id to access from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<const SCEV *, unsigned> DepSetId;

    SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;

    // First, count how many write and read accesses are in the alias set.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (const Value *ConstPtr : ASPointers) {
      Value *Ptr = const_cast<Value *>(ConstPtr);
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;
      AccessInfos.emplace_back(Ptr, IsWrite);
    }

    // We do not need runtime checks for this alias set, if there are no writes
    // or a single write and no reads.
    if (NumWritePtrChecks == 0 ||
        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
      assert((ASPointers.size() <= 1 ||
              all_of(ASPointers,
                     [this](const Value *Ptr) {
                       MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
                                                 true);
                       return DepCands.findValue(AccessWrite) == DepCands.end();
                     })) &&
             "Can only skip updating CanDoRT below, if all entries in AS "
             "are reads or there is at most 1 entry");
      continue;
    }

    for (auto &Access : AccessInfos) {
      for (const auto &AccessTy : Accesses[Access]) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, false)) {
          LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
                            << *Access.getPointer() << '\n');
          Retries.emplace_back(Access, AccessTy);
          CanDoAliasSetRT = false;
        }
      }
    }

    // Note that this function computes CanDoRT and MayNeedRTCheck
    // independently.
    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (const auto &[Access, AccessTy] : Retries) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          UncomputablePtr = Access.getPointer();
          break;
        }
      }
    }

    CanDoRT &= CanDoAliasSetRT;
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // ...
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (MayNeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");
  // ...
  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}
void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.
  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
  LLVM_DEBUG({
    for (const auto &[A, _] : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write"
                            : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
                                                                 : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap, so we only
  // need to check for potential pointer dependencies within each alias set.
  for (const auto &AS : AST) {
    // Iteration order here is deterministic.
    auto ASPointers = AS.getPointers();

    bool SetHasWrite = false;

    // Map of (pointer to underlying objects, accesses) pairs.
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessMap DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;

      for (const Value *ConstPtr : ASPointers) {
        Value *Ptr = const_cast<Value *>(ConstPtr);

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (const auto &[AC, _] : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers).
          if (!UseDeferred && IsReadOnlyPtr) {
            // We only use the pointer keys; the types vector values don't
            // matter.
            DeferredAccesses.insert({Access, {}});
            continue;
          }

          // If this is a write - check other reads and writes for conflicts.
          // If this is a read only check other writes for conflicts (but only
          // if there is no other write to the ptr - this is an optimization
          // to catch "a[i] = a[i] + " without having to do a dependence
          // check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<const Value *, 16> ValueVector;
          ValueVector TempObjects;

          UnderlyingObjects[Ptr] = {};
          // ...
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (const Value *UnderlyingObj : UOs) {
            // nullptr never aliases; don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}
// isNoWrapAddRec (fragments, rejoined): return true if an AddRec pointer
// \p Ptr is unsigned non-wrapping.
  const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->hasNoUnsignedSignedWrap())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : GEP->indices())
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume constant for other the operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      const SCEV *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
/// Check the stride of the pointer and ensure that it does not wrap in the
/// address space, assuming the result is used inside the loop.
std::optional<int64_t>
llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
                   const Loop *Lp,
                   const DenseMap<Value *, const SCEV *> &StridesMap,
                   bool Assume, bool ShouldCheckWrap) {
  // ...
  if (isa<ScalableVectorType>(AccessTy)) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
                      << "\n");
    return std::nullopt;
  }
  // ...
  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return std::nullopt;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                      << *Ptr << " SCEV: " << *AR << "\n");
    return std::nullopt;
  }

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                      << " SCEV: " << *AR << "\n");
    return std::nullopt;
  }

  const auto &DL = Lp->getHeader()->getDataLayout();
  TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
  int64_t Size = AllocSize.getFixedValue();
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return std::nullopt;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return std::nullopt;

  if (!ShouldCheckWrap)
    return Stride;

  // ...
  // An nusw getelementptr that is an AddRec cannot wrap.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
      GEP && GEP->hasNoUnsignedSignedWrap())
    return Stride;

  // ... unit strides stay within the original allocation:
  if (/* ...pointer arithmetic cannot cross address-space bounds... */
      (Stride == 1 || Stride == -1))
    return Stride;

  if (Assume) {
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
                      << "LAA: Pointer: " << *Ptr << "\n"
                      << "LAA: SCEV: " << *AR << "\n"
                      << "LAA: Added an overflow assumption\n");
    return Stride;
  }
  LLVM_DEBUG(
      dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
             << *Ptr << " SCEV: " << *AR << "\n");
  return std::nullopt;
}
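
// Worked example (illustrative): for `A[3*i]` with i32 elements, the pointer
// SCEV is {%A,+,12}, so StepVal = 12 and Size = getTypeAllocSize(i32) = 4,
// giving Stride = 12 / 4 = 3 with Rem = 0. A 10-byte step over i32 would
// leave Rem = 2 and the function would return std::nullopt.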
// getPointersDiff (fragments, rejoined):
  assert(PtrA && PtrB && "Expected non-nullptr pointers.");
  // ...
  if (ASA != ASB)
    return std::nullopt;
  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  // ...
  int64_t Val;
  if (PtrA1 == PtrB1) {
    // Retrieve the address space again as pointer stripping now tracks
    // through `addrspacecast`.
    ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
    ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
    // Check that the address spaces match and that the pointers are valid.
    if (ASA != ASB)
      return std::nullopt;

    IdxWidth = DL.getIndexSizeInBits(ASA);
    OffsetA = OffsetA.sextOrTrunc(IdxWidth);
    OffsetB = OffsetB.sextOrTrunc(IdxWidth);
    Val = (OffsetB - OffsetA).getSExtValue();
  } else {
    // Otherwise compute the distance with SCEV between the base pointers.
    std::optional<APInt> Diff =
        SE.computeConstantDifference(PtrSCEVB, PtrSCEVA);
    if (!Diff)
      return std::nullopt;
    Val = Diff->getSExtValue();
  }
  int Size = DL.getTypeStoreSize(ElemTyA);
  int Dist = Val / Size;

  // Ensure that the calculated distance matches the type-based one after all
  // the bitcasts removal in the provided pointers.
  if (!StrictCheck || Dist * Size == Val)
    return Dist;
  return std::nullopt;
1615 "Expected list of pointer operands.");
1618 Value *Ptr0 = VL[0];
1620 using DistOrdPair = std::pair<int64_t, int>;
1622 std::set<DistOrdPair,
decltype(Compare)> Offsets(Compare);
1623 Offsets.emplace(0, 0);
1624 bool IsConsecutive =
true;
1633 auto [It, IsInserted] = Offsets.emplace(
Offset,
Idx);
1637 IsConsecutive &= std::next(It) == Offsets.end();
1639 SortedIndices.
clear();
1640 if (!IsConsecutive) {
1644 SortedIndices[
Idx] = Off.second;
// isConsecutiveAccess (fragment): two pointers are consecutive iff their
// element distance is exactly 1.
  std::optional<int> Diff = getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL,
                                            SE, /*StrictCheck=*/true, CheckType);
  return Diff && *Diff == 1;
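
// Worked example (illustrative): with i32 elements, PtrA = &A[2] and
// PtrB = &A[3] give Val = 4 bytes and Size = 4, so getPointersDiff returns 1
// and isConsecutiveAccess succeeds; PtrB = &A[5] gives Diff = 3 and fails.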
// MemoryDepChecker::addAccess for stores and loads (fragments): each pointer
// reached via visitPointers records its program-order index.
    Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
    InstMap.push_back(SI);
// ...
    Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
    InstMap.push_back(LI);
// Dependence::isBackward (fragment): only the backward kinds answer true.
  case ForwardButPreventsForwarding:
  case Unknown:
  case IndirectUnsafe:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
// ...
// Dependence::isForward (fragment):
  case ForwardButPreventsForwarding:
    return true;

  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
  case IndirectUnsafe:
    return false;
bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // ...
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load is
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >> 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MinDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}
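
// Worked example (illustrative): Distance = 24 bytes, TypeByteSize = 4, so
// NumItersForStoreLoadThroughMemory = 32. At VF = 8, 24 % 8 == 0, fine; at
// VF = 16, 24 % 16 != 0 and 24 / 16 = 1 < 32, so the search caps
// MaxVFWithoutSLForwardIssues at 8 bytes (two i32 lanes). Since 8 is not
// below 2 * TypeByteSize no conflict is reported, but MinDepDistBytes may
// shrink to 8.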
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &MaxBTC, const SCEV &Dist,
                                     uint64_t MaxStride,
                                     uint64_t TypeByteSize) {
  // If we can prove that
  //      (**) |Dist| > MaxBTC * Step
  // then there is no dependence.
  const uint64_t ByteStride = MaxStride * TypeByteSize;
  // ...
  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  // The dependence distance can be positive/negative, so we sign extend Dist;
  // the multiplication of the absolute stride in bytes and the
  // backedge-taken count is non-negative, so we zero extend Product.
  if (DistTypeSizeBits > ProductTypeSizeBits)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  // ...
}
/// Check the dependence for two accesses with the same constant stride
/// \p Stride, a positive byte distance \p Distance, and type size
/// \p TypeByteSize.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of the type byte size.
  if (Distance % TypeByteSize)
    return false;

  uint64_t ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  return ScaledDist % Stride;
}
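
// Worked example (illustrative): in
//   for (i = 0; i < 1024; i += 4) A[i + 2] = A[i] + 1;
// the element distance is 2 (8 bytes for i32) and the common stride is 4, so
// ScaledDist % Stride = 2 % 4 != 0: the two access streams ({0,4,8,...} and
// {2,6,10,...}) interleave without ever touching the same element, and the
// pair is independent.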
std::variant<MemoryDepChecker::Dependence::DepType,
             MemoryDepChecker::DepDistanceStrideAndSizeInfo>
MemoryDepChecker::getDependenceDistanceStrideAndSize(
    const MemAccessInfo &A, Instruction *AInst, const MemAccessInfo &B,
    Instruction *BInst) {
  const auto &DL = InnermostLoop->getHeader()->getDataLayout();
  auto &SE = *PSE.getSE();
  const auto &[APtr, AIsWrite] = A;
  const auto &[BPtr, BIsWrite] = B;

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return MemoryDepChecker::Dependence::NoDep;

  Type *ATy = getLoadStoreType(AInst);
  Type *BTy = getLoadStoreType(BInst);

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return MemoryDepChecker::Dependence::Unknown;

  std::optional<int64_t> StrideAPtr =
      getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true, true);
  std::optional<int64_t> StrideBPtr =
      getPtrStride(PSE, BTy, BPtr, InnermostLoop, SymbolicStrides, true, true);

  const SCEV *Src = PSE.getSCEV(APtr);
  const SCEV *Sink = PSE.getSCEV(BPtr);

  // If the induction step is negative we have to invert source and sink of
  // the dependence when measuring the distance between them. We should not
  // swap AIsWrite with BIsWrite, as their uses expect them in program order.
  if (StrideAPtr && *StrideAPtr < 0) {
    std::swap(Src, Sink);
    std::swap(AInst, BInst);
    std::swap(ATy, BTy);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = SE.getMinusSCEV(Sink, Src);

  LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
                    << "\n");
  LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
                    << ": " << *Dist << "\n");

  // Check if we can prove that Sink only accesses memory after Src's end or
  // vice versa.
  const auto &[SrcStart_, SrcEnd_] =
      getStartAndEndForAccess(InnermostLoop, Src, ATy, PSE, PointerBounds);
  const auto &[SinkStart_, SinkEnd_] =
      getStartAndEndForAccess(InnermostLoop, Sink, BTy, PSE, PointerBounds);
  if (!isa<SCEVCouldNotCompute>(SrcStart_) &&
      !isa<SCEVCouldNotCompute>(SrcEnd_) &&
      !isa<SCEVCouldNotCompute>(SinkStart_) &&
      !isa<SCEVCouldNotCompute>(SinkEnd_)) {
    if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd_, SinkStart_) ||
        SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd_, SrcStart_))
      return MemoryDepChecker::Dependence::NoDep;
  }

  // Need accesses with constant strides and the same direction for further
  // dependence analysis. We don't want to vectorize "A[B[i]] += ..." and
  // similar code, or pointer arithmetic that could wrap in the address space.
  if (!StrideAPtr || !StrideBPtr) {
    LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return MemoryDepChecker::Dependence::IndirectUnsafe;
  }

  int64_t StrideAPtrInt = *StrideAPtr;
  int64_t StrideBPtrInt = *StrideBPtr;
  LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt
                    << " Sink induction step: " << StrideBPtrInt << "\n");
  // At least Src or Sink are loop invariant and the other is strided or
  // invariant. We can generate a runtime check to disambiguate the accesses.
  if (!StrideAPtrInt || !StrideBPtrInt)
    return MemoryDepChecker::Dependence::Unknown;

  // Both Src and Sink have a constant stride, check if they are in the same
  // direction.
  if ((StrideAPtrInt > 0) != (StrideBPtrInt > 0)) {
    LLVM_DEBUG(
        dbgs() << "Pointer access with strides in different directions\n");
    return MemoryDepChecker::Dependence::Unknown;
  }

  uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
  bool HasSameSize =
      DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
  if (!HasSameSize)
    TypeByteSize = 0;

  StrideAPtrInt = std::abs(StrideAPtrInt);
  StrideBPtrInt = std::abs(StrideBPtrInt);

  uint64_t MaxStride = std::max(StrideAPtrInt, StrideBPtrInt);

  std::optional<uint64_t> CommonStride;
  if (StrideAPtrInt == StrideBPtrInt)
    CommonStride = StrideAPtrInt;

  // TODO: Historically, we don't retry with runtime checks unless the
  // (unscaled) strides are the same.
  bool ShouldRetryWithRuntimeCheck = CommonStride.has_value();

  return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride,
                                      ShouldRetryWithRuntimeCheck, TypeByteSize,
                                      AIsWrite, BIsWrite);
}
MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  // Get the dependence distance, stride, type size and what access writes for
  // the dependence between A and B.
  auto Res =
      getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);
  if (std::holds_alternative<Dependence::DepType>(Res))
    return std::get<Dependence::DepType>(Res);

  auto &[Dist, MaxStride, CommonStride, ShouldRetryWithRuntimeCheck,
         TypeByteSize, AIsWrite, BIsWrite] =
      std::get<DepDistanceStrideAndSizeInfo>(Res);
  bool HasSameSize = TypeByteSize > 0;

  if (isa<SCEVCouldNotCompute>(Dist)) {
    FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;
    LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");
    return Dependence::Unknown;
  }

  // If the distance between the accesses is larger than their maximum
  // absolute stride multiplied by the symbolic maximum backedge taken count,
  // the accesses are independent.
  if (HasSameSize &&
      isSafeDependenceDistance(DL, SE,
                               *(PSE.getSymbolicMaxBackedgeTakenCount()),
                               *Dist, MaxStride, TypeByteSize))
    return Dependence::NoDep;

  const SCEVConstant *ConstDist = dyn_cast<SCEVConstant>(Dist);

  // Attempt to prove strided accesses independent.
  if (ConstDist) {
    uint64_t Distance = ConstDist->getAPInt().abs().getZExtValue();
    if (Distance > 0 && CommonStride && CommonStride > 1 && HasSameSize &&
        areStridedAccessesIndependent(Distance, *CommonStride, TypeByteSize)) {
      LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
      return Dependence::NoDep;
    }
  }

  // Negative distances are not plausible dependencies.
  if (SE.isKnownNonPositive(Dist)) {
    if (SE.isKnownNonNegative(Dist)) {
      if (HasSameSize)
        // Write to the same location with the same size.
        return Dependence::Forward;
      LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
                           "different type sizes\n");
      return Dependence::Unknown;
    }

    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    // Check if the first access writes to a location that is read in a later
    // iteration, where the distance between them is not a multiple of a
    // vector factor and relatively small.
    if (IsTrueDataDependence && EnableForwardingConflictDetection) {
      if (!ConstDist) {
        FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;
        return Dependence::Unknown;
      }
      if (!HasSameSize ||
          couldPreventStoreLoadForward(
              ConstDist->getAPInt().abs().getZExtValue(), TypeByteSize)) {
        LLVM_DEBUG(
            dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
        return Dependence::ForwardButPreventsForwarding;
      }
    }

    LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
    return Dependence::Forward;
  }

  int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
  // Below we only handle strictly positive distances.
  if (MinDistance <= 0) {
    FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;
    return Dependence::Unknown;
  }

  if (!ConstDist)
    // Retrying with runtime checks may help for non-constant distances.
    FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;

  if (!HasSameSize) {
    LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
                         "different type sizes\n");
    return Dependence::Unknown;
  }

  if (!CommonStride)
    return Dependence::Unknown;

  // ...
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

  // ...
  uint64_t MinDistanceNeeded =
      TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
    // ...
    LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
                      << MinDistance << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than the smallest
  // dependence distance seen so far.
  if (MinDistanceNeeded > MinDepDistBytes) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
                      << MinDistanceNeeded << " size in bytes\n");
    return Dependence::Backward;
  }

  // ...
  MinDepDistBytes =
      std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  uint64_t MinDepDistBytesOld = MinDepDistBytes;
  if (IsTrueDataDependence && EnableForwardingConflictDetection && ConstDist &&
      couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {
    // Sanity check that we didn't update MinDepDistBytes when calling
    // couldPreventStoreLoadForward.
    assert(MinDepDistBytes == MinDepDistBytesOld &&
           "An update to MinDepDistBytes requires an update to "
           "MaxSafeVectorWidthInBits");
    (void)MinDepDistBytesOld;
    return Dependence::BackwardVectorizableButPreventsForwarding;
  }

  // An update to MinDepDistBytes requires an update to
  // MaxSafeVectorWidthInBits.
  uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);
  LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
                    << " with max VF = " << MaxVF << '\n');

  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
  if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) {
    // For non-constant distances, we checked the lower bound of the
    // dependence distance and the distance may be larger at runtime (and safe
    // for vectorization). Classify it as Unknown, so we re-try with runtime
    // checks.
    return Dependence::Unknown;
  }

  MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
  return Dependence::BackwardVectorizable;
}
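
// Worked example (illustrative): with CommonStride = 1, TypeByteSize = 4 and
// a positive minimum dependence distance of 16 bytes, MinDepDistBytes = 16
// and MaxVF = 16 / (4 * 1) = 4 lanes, i.e. MaxVFInBits = 128: vectorizing
// something like A[i + 4] = A[i] with VF <= 4 never reads an element written
// in the same vector iteration.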
bool MemoryDepChecker::areDepsSafe(const DepCandidates &AccessSets,
                                   const MemAccessInfoList &CheckDeps) {
  MinDepDistBytes = -1;
  SmallPtrSet<MemAccessInfo, 8> Visited;
  for (MemAccessInfo CurAccess : CheckDeps) {
    if (Visited.count(CurAccess))
      continue;
    // ...
    bool AIIsWrite = AI->getInt();
    // Check loads only against next equivalent class, but stores also against
    // other stores in the same equivalence class - to the same address.
    EquivalenceClasses<MemAccessInfo>::member_iterator OI =
        (AIIsWrite ? AI : std::next(AI));
    while (OI != AE) {
      // Check every access pair.
      for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
                                           I1E = Accesses[*AI].end();
           I1 != I1E; ++I1)
        // Scan all accesses of another equivalence class, but only the next
        // accesses of the same equivalent class.
        for (std::vector<unsigned>::iterator
                 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
                 I2E = (OI == AI ? I1E : Accesses[*OI].end());
             I2 != I2E; ++I2) {
          auto A = std::make_pair(&*AI, *I1);
          auto B = std::make_pair(&*OI, *I2);
          // ...
          Dependence::DepType Type =
              isDependent(*A.first, A.second, *B.first, B.second);
          mergeInStatus(Dependence::isSafeForVectorization(Type));

          // Gather dependences unless we accumulated MaxDependences
          // dependences. In that case return as soon as we find the first
          // unsafe dependence. This puts a limit on this quadratic algorithm.
          if (RecordDependences) {
            if (Type != Dependence::NoDep)
              Dependences.emplace_back(A.second, B.second, Type);

            if (Dependences.size() >= MaxDependences) {
              RecordDependences = false;
              Dependences.clear();
              LLVM_DEBUG(dbgs()
                         << "Too many dependences, stopped recording\n");
            }
          }
          if (!RecordDependences && !isSafeForVectorization())
            return false;
        }
      ++OI;
    }
    // ...
  }

  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return isSafeForVectorization();
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
  MemAccessInfo Access(Ptr, IsWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  transform(IndexVector, std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep",
    "Unknown",
    "IndirectUnsafe",
    "Forward",
    "ForwardButPreventsForwarding",
    "Backward",
    "BackwardVectorizable",
    "BackwardVectorizableButPreventsForwarding"};
bool LoopAccessInfo::canAnalyzeLoop() {
  // ...
  // We can only analyze innermost loops.
  if (!TheLoop->isInnermost()) {
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the symbolic max backedge taken
  // count, which is needed for further analysis.
  const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
  if (isa<SCEVCouldNotCompute>(ExitCount)) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}
// LoopAccessInfo::analyzeLoop (fragments, rejoined):
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  bool HasComplexMemInst = false;

  // A runtime check is only legal to insert if there are no convergent calls.
  HasConvergentOp = false;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  const bool EnableMemAccessVersioningOfLoop =
      EnableMemAccessVersioning &&
      !TheLoop->getHeader()->getParent()->hasOptSize();

  // Traverse blocks in fixed RPOT order, regardless of their storage in the
  // loop info, as it may be arbitrary.
  // ...
      if (auto *Call = dyn_cast<CallBase>(&I)) {
        if (Call->isConvergent())
          HasConvergentOp = true;
      }

      // With both a non-vectorizable memory instruction and a convergent
      // operation, found in this loop, no reason to continue the search.
      if (HasComplexMemInst && HasConvergentOp)
        return false;

      // Avoid hitting recordAnalysis multiple times.
      if (HasComplexMemInst)
        continue;

      // Record alias scopes defined inside the loop.
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        for (Metadata *Op : Decl->getScopeList()->operands())
          LoopAliasScopes.insert(cast<MDNode>(Op));

      // Many math library functions read the rounding mode. We will only
      // vectorize a loop if it contains known function calls that don't set
      // the flag. Therefore, it is safe to ignore this read from memory.
      auto *Call = dyn_cast<CallInst>(&I);
      if (Call && getVectorIntrinsicIDForCall(Call, TLI))
        continue;

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, we only allow it if it's a call to a function with
      // a vector mapping and no pointer arguments.
      if (I.mayReadFromMemory()) {
        auto hasPointerArgs = [](CallBase *CB) {
          return any_of(CB->args(), [](Value const *Arg) {
            return Arg->getType()->isPointerTy();
          });
        };
        // ...
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            !hasPointerArgs(Call) && !VFDatabase::getMappings(*Call).empty())
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld) {
          recordAnalysis("CantVectorizeInstruction", Ld)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!Ld->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          HasComplexMemInst = true;
          continue;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          HasComplexMemInst = true;
          continue;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  if (HasComplexMemInst)
    return false;

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    return true;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,
                          LoopAliasScopes);
  // ...
  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();

    if (isInvariant(Ptr)) {
      // Record store instructions to loop invariant addresses.
      StoresToInvariantAddresses.push_back(ST);
      HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
          !UniformStores.insert(Ptr).second;
    }

    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    Type *AccessTy = getLoadStoreType(ST);
    if (Seen.insert({Ptr, AccessTy}).second) {
      ++NumReadWrites;
      // ...
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
                    [&Accesses, AccessTy, Loc](Value *Ptr) {
                      MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
                      Accesses.addStore(NewLoc, AccessTy);
                    });
    }
  }

  if (IsAnnotatedParallel) {
    LLVM_DEBUG(
        dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
               << "checks.\n");
    return true;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the read list. If
    // we *did* see it before, then it is already in the read-write list. This
    // allows us to vectorize expressions such as A[i] += x; because the
    // address of A[i] is a read-write pointer. This only works if the index
    // of A[i] is consecutive. If the address of i is unknown (for example
    // A[B[i]]) then we may read a few words, modify, and write a few words,
    // and some of the words may be written to the same address.
    bool IsReadOnlyPtr = false;
    Type *AccessTy = getLoadStoreType(LD);
    if (Seen.insert({Ptr, AccessTy}).second ||
        !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides)
             .value_or(0)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    // See if there is an unsafe dependency between a load to a uniform
    // address and store to the same uniform address.
    if (UniformStores.count(Ptr)) {
      LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                           "load and uniform store to the same address!\n");
      HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
    }

    // ...
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
                  [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
                    MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
                    Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
                  });
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then is it safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    return true;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  Value *UncomputablePtr = nullptr;
  bool CanDoRTIfNeeded =
      Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
                               SymbolicStrides, UncomputablePtr, false);
  if (!CanDoRTIfNeeded) {
    const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
    recordAnalysis("CantIdentifyArrayBounds", I)
        << "cannot identify array bounds";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                      << "the array bounds.\n");
    return false;
  }

  LLVM_DEBUG(
      dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");

  bool DepsAreSafe = true;
  if (Accesses.isDependencyCheckNeeded()) {
    LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    DepsAreSafe = DepChecker->areDepsSafe(DependentAccesses,
                                          Accesses.getDependenciesToCheck());

    if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeCheck()) {
      LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      UncomputablePtr = nullptr;
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
          *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
        recordAnalysis("CantCheckMemDepsAtRunTime", I)
            << "cannot check memory dependencies at runtime";
        LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        return false;
      }
      DepsAreSafe = true;
    }
  }

  if (HasConvergentOp) {
    recordAnalysis("CantInsertRuntimeCheckWithConvergent")
        << "cannot add control dependency to convergent operation";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
                         "would be needed with a convergent operation\n");
    return false;
  }

  if (DepsAreSafe) {
    LLVM_DEBUG(
        dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
               << (PtrRtChecking->Need ? "" : " don't")
               << " need runtime memory checks.\n");
    return true;
  }

  emitUnsafeDependenceRemark();
  return false;
}
void LoopAccessInfo::emitUnsafeDependenceRemark() {
  const auto *Deps = getDepChecker().getDependences();
  if (!Deps)
    return;
  // ...
  if (Found == Deps->end())
    return;
  MemoryDepChecker::Dependence Dep = *Found;

  LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");

  // Emit remark for first unsafe dependence.
  bool HasForcedDistribution = false;
  std::optional<const MDOperand *> Value =
      findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
  if (Value) {
    const MDOperand *Op = *Value;
    assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
    HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
  }

  const std::string Info =
      HasForcedDistribution
          ? "unsafe dependent memory operations in loop."
          : "unsafe dependent memory operations in loop. Use "
            "#pragma clang loop distribute(enable) to allow loop distribution "
            "to attempt to isolate the offending operations into a separate "
            "loop";
  // ...
  switch (Dep.Type) {
  // ...
  case MemoryDepChecker::Dependence::Backward:
    R << "\nBackward loop carried data dependence.";
    break;
  case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
    R << "\nForward loop carried data dependence that prevents "
         "store-to-load forwarding.";
    break;
  case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
    R << "\nBackward loop carried data dependence that prevents "
         "store-to-load forwarding.";
    break;
  case MemoryDepChecker::Dependence::IndirectUnsafe:
    R << "\nUnsafe indirect dependence.";
    break;
  case MemoryDepChecker::Dependence::Unknown:
    R << "\nUnknown data dependence.";
    break;
  }

  if (Instruction *I = Dep.getSource(getDepChecker())) {
    DebugLoc SourceLoc = I->getDebugLoc();
    if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
      SourceLoc = DD->getDebugLoc();
    if (SourceLoc)
      R << " Memory location is the same as accessed at "
        << ore::NV("Location", SourceLoc);
  }
}
// LoopAccessInfo::recordAnalysis (fragments, rejoined):
  assert(!Report && "Multiple reports generated");

  const Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
                                                        DL, CodeRegion);
  return *Report;
}

// LoopAccessInfo::isInvariant (fragment):
  auto *SE = PSE->getSE();
// getGEPInductionOperand (fragment): walk backwards and try to peel off
// trailing indices that don't change the address.
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP
    // we can peel off the zero index.
    TypeSize ElemSize = GEPTI.getSequentialElementStride(DL);
    if (ElemSize != GEPAllocSize)
      break;
// ...

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand.
static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I)
    if (I != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(I)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}
/// Get the stride of a pointer access in a loop. Looks for symbolic strides
/// "a[i*stride]". Returns the symbolic stride, or null otherwise.
static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
                                        Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually to the
  // memory) addressable.
  int64_t PtrAccessSize = 1;
  Value *OrigPtr = Ptr;
  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
      V = C->getOperand();
  // ...
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Only SCEVUnknown strides (possibly behind an integral cast) are accepted.
  if (isa<SCEVUnknown>(V))
    return V;

  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
    if (isa<SCEVUnknown>(C->getOperand()))
      return C->getOperand();

  return nullptr;
}
void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  // ...
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");

  if (!SpeculateUnitStride) {
    LLVM_DEBUG(dbgs() << "  Chose not to due to -laa-speculate-unit-stride\n");
    return;
  }

  // ... (don't add the predicate when the stride is known to be at least the
  // trip count, since the versioned loop would run at most once)
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = MaxBTC;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSizeBits >= StrideTypeSizeBits)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
  const SCEV *StrideMinusBETaken =
      SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking "Stride >= TripCount"
  // is equivalent to checking Stride - MaxBTC > 0.
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  // Strip back off the integer cast, and check that our result is a
  // SCEVUnknown as we expect.
  const SCEV *StrideBase = StrideExpr;
  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
    StrideBase = C->getOperand();
  SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
}
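
// Illustrative candidate (not from the original file):
//   void f(int *A, long Stride, long N) {
//     for (long i = 0; i < N; i++) A[i * Stride] += 1;
//   }
// Here the stride is the SCEVUnknown %Stride, so SymbolicStrides[Ptr] is
// recorded above; a client such as the loop vectorizer may then version the
// loop under the runtime predicate Stride == 1, for which
// replaceSymbolicStrideSCEV folds the access into a unit-stride {%A,+,4}
// add-rec.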
LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetTransformInfo *TTI,
                               const TargetLibraryInfo *TLI, AAResults *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(nullptr), TheLoop(L) {
  unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
  if (TTI) {
    TypeSize FixedWidth =
        TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
    if (FixedWidth.isNonZero()) {
      // Scale the vector width by 2 as rough estimate to also consider
      // interleaving.
      MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;
    }

    TypeSize ScalableWidth =
        TTI->getRegisterBitWidth(TargetTransformInfo::RGK_ScalableVector);
    if (ScalableWidth.isNonZero())
      MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
  }
  DepChecker = std::make_unique<MemoryDepChecker>(*PSE, L, SymbolicStrides,
                                                  MaxTargetVectorWidthInBits);
  PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
  if (canAnalyzeLoop())
    CanVecMem = analyzeLoop(AA, LI, TLI, DT);
}
void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  // ...
    OS << " with a maximum safe vector width of "
       << DepChecker->getMaxSafeVectorWidthInBits() << " bits";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  // ...
  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";
  // ...
  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (const auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  }

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth)
      << "Non vectorizable stores to invariant address were "
      << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
                  HasLoadStoreDependenceInvolvingLoopInvariantAddress
              ? ""
              : "not ")
      << "found in loop.\n";
  // ...
}
const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
  const auto &[It, Inserted] = LoopAccessInfoMap.insert({&L, nullptr});

  if (Inserted)
    It->second =
        std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);
  return *It->second;
}

void LoopAccessInfoManager::clear() {
  // Collect cached entries whose runtime checks or SCEV predicates may
  // reference invalidated IR, and drop them.
  SmallVector<Loop *> ToRemove;
  for (const auto &[L, LAI] : LoopAccessInfoMap) {
    if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
        LAI->getPSE().getPredicate().isAlwaysTrue())
      continue;
    ToRemove.push_back(L);
  }
  for (Loop *L : ToRemove)
    LoopAccessInfoMap.erase(L);
}
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefAnalysis InstSet & ToRemove
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...
This header defines various interfaces for pass management in LLVM.
static std::pair< const SCEV *, const SCEV * > getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, PredicatedScalarEvolution &PSE, DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > &PointerBounds)
Calculate Start and End points of memory access.
static cl::opt< unsigned > MaxDependences("max-dependences", cl::Hidden, cl::desc("Maximum number of dependences collected by " "loop-access analysis (default = 100)"), cl::init(100))
We collect dependences up to this threshold.
static cl::opt< bool > EnableForwardingConflictDetection("store-to-load-forwarding-conflict-detection", cl::Hidden, cl::desc("Enable conflict detection in loop-access analysis"), cl::init(true))
Enable store-to-load forwarding conflict detection.
static void findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr, SmallVectorImpl< PointerIntPair< const SCEV *, 1, bool > > &ScevList, unsigned Depth)
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr, const SCEV *PtrScev, Loop *L, bool Assume)
Check whether a pointer can participate in a runtime bounds check.
static cl::opt< unsigned > MemoryCheckMergeThreshold("memory-check-merge-threshold", cl::Hidden, cl::desc("Maximum number of comparisons done when trying to merge " "runtime memory checks. (default = 100)"), cl::init(100))
The maximum iterations used to merge memory checks.
static const SCEV * getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
Get the stride of a pointer access in a loop.
static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep)
Find the operand of the GEP that should be checked for consecutive stores.
static cl::opt< unsigned, true > VectorizationInterleave("force-vector-interleave", cl::Hidden, cl::desc("Sets the vectorization interleave count. " "Zero is autoselect."), cl::location(VectorizerParams::VectorizationInterleave))
static bool isNoWrap(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &Strides, Value *Ptr, Type *AccessTy, Loop *L, bool Assume)
Check whether a pointer address cannot wrap.
static cl::opt< bool, true > HoistRuntimeChecks("hoist-runtime-checks", cl::Hidden, cl::desc("Hoist inner loop runtime memory checks to outer loop if possible"), cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true))
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE, const SCEV &MaxBTC, const SCEV &Dist, uint64_t MaxStride, uint64_t TypeByteSize)
Given a dependence-distance Dist between two memory accesses, that have strides in the same direction...
static cl::opt< unsigned, true > RuntimeMemoryCheckThreshold("runtime-memory-check-threshold", cl::Hidden, cl::desc("When performing memory disambiguation checks at runtime do not " "generate more than this number of comparisons (default = 8)."), cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8))
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, function_ref< void(Value *)> AddPointer)
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, PredicatedScalarEvolution &PSE, const Loop *L)
Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e.
static Value * stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
If the argument is a GEP, then returns the operand identified by getGEPInductionOperand.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))
static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))
static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the foll...
This header provides classes for managing per-loop analyses.
This file provides utility analysis objects describing memory locations.
FunctionAnalysisManager FAM
This file defines the PointerIntPair class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckType(MVT::SimpleValueType VT, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
static const X86InstrFMA3Group Groups[]
A manager for alias analyses.
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
APInt abs() const
Get the absolute value.
unsigned getBitWidth() const
Return the number of bits in the APInt.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
int64_t getSExtValue() const
Get sign extended value.
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
API to communicate dependencies between analyses during invalidation.
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
@ ICMP_ULE
unsigned less or equal
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
Analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
iterator findValue(const ElemTy &V) const
findValue - Return an iterator to the specified value.
iterator insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
typename std::set< ECValue, ECValueComparator >::const_iterator iterator
iterator* - Provides a way to iterate over all values in the set.
member_iterator member_begin(iterator I) const
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Type * getResultElementType() const
PointerType * getType() const
Global values are always pointers.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
An instruction for reading from memory.
Value * getPointerOperand()
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
Result run(Function &F, FunctionAnalysisManager &AM)
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const LoopAccessInfo & getInfo(Loop &L)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
the Memory Dependence Checker which can determine the loop-independent and loop-carried dependences b...
bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Represents a single loop in the control flow graph.
std::string getLocStr() const
Return a string containing the debug location of the loop (file name + line number if present,...
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
ArrayRef< MDOperand > operands() const
Tracking metadata reference owned by Metadata.
This class implements a map that also provides access to all stored values in a deterministic order.
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
bool areDepsSafe(const DepCandidates &AccessSets, const MemAccessInfoList &CheckDeps)
Check whether the dependencies between the accesses are safe.
const SmallVectorImpl< Instruction * > & getMemoryInstructions() const
The vector of memory access instructions.
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
const SmallVectorImpl< Dependence > * getDependences() const
Returns the memory dependences.
DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > & getPointerBounds()
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
@ PossiblySafeWithRtChecks
bool shouldRetryWithRuntimeCheck() const
In same cases when the dependency check fails we can still vectorize the loop with a dynamic array ac...
void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
const Value * Ptr
The address of the start of the location.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the list run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store it.
static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
const Loop * getLoop() const
This class represents a constant integer value.
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
This node represents multiplication of some number of SCEVs.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
static LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
The main scalar evolution driver.
bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * getCouldNotCompute()
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr) const
Accumulate the constant offset this value has compared to a base pointer.
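
A hedged example of the usual call pattern, assuming Ptr is a pointer Value and DL the module's DataLayout:

// Split Ptr into an underlying base plus a constant byte offset.
APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
const Value *Base = Ptr->stripAndAccumulateConstantOffsets(
    DL, Offset, /*AllowNonInbounds=*/true);
// If nothing could be stripped, Base == Ptr and Offset stays zero.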
StringRef getName() const
Return a constant reference to the value's name.
constexpr ScalarTy getFixedValue() const
constexpr bool isNonZero() const
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
Type * getIndexedType() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
friend const_iterator end(StringRef path)
Get end iterator over path.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
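
For instance, to test whether two same-type accesses are adjacent (Int32Ty, PtrA, PtrB, DL, and SE are assumed to be in scope):

// Distance in elements of Int32Ty, or std::nullopt if incomparable.
std::optional<int> Diff = getPointersDiff(Int32Ty, PtrA, Int32Ty, PtrB,
                                          DL, SE, /*StrictCheck=*/true);
bool Adjacent = Diff && *Diff == 1;  // PtrB is one element past PtrA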
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
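
A small usage sketch, with Ptrs an assumed range of Value pointers:

// Pair each element with its index, avoiding a manual counter.
for (const auto &E : enumerate(Ptrs))
  dbgs() << E.index() << ": " << *E.value() << "\n";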
unsigned getPointerAddressSpace(const Type *T)
std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
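
A sketch of how a caller typically interprets the result; PSE, AccessTy, Ptr, TheLoop, and SymbolicStrides are assumed to be in scope:

// Stride is measured in units of AccessTy's size.
std::optional<int64_t> Stride =
    getPtrStride(PSE, AccessTy, Ptr, TheLoop, SymbolicStrides);
if (Stride && *Stride == 1)
  ;  // consecutive forward access
else if (Stride && *Stride == -1)
  ;  // consecutive reverse access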
bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...
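
A hedged usage sketch; the exact meaning of the output indices is as this snippet assumes, not confirmed by the brief above:

SmallVector<unsigned> SortedIndices;
if (sortPtrAccesses(VL, ElemTy, DL, SE, SortedIndices)) {
  if (SortedIndices.empty())
    ;  // VL was already in memory order (assumed convention)
  else
    ;  // SortedIndices permutes VL into increasing address order (assumed)
}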
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one,...
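
A sketch of the versioning pattern the description refers to; PSE, SymbolicStrides, and Ptr are assumed to be in scope:

// The returned SCEV assumes unit stride; PSE records the "stride == 1"
// predicate that a runtime check must later verify.
const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, SymbolicStrides, Ptr);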
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
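
For example, with LoadA and LoadB assumed to be load instructions:

// True when B accesses the memory immediately after A.
if (isConsecutiveAccess(LoadA, LoadB, DL, SE))
  ;  // candidate pair for a single wider access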
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
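
This is the query behind decisions like the NeedsFreeze flag recorded for runtime-check pointers; a one-line sketch with an assumed bound value BoundV:

// Freeze only when the value may be undef or poison.
bool NeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(BoundV);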
OutputIt copy(R &&Range, OutputIt Out)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
gep_type_iterator gep_type_begin(const User *GEP)
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
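
A sketch, assuming Ptr and LI are in scope:

// Collect everything Ptr may be based on, looking through phis/selects.
SmallVector<const Value *, 4> Objects;
getUnderlyingObjects(Ptr, Objects, LI);
// Dependence reasoning is easiest when every base is an identified object.
bool AllIdentified =
    all_of(Objects, [](const Value *O) { return isIdentifiedObject(O); });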
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
IR Values for the lower and upper bounds of a pointer evolution.
MDNode * Scope
The tag for alias scope specification (used with noalias).
MDNode * TBAA
The tag for type-based alias analysis.
MDNode * NoAlias
The tag specifying the noalias scope.
A special type used by analysis passes to provide an address that identifies that particular analysis...
Dependence between memory access instructions.
Instruction * getDestination(const MemoryDepChecker &DepChecker) const
Return the destination instruction of the dependence.
DepType Type
The type of the dependence.
bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
Instruction * getSource(const MemoryDepChecker &DepChecker) const
Return the source instruction of the dependence.
bool isForward() const
Lexically forward dependence.
bool isBackward() const
Lexically backward dependence.
void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
DepType
The type of the dependence.
@ BackwardVectorizableButPreventsForwarding
@ ForwardButPreventsForwarding
static const char * DepName[]
String version of the types.
static VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
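
A sketch of consuming recorded dependences; Deps and DepChecker are assumed to come from a MemoryDepChecker run:

for (const MemoryDepChecker::Dependence &Dep : Deps) {
  Instruction *Src = Dep.getSource(DepChecker);
  Instruction *Dst = Dep.getDestination(DepChecker);
  // Backward and Unknown kinds are what runtime checks must rule out.
  if (Dep.isPossiblyBackward())
    Dep.print(dbgs(), /*Depth=*/2, DepChecker.getMemoryInstructions());
  (void)Src; (void)Dst;
}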
unsigned AddressSpace
Address space of the involved pointers.
bool addPointer(unsigned Index, const RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g.
RuntimeCheckingPtrGroup(unsigned Index, const RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
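
A sketch of how grouping proceeds; I, J, and RtCheck are assumed to be two pointer indices and the checking object:

// Seed a group with pointer I, then try to widen its [Low, High] bounds
// to also cover pointer J; addPointer refuses merges that would be invalid.
RuntimeCheckingPtrGroup Group(I, RtCheck);
if (!Group.addPointer(J, RtCheck))
  ;  // J cannot share bounds with this group and needs its own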
bool IsWritePtr
Holds the information if this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
static const unsigned MaxVectorWidth
Maximum SIMD width.
static unsigned VectorizationFactor
VF as overridden by the user.
static unsigned RuntimeMemoryCheckThreshold
When performing memory disambiguation checks at runtime do not make more than this number of compari...
static bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static unsigned VectorizationInterleave
Interleave factor as overridden by the user.
static bool HoistRuntimeChecks
Function object to check whether the first component of a container supported by std::get (like std::...