#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true> VectorizationFactor(
    "force-vector-width", cl::Hidden,
    cl::desc("Sets the SIMD width. Zero is autoselect."),
    cl::location(VectorizerParams::VectorizationFactor));

static cl::opt<unsigned, true> VectorizationInterleave(
    "force-vector-interleave", cl::Hidden,
    cl::desc("Sets the vectorization interleave count. "
             "Zero is autoselect."),
    cl::location(VectorizerParams::VectorizationInterleave));

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));

static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

static cl::opt<unsigned> MaxDependences(
    "max-dependences", cl::Hidden,
    cl::desc("Maximum number of dependences collected by "
             "loop-access analysis (default = 100)"),
    cl::init(100));

static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

static cl::opt<bool, true> HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc("Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true));
return ::VectorizationInterleave.getNumOccurrences() > 0;

if (SI == PtrToStride.end())
const SCEV *StrideSCEV = SI->second;
assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
<< " by: " << *Expr << "\n");

NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {

Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId,
ScStart = ScEnd = PtrExpr;
assert(AR && "Invalid addrec expression");
if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
  if (CStep->getValue()->isNegative())
Type *IdxTy = DL.getIndexType(Ptr->getType());
Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
bool RuntimePointerChecking::tryToCreateDiffCheck(

if (AccSrc.size() != 1 || AccSink.size() != 1)
if (AccSink[0] < AccSrc[0])
auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
SinkAR->getLoop()->getHeader()->getModule()->getDataLayout();
std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
    Step->getAPInt().abs() != AllocSize)
if (Step->getValue()->isNegative())
if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
    isa<SCEVCouldNotCompute>(SrcStartInt))
const Loop *InnerLoop = SrcAR->getLoop();
isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
const Loop *StartARLoop = SrcStartAR->getLoop();
if (StartARLoop == SinkStartAR->getLoop() &&
    SrcStartAR->getStepRecurrence(*SE) != SinkStartAR->getStepRecurrence(*SE)) {
  LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
                       "cannot be hoisted out of the outer loop\n");
<< "SrcStart: " << *SrcStartInt << '\n'
<< "SinkStartInt: " << *SinkStartInt << '\n');
DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
                        Src->NeedsFreeze || Sink->NeedsFreeze);

CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
Checks.push_back(std::make_pair(&CGI, &CGJ));
void RuntimePointerChecking::generateChecks(
groupChecks(DepCands, UseDependencies);

for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
  for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)

if (C->getValue()->isNegative())
RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
"all pointers in a checking group must be in the same address space");

void RuntimePointerChecking::groupChecks(
if (!UseDependencies) {
unsigned TotalComparisons = 0;
Iter.first->second.push_back(Index);
auto PointerI = PositionMap.find(MI->getPointer());
"pointer in equivalence class not found in PositionMap");
for (unsigned Pointer : PointerI->second) {
  if (Group.addPointer(Pointer, *this)) {

return (PtrToPartition[PtrIdx1] != -1 &&
        PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);

unsigned Depth) const {
for (const auto &Check : Checks) {
  const auto &First = Check.first->Members, &Second = Check.second->Members;
  for (unsigned K = 0; K < First.size(); ++K)
  for (unsigned K = 0; K < Second.size(); ++K)
OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
for (unsigned J = 0; J < CG.Members.size(); ++J) {
class AccessAnalysis {
    : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
      LoopAliasScopes(LoopAliasScopes) {
  BAA.enableCrossIterationMode();

  AST.add(adjustLoc(Loc));
  Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
  ReadOnlyPtr.insert(Ptr);

  AST.add(adjustLoc(Loc));
  Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);

  MemAccessInfo Access, Type *AccessTy,
  Loop *TheLoop, unsigned &RunningDepId,
  unsigned ASId, bool ShouldCheckStride, bool Assume);

  Value *&UncomputablePtr, bool ShouldCheckWrap = false);

  void buildDependenceSets() {
    processMemAccesses();

  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

  return UnderlyingObjects;
  return LoopAliasScopes.contains(cast<MDNode>(Scope));

  void processMemAccesses();

  PtrAccessMap Accesses;
  MemAccessInfoList CheckDeps;
  bool IsRTCheckAnalysisNeeded = false;
const SCEV *PtrScev, Loop *L, bool Assume) {

int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);

while (!WorkList.empty()) {
  auto *PN = dyn_cast<PHINode>(Ptr);
  if (PN && InnermostLoop.contains(PN->getParent()) &&
      PN->getParent() != InnermostLoop.getHeader()) {
    for (const Use &Inc : PN->incoming_values())

if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
    !isa<Instruction>(Ptr) || Depth == 0) {

auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
  case Instruction::Add:
  case Instruction::Sub:

unsigned Opcode = I->getOpcode();
case Instruction::GetElementPtr: {
  Type *SourceTy = GEP->getSourceElementType();
  if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
  bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
                     any_of(OffsetScevs, UndefPoisonCheck);
  if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
  else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
    ScevList.emplace_back(Scev, NeedsFreeze);
  ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
  ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
case Instruction::Select: {
  if (ChildScevs.size() == 2) {
    ScevList.push_back(ChildScevs[0]);
    ScevList.push_back(ChildScevs[1]);
case Instruction::PHI: {
  if (I->getNumOperands() == 2) {
  if (ChildScevs.size() == 2) {
    ScevList.push_back(ChildScevs[0]);
    ScevList.push_back(ChildScevs[1]);
case Instruction::Add:
case Instruction::Sub: {
  any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
  if (LScevs.size() == 2 && RScevs.size() == 1)
  else if (RScevs.size() == 2 && LScevs.size() == 1)
    ScevList.emplace_back(Scev, NeedsFreeze);
  ScevList.emplace_back(GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
  ScevList.emplace_back(GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),

LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");

if (Scevs.size() == 2 &&
    (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
    (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
MemAccessInfo Access, Type *AccessTy,
Loop *TheLoop, unsigned &RunningDepId,
unsigned ASId, bool ShouldCheckWrap,

for (auto &P : TranslatedPtrs) {
  const SCEV *PtrExpr = get<0>(P);
  if (ShouldCheckWrap) {
    if (TranslatedPtrs.size() > 1)
    if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
      if (!Assume || !isa<SCEVAddRecExpr>(Expr))
  if (TranslatedPtrs.size() == 1)

for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
  if (isDependencyCheckNeeded()) {
    unsigned &LeaderId = DepSetId[Leader];
    LeaderId = RunningDepId++;
    DepId = RunningDepId++;
  bool IsWrite = Access.getInt();
  RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,

Value *&UncomputablePtr, bool ShouldCheckWrap) {
bool CanDoRT = true;
bool MayNeedRTCheck = false;
if (!IsRTCheckAnalysisNeeded) return true;
bool IsDepCheckNeeded = isDependencyCheckNeeded();
for (auto &AS : AST) {
  int NumReadPtrChecks = 0;
  int NumWritePtrChecks = 0;
  bool CanDoAliasSetRT = true;
  auto ASPointers = AS.getPointers();
  unsigned RunningDepId = 1;
  for (const Value *Ptr_ : ASPointers) {
    bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
    ++NumWritePtrChecks;
  if (NumWritePtrChecks == 0 ||
      (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
    assert((ASPointers.size() <= 1 ||
    MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
    return DepCands.findValue(AccessWrite) == DepCands.end();
    "Can only skip updating CanDoRT below, if all entries in AS "
    "are reads or there is at most 1 entry");
  for (auto &Access : AccessInfos) {
    for (const auto &AccessTy : Accesses[Access]) {
      if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                DepSetId, TheLoop, RunningDepId, ASId,
                                ShouldCheckWrap, false)) {
        << *Access.getPointer() << '\n');
        CanDoAliasSetRT = false;
  bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
  if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
    CanDoAliasSetRT = true;
    for (auto Retry : Retries) {
      MemAccessInfo Access = Retry.first;
      Type *AccessTy = Retry.second;
      if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                DepSetId, TheLoop, RunningDepId, ASId,
                                ShouldCheckWrap, true)) {
        CanDoAliasSetRT = false;
        UncomputablePtr = Access.getPointer();
  CanDoRT &= CanDoAliasSetRT;
  MayNeedRTCheck |= NeedsAliasSetRTCheck;

unsigned NumPointers = RtCheck.Pointers.size();
for (unsigned i = 0; i < NumPointers; ++i) {
  for (unsigned j = i + 1; j < NumPointers; ++j) {
    if (RtCheck.Pointers[i].DependencySetId ==
        RtCheck.Pointers[j].DependencySetId)
    dbgs() << "LAA: Runtime check would require comparison between"
              " different address spaces\n");

if (MayNeedRTCheck && CanDoRT)
<< " pointer comparisons.\n");
bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
if (!CanDoRTIfNeeded)
return CanDoRTIfNeeded;
void AccessAnalysis::processMemAccesses() {
LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
for (auto A : Accesses)
  dbgs() << "\t" << *A.first.getPointer() << " ("
         << (A.first.getInt()
             : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"

for (const auto &AS : AST) {
  auto ASPointers = AS.getPointers();
  bool SetHasWrite = false;
  UnderlyingObjToAccessMap ObjToLastAccess;
  PtrAccessMap DeferredAccesses;
  for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
    bool UseDeferred = SetIteration > 0;
    PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
    for (const Value *Ptr_ : ASPointers) {
      for (const auto &AC : S) {
        if (AC.first.getPointer() != Ptr)
        bool IsWrite = AC.first.getInt();
        bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
        if (UseDeferred && !IsReadOnlyPtr)
        assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                S.count(MemAccessInfo(Ptr, false))) &&
               "Alias-set pointer not in the access set?");
        MemAccessInfo Access(Ptr, IsWrite);
        if (!UseDeferred && IsReadOnlyPtr) {
          DeferredAccesses.insert({Access, {}});
        if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
          CheckDeps.push_back(Access);
          IsRTCheckAnalysisNeeded = true;
        ValueVector TempObjects;
        UnderlyingObjects[Ptr] = {};
        << "Underlying objects for pointer " << *Ptr << "\n");
        for (const Value *UnderlyingObj : UOs) {
          if (isa<ConstantPointerNull>(UnderlyingObj) &&
          UnderlyingObjToAccessMap::iterator Prev =
              ObjToLastAccess.find(UnderlyingObj);
          if (Prev != ObjToLastAccess.end())
            DepCands.unionSets(Access, Prev->second);
          ObjToLastAccess[UnderlyingObj] = Access;
auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
if (!GEP || !GEP->isInBounds())
Value *NonConstIndex = nullptr;
if (!isa<ConstantInt>(Index)) {
  NonConstIndex = Index;
if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
  if (OBO->hasNoSignedWrap() &&
      isa<ConstantInt>(OBO->getOperand(1))) {
    auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
    if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
      return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);

bool Assume, bool ShouldCheckWrap) {
if (isa<ScalableVectorType>(AccessTy)) {
  LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
  return std::nullopt;
<< " SCEV: " << *PtrScev << "\n");
return std::nullopt;
LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                  << *Ptr << " SCEV: " << *AR << "\n");
return std::nullopt;
<< " SCEV: " << *AR << "\n");
return std::nullopt;
TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
const APInt &APStepVal = C->getAPInt();
return std::nullopt;
int64_t Stride = StepVal / Size;
int64_t Rem = StepVal % Size;
return std::nullopt;
if (!ShouldCheckWrap)
if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
(Stride == 1 || Stride == -1))
<< "LAA: Pointer: " << *Ptr << "\n"
<< "LAA: SCEV: " << *AR << "\n"
<< "LAA: Added an overflow assumption\n");
dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
       << *Ptr << " SCEV: " << *AR << "\n");
return std::nullopt;
assert(PtrA && PtrB && "Expected non-nullptr pointers.");
return std::nullopt;
return std::nullopt;
unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
if (PtrA1 == PtrB1) {
  ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
  ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
  return std::nullopt;
  IdxWidth = DL.getIndexSizeInBits(ASA);
  OffsetA = OffsetA.sextOrTrunc(IdxWidth);
dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
return std::nullopt;
Val = Diff->getAPInt().getSExtValue();
int Size = DL.getTypeStoreSize(ElemTyA);
int Dist = Val / Size;
if (!StrictCheck || Dist * Size == Val)
return std::nullopt;

"Expected list of pointer operands.");
Value *Ptr0 = VL[0];
using DistOrdPair = std::pair<int64_t, int>;
std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
Offsets.emplace(0, 0);
bool IsConsecutive = true;
auto Res = Offsets.emplace(Offset, Cnt);
IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
SortedIndices.clear();
if (!IsConsecutive) {
  for (const std::pair<int64_t, int> &Pair : Offsets) {
    SortedIndices[Cnt] = Pair.second;

std::optional<int> Diff =
return Diff && *Diff == 1;

Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
InstMap.push_back(SI);
Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
InstMap.push_back(LI);
case ForwardButPreventsForwarding:
case IndirectUnsafe:
case BackwardVectorizable:
case BackwardVectorizableButPreventsForwarding:

case ForwardButPreventsForwarding:
case BackwardVectorizable:
case BackwardVectorizableButPreventsForwarding:
case IndirectUnsafe:

bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
uint64_t MaxVFWithoutSLForwardIssues = std::min(
for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
  if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
    MaxVFWithoutSLForwardIssues = (VF >> 1);
if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
  dbgs() << "LAA: Distance " << Distance
         << " that could cause a store-load forwarding conflict\n");
if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
    MaxVFWithoutSLForwardIssues !=
  MinDepDistBytes = MaxVFWithoutSLForwardIssues;
const SCEV &BackedgeTakenCount,
const uint64_t ByteStride = MaxStride * TypeByteSize;
const SCEV *CastedDist = &Dist;
const SCEV *CastedProduct = Product;
if (DistTypeSizeBits > ProductTypeSizeBits)

assert(Stride > 1 && "The stride must be greater than 1");
assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
assert(Distance > 0 && "The distance must be non-zero");
if (Distance % TypeByteSize)
uint64_t ScaledDist = Distance / TypeByteSize;
return ScaledDist % Stride;

return any_of(UnderlyingObjects, [&SE, L](const Value *UO) {

struct DepDistanceStrideAndSizeInfo {
  DepDistanceStrideAndSizeInfo(const SCEV *Dist, uint64_t StrideA,
                               bool AIsWrite, bool BIsWrite)
      : Dist(Dist), StrideA(StrideA), StrideB(StrideB),
        TypeByteSize(TypeByteSize), AIsWrite(AIsWrite), BIsWrite(BIsWrite) {}

DepDistanceStrideAndSizeInfo>
auto &SE = *PSE.getSE();
auto [APtr, AIsWrite] = A;
auto [BPtr, BIsWrite] = B;
if (!AIsWrite && !BIsWrite)
if (APtr->getType()->getPointerAddressSpace() !=
    BPtr->getType()->getPointerAddressSpace())
int64_t StrideAPtr =
    getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true).value_or(0);
int64_t StrideBPtr =
    getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true).value_or(0);
if (StrideAPtr < 0) {
LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
                  << "(Induction step: " << StrideAPtr << ")\n");
LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
                  << ": " << *Dist << "\n");
if (!StrideAPtr || !StrideBPtr || (StrideAPtr > 0 && StrideBPtr < 0) ||
    (StrideAPtr < 0 && StrideBPtr > 0)) {
  LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
return DepDistanceStrideAndSizeInfo(Dist, std::abs(StrideAPtr),
                                    std::abs(StrideBPtr), TypeByteSize,
                                    AIsWrite, BIsWrite);
&UnderlyingObjects) {
assert(AIdx < BIdx && "Must pass arguments in program order");
A, InstMap[AIdx], B, InstMap[BIdx], Strides, UnderlyingObjects, PSE,
if (std::holds_alternative<Dependence::DepType>(Res))
  return std::get<Dependence::DepType>(Res);
auto &[Dist, StrideA, StrideB, TypeByteSize, AIsWrite, BIsWrite] =
    std::get<DepDistanceStrideAndSizeInfo>(Res);
bool HasSameSize = TypeByteSize > 0;
std::optional<uint64_t> CommonStride =
    StrideA == StrideB ? std::make_optional(StrideA) : std::nullopt;
if (isa<SCEVCouldNotCompute>(Dist)) {
  FoundNonConstantDistanceDependence |= !!CommonStride;
  LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");
uint64_t MaxStride = std::max(StrideA, StrideB);
MaxStride, TypeByteSize))
const APInt &Val = C->getAPInt();
if (std::abs(Distance) > 0 && CommonStride && *CommonStride > 1 &&
LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
                     "different type sizes\n");
bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
FoundNonConstantDistanceDependence |= CommonStride.has_value();
couldPreventStoreLoadForward(C->getAPInt().abs().getZExtValue(),
dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
if (MinDistance <= 0) {
  FoundNonConstantDistanceDependence |= CommonStride.has_value();
if (!isa<SCEVConstant>(Dist)) {
  FoundNonConstantDistanceDependence |= CommonStride.has_value();
LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
                     "different type sizes\n");
unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;
if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
  if (!isa<SCEVConstant>(Dist)) {
  LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
                    << MinDistance << '\n');
if (MinDistanceNeeded > MinDepDistBytes) {
  << MinDistanceNeeded << " size in bytes\n");
std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
uint64_t MinDepDistBytesOld = MinDepDistBytes;
isa<SCEVConstant>(Dist) &&
couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {
assert(MinDepDistBytes == MinDepDistBytesOld &&
       "An update to MinDepDistBytes requires an update to "
       "MaxSafeVectorWidthInBits");
(void)MinDepDistBytesOld;
uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);
LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
                  << " with max VF = " << MaxVF << '\n');
uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
if (!isa<SCEVConstant>(Dist) && MaxVFInBits < MaxTargetVectorWidthInBits) {
MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
&UnderlyingObjects) {
MinDepDistBytes = -1;
if (Visited.count(CurAccess))
bool AIIsWrite = AI->getInt();
(AIIsWrite ? AI : std::next(AI));
for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
     I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
  for (std::vector<unsigned>::iterator
       I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
       I2E = (OI == AI ? I1E : Accesses[*OI].end());
    auto A = std::make_pair(&*AI, *I1);
    auto B = std::make_pair(&*OI, *I2);
    isDependent(*A.first, A.second, *B.first, B.second, Strides,
    if (RecordDependences) {
      RecordDependences = false;
      Dependences.clear();
      << "Too many dependences, stopped recording\n");
LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");

auto &IndexVector = Accesses.find(Access)->second;
std::back_inserter(Insts),
[&](unsigned Idx) { return this->InstMap[Idx]; });

"ForwardButPreventsForwarding",
"BackwardVectorizable",
"BackwardVectorizableButPreventsForwarding"};
bool LoopAccessInfo::canAnalyzeLoop() {
recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
dbgs() << "LAA: loop control flow is not understood by analyzer\n");
recordAnalysis("CFGNotUnderstood")
    << "loop control flow is not understood by analyzer";
if (isa<SCEVCouldNotCompute>(ExitCount)) {
  recordAnalysis("CantComputeNumberOfIterations")
      << "could not determine number of loop iterations";
  LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");

unsigned NumReads = 0;
unsigned NumReadWrites = 0;
bool HasComplexMemInst = false;
HasConvergentOp = false;
PtrRtChecking->Pointers.clear();
PtrRtChecking->Need = false;
const bool EnableMemAccessVersioningOfLoop =

if (auto *Call = dyn_cast<CallBase>(&I)) {
  if (Call->isConvergent())
    HasConvergentOp = true;
if (HasComplexMemInst && HasConvergentOp) {
if (HasComplexMemInst)
if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
  for (Metadata *Op : Decl->getScopeList()->operands())
    LoopAliasScopes.insert(cast<MDNode>(Op));
auto *Call = dyn_cast<CallInst>(&I);
if (I.mayReadFromMemory()) {
  if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
  auto *Ld = dyn_cast<LoadInst>(&I);
  recordAnalysis("CantVectorizeInstruction", Ld)
      << "instruction cannot be vectorized";
  HasComplexMemInst = true;
  if (!Ld->isSimple() && !IsAnnotatedParallel) {
    recordAnalysis("NonSimpleLoad", Ld)
        << "read with atomic ordering or volatile read";
    HasComplexMemInst = true;
  if (EnableMemAccessVersioningOfLoop)
    collectStridedAccess(Ld);
if (I.mayWriteToMemory()) {
  auto *St = dyn_cast<StoreInst>(&I);
  recordAnalysis("CantVectorizeInstruction", St)
      << "instruction cannot be vectorized";
  HasComplexMemInst = true;
  if (!St->isSimple() && !IsAnnotatedParallel) {
    recordAnalysis("NonSimpleStore", St)
        << "write with atomic ordering or volatile write";
    HasComplexMemInst = true;
  if (EnableMemAccessVersioningOfLoop)
    collectStridedAccess(St);
if (HasComplexMemInst) {
if (!Stores.size()) {

AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,
if (isInvariant(Ptr)) {
  StoresToInvariantAddresses.push_back(ST);
  HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
if (Seen.insert({Ptr, AccessTy}).second) {
if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
[&Accesses, AccessTy, Loc](Value *Ptr) {
  MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
  Accesses.addStore(NewLoc, AccessTy);

if (IsAnnotatedParallel) {
  dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
bool IsReadOnlyPtr = false;
if (Seen.insert({Ptr, AccessTy}).second ||
    !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
  IsReadOnlyPtr = true;
LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                     "load and uniform store to the same address!\n");
HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
[&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
  MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
  Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);

if (NumReadWrites == 1 && NumReads == 0) {
Accesses.buildDependenceSets();
Value *UncomputablePtr = nullptr;
bool CanDoRTIfNeeded =
    Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
                             SymbolicStrides, UncomputablePtr, false);
if (!CanDoRTIfNeeded) {
  auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
  recordAnalysis("CantIdentifyArrayBounds", I)
      << "cannot identify array bounds";
  LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                    << "the array bounds.\n");
dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
if (Accesses.isDependencyCheckNeeded()) {
  DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides,
  Accesses.getUnderlyingObjects());
  Accesses.resetDepChecks(*DepChecker);
  PtrRtChecking->reset();
  PtrRtChecking->Need = true;
  auto *SE = PSE->getSE();
  UncomputablePtr = nullptr;
  CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
      *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
  if (!CanDoRTIfNeeded) {
    auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
    recordAnalysis("CantCheckMemDepsAtRunTime", I)
        << "cannot check memory dependencies at runtime";
    LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
if (HasConvergentOp) {
  recordAnalysis("CantInsertRuntimeCheckWithConvergent")
      << "cannot add control dependency to convergent operation";
  LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
                       "would be needed with a convergent operation\n");
dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
       << (PtrRtChecking->Need ? "" : " don't")
       << " need runtime memory checks.\n");
emitUnsafeDependenceRemark();
void LoopAccessInfo::emitUnsafeDependenceRemark() {
auto Deps = getDepChecker().getDependences();
if (Found == Deps->end())
LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
bool HasForcedDistribution = false;
std::optional<const MDOperand *> Value =
assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
const std::string Info =
    HasForcedDistribution
        ? "unsafe dependent memory operations in loop."
        : "unsafe dependent memory operations in loop. Use "
          "#pragma clang loop distribute(enable) to allow loop distribution "
          "to attempt to isolate the offending operations into a separate "
R << "\nBackward loop carried data dependence.";
R << "\nForward loop carried data dependence that prevents "
     "store-to-load forwarding.";
R << "\nBackward loop carried data dependence that prevents "
     "store-to-load forwarding.";
R << "\nUnsafe indirect dependence.";
R << "\nUnknown data dependence.";
SourceLoc = DD->getDebugLoc();
R << " Memory location is the same as accessed at "
  << ore::NV("Location", SourceLoc);
assert(!Report && "Multiple reports generated");
CodeRegion = I->getParent();
if (I->getDebugLoc())
  DL = I->getDebugLoc();
Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,

auto *SE = PSE->getSE();

std::advance(GEPTI, LastOperand - 2);
if (ElemSize != GEPAllocSize)
for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
  if (i != InductionOperand &&
return GEP->getOperand(InductionOperand);

Value *UniqueCast = nullptr;
for (User *U : Ptr->users()) {
  CastInst *CI = dyn_cast<CastInst>(U);
  if (CI && CI->getType() == Ty) {

auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
if (!PtrTy || PtrTy->isAggregateType())
int64_t PtrAccessSize = 1;
V = C->getOperand();
if (OrigPtr == Ptr) {
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
    if (M->getOperand(0)->getSCEVType() != scConstant)
    const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
    if (PtrAccessSize != StepVal)
    V = M->getOperand(1);
const auto *C = dyn_cast<SCEVIntegralCastExpr>(V);
U = dyn_cast<SCEVUnknown>(C->getOperand());
void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
LLVM_DEBUG(dbgs() << "  Chose not to due to -laa-speculate-unit-stride\n");
const SCEV *CastedStride = StrideExpr;
const SCEV *CastedBECount = BETakenCount;
if (BETypeSizeBits >= StrideTypeSizeBits)
const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
          "Stride==1 predicate will imply that the loop executes "
LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
const SCEV *StrideBase = StrideExpr;
if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
  StrideBase = C->getOperand();
SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);

PtrRtChecking(nullptr), TheLoop(L) {
unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;
MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
std::make_unique<MemoryDepChecker>(*PSE, L, MaxTargetVectorWidthInBits);
PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
if (canAnalyzeLoop()) {
  analyzeLoop(AA, LI, TLI, DT);

OS << " with a maximum safe vector width of "
if (PtrRtChecking->Need)
  OS << " with run-time checks";
if (HasConvergentOp)
for (const auto &Dep : *Dependences) {
PtrRtChecking->print(OS, Depth);
<< "Non vectorizable stores to invariant address were "
<< (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
    HasLoadStoreDependenceInvolvingLoopInvariantAddress
<< "found in loop.\n";

auto I = LoopAccessInfoMap.insert({&L, nullptr});
std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);
return *I.first->second;
static cl::opt< unsigned > MaxDependences("max-dependences", cl::Hidden, cl::desc("Maximum number of dependences collected by " "loop-access analysis (default = 100)"), cl::init(100))
We collect dependences up to this threshold.
static cl::opt< bool > EnableForwardingConflictDetection("store-to-load-forwarding-conflict-detection", cl::Hidden, cl::desc("Enable conflict detection in loop-access analysis"), cl::init(true))
Enable store-to-load forwarding conflict detection.
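Conceptually, this detection caps the vectorization factor so that a vectorized load does not straddle a store issued only a few iterations earlier, which would defeat store-to-load forwarding in hardware. Below is a minimal standalone sketch of that arithmetic, modelled on the couldPreventStoreLoadForward fragments in the listing above; the function name, the MaxVFBytes parameter and the main() driver are illustrative and not the LLVM API.

#include <cstdint>
#include <iostream>

// Illustrative model: a dependence at byte distance `Distance` between a
// store and a later load can stall store-to-load forwarding when the
// vectorized access is not a multiple of that distance and the dependence
// is "close enough" to matter. Candidate VFs (in bytes) are powers of two;
// the bound is halved at the first conflicting VF.
uint64_t maxSafeVFBytes(uint64_t Distance, uint64_t TypeByteSize,
                        uint64_t MaxVFBytes) {
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  uint64_t MaxVFWithoutSLForwardIssues = MaxVFBytes;
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = VF >> 1;
      break;
    }
  }
  return MaxVFWithoutSLForwardIssues;
}

int main() {
  // 4-byte elements, 24-byte dependence distance: 8 bytes per vector is
  // fine (24 % 8 == 0), 16 is not, so the cap comes out at 8.
  std::cout << maxSafeVFBytes(24, 4, 64) << "\n";
}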
static void findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr, SmallVectorImpl< PointerIntPair< const SCEV *, 1, bool > > &ScevList, unsigned Depth)
static std::variant< MemoryDepChecker::Dependence::DepType, DepDistanceStrideAndSizeInfo > getDependenceDistanceStrideAndSize(const AccessAnalysis::MemAccessInfo &A, Instruction *AInst, const AccessAnalysis::MemAccessInfo &B, Instruction *BInst, const DenseMap< Value *, const SCEV * > &Strides, const DenseMap< Value *, SmallVector< const Value *, 16 > > &UnderlyingObjects, PredicatedScalarEvolution &PSE, const Loop *InnermostLoop)
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr, const SCEV *PtrScev, Loop *L, bool Assume)
Check whether a pointer can participate in a runtime bounds check.
static cl::opt< unsigned > MemoryCheckMergeThreshold("memory-check-merge-threshold", cl::Hidden, cl::desc("Maximum number of comparisons done when trying to merge " "runtime memory checks. (default = 100)"), cl::init(100))
The maximum iterations used to merge memory checks.
static bool isNoWrap(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &Strides, Value *Ptr, Type *AccessTy, Loop *L)
Check whether a pointer address cannot wrap.
static const SCEV * getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
Get the stride of a pointer access in a loop.
static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep)
Find the operand of the GEP that should be checked for consecutive stores.
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE, const SCEV &BackedgeTakenCount, const SCEV &Dist, uint64_t MaxStride, uint64_t TypeByteSize)
Given a dependence-distance Dist between two memory accesses, that have strides in the same direction...
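A scalar model of this test, assuming the distance, backedge-taken count, stride and element size are known constants (the real routine works on SCEV expressions and handles the type-width and boundary details that this sketch glosses over):

#include <cstdint>
#include <iostream>

// If the distance between two accesses exceeds the total number of bytes
// either access can move over the whole loop (backedge-taken count times
// max byte stride), the accesses can never overlap for any vectorization
// factor. Simplified illustration only.
bool isSafeDependenceDistance(uint64_t BackedgeTakenCount, uint64_t Dist,
                              uint64_t MaxStride, uint64_t TypeByteSize) {
  const uint64_t ByteStride = MaxStride * TypeByteSize;
  const uint64_t Product = BackedgeTakenCount * ByteStride;
  return Dist > Product;
}

int main() {
  // 100 iterations, stride 1, 4-byte elements: a 512-byte distance is safe,
  // a 400-byte distance is not.
  std::cout << isSafeDependenceDistance(100, 512, 1, 4) << " "
            << isSafeDependenceDistance(100, 400, 1, 4) << "\n";
}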
static cl::opt< unsigned, true > VectorizationInterleave("force-vector-interleave", cl::Hidden, cl::desc("Sets the vectorization interleave count. " "Zero is autoselect."), cl::location(VectorizerParams::VectorizationInterleave))
static bool isLoopVariantIndirectAddress(ArrayRef< const Value * > UnderlyingObjects, ScalarEvolution &SE, const Loop *L)
Returns true if any of the underlying objects has a loop varying address, i.e.
static Value * getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty)
If a value has only one user that is a CastInst, return it.
static cl::opt< bool, true > HoistRuntimeChecks("hoist-runtime-checks", cl::Hidden, cl::desc("Hoist inner loop runtime memory checks to outer loop if possible"), cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true))
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
static cl::opt< unsigned, true > RuntimeMemoryCheckThreshold("runtime-memory-check-threshold", cl::Hidden, cl::desc("When performing memory disambiguation checks at runtime do not " "generate more than this number of comparisons (default = 8)."), cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8))
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, function_ref< void(Value *)> AddPointer)
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, PredicatedScalarEvolution &PSE, const Loop *L)
Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e.
static Value * stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
If the argument is a GEP, then returns the operand identified by getGEPInductionOperand.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
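The corresponding arithmetic is visible in the listing above (Distance % TypeByteSize, then ScaledDist % Stride). A self-contained re-statement of it, with illustrative names:

#include <cstdint>
#include <iostream>

// Two accesses with a common stride `Stride` (in elements) and constant
// byte distance `Distance` are independent when the element-scaled distance
// is not a multiple of the stride.
bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                   uint64_t TypeByteSize) {
  // Conservatively give up when the distance is not a whole number of
  // elements.
  if (Distance % TypeByteSize)
    return false;
  uint64_t ScaledDist = Distance / TypeByteSize;
  // With stride 3, element distances of 1 or 2 mean the address sequences
  // {0,3,6,...} and {1,4,7,...} never meet; a distance of 3 means they do.
  return ScaledDist % Stride;
}

int main() {
  std::cout << areStridedAccessesIndependent(4, 3, 4) << " "   // 1 element apart
            << areStridedAccessesIndependent(12, 3, 4) << "\n"; // 3 elements apart
}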
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))
static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))
static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the following:
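For instance (an illustrative example, not taken from the LLVM sources), a loop whose stride is a runtime value that is almost always one:

// LAA can version this loop on the predicate Stride == 1: the main copy
// then sees a unit stride and can be vectorized, while the original loop is
// kept as a fallback for other stride values.
void saxpy_strided(float *A, const float *B, long N, long Stride) {
  for (long i = 0; i < N; ++i)
    A[i * Stride] += B[i * Stride];
}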
A manager for alias analyses.
Class for arbitrary precision integers.
unsigned getBitWidth() const
Return the number of bits in the APInt.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
int64_t getSExtValue() const
Get sign extended value.
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
API to communicate dependencies between analyses during invalidation.
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This is the base class for all instructions that perform data casts.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
iterator findValue(const ElemTy &V) const
findValue - Return an iterator to the specified value.
iterator insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
typename std::set< ECValue, ECValueComparator >::const_iterator iterator
iterator* - Provides a way to iterate over all values in the set.
member_iterator member_begin(iterator I) const
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Type * getResultElementType() const
PointerType * getType() const
Global values are always pointers.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
An instruction for reading from memory.
Value * getPointerOperand()
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
Result run(Function &F, FunctionAnalysisManager &AM)
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const LoopAccessInfo & getInfo(Loop &L)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
the Memory Dependence Checker which can determine the loop-independent and loop-carried dependences b...
bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Represents a single loop in the control flow graph.
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
ArrayRef< MDOperand > operands() const
Tracking metadata reference owned by Metadata.
This class implements a map that also provides access to all stored values in a deterministic order.
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps, const DenseMap< Value *, const SCEV * > &Strides, const DenseMap< Value *, SmallVector< const Value *, 16 > > &UnderlyingObjects)
Check whether the dependencies between the accesses are safe.
const SmallVectorImpl< Instruction * > & getMemoryInstructions() const
The vector of memory access instructions.
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
const SmallVectorImpl< Dependence > * getDependences() const
Returns the memory dependences.
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
@ PossiblySafeWithRtChecks
bool shouldRetryWithRuntimeCheck() const
In same cases when the dependency check fails we can still vectorize the loop with a dynamic array ac...
void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
const Value * Ptr
The address of the start of the location.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the list run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store it.
friend struct RuntimeCheckingPtrGroup
static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStart() const
const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
const Loop * getLoop() const
This class represents a constant integer value.
This is the base class for unary integral cast operator classes.
This node represents multiplication of some number of SCEVs.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
StringRef getName() const
Return a constant reference to the value's name.
constexpr ScalarTy getFixedValue() const
constexpr bool isNonZero() const
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
Type * getIndexedType() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
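The element-distance computation boils down to the strict-divisibility check visible in the listing (Dist * Size == Val). A scalar sketch of that final step, assuming the byte offsets from a common base have already been computed (the real routine derives them by walking constant GEP offsets or via SCEV):

#include <cstdint>
#include <iostream>
#include <optional>

// Distance in elements of size `ElemSize` between two offsets. With
// `StrictCheck` the byte distance must be an exact multiple of the element
// size. Names are illustrative, not the LLVM signature.
std::optional<int64_t> pointersDiff(int64_t OffsetA, int64_t OffsetB,
                                    int64_t ElemSize, bool StrictCheck) {
  int64_t Val = OffsetB - OffsetA;
  int64_t Dist = Val / ElemSize;
  if (!StrictCheck || Dist * ElemSize == Val)
    return Dist;
  return std::nullopt;
}

int main() {
  std::cout << *pointersDiff(0, 16, 4, true) << "\n";           // 4 elements
  std::cout << pointersDiff(0, 6, 4, true).has_value() << "\n"; // not exact
}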
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
unsigned getPointerAddressSpace(const Type *T)
std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...
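The sorting scheme matches the fragments in the listing above: offsets relative to the first pointer are kept in a std::set ordered by offset, remembering each pointer's original position, and consecutiveness falls out of checking that every insertion lands at the end. A standalone sketch under the assumption that the offsets are already known (the real routine computes them with getPointersDiff); names are illustrative:

#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

bool sortByOffset(const std::vector<int64_t> &Offsets,
                  std::vector<unsigned> &SortedIndices) {
  using DistOrdPair = std::pair<int64_t, unsigned>;
  auto Compare = [](const DistOrdPair &L, const DistOrdPair &R) {
    return L.first < R.first;
  };
  std::set<DistOrdPair, decltype(Compare)> Sorted(Compare);
  bool IsConsecutive = true;
  for (unsigned Cnt = 0; Cnt < Offsets.size(); ++Cnt) {
    auto Res = Sorted.emplace(Offsets[Cnt], Cnt);
    if (!Res.second)
      return false; // duplicate offset, give up
    IsConsecutive = IsConsecutive && std::next(Res.first) == Sorted.end();
  }
  SortedIndices.clear();
  if (!IsConsecutive) {
    SortedIndices.resize(Offsets.size());
    unsigned Cnt = 0;
    for (const DistOrdPair &Pair : Sorted)
      SortedIndices[Cnt++] = Pair.second;
  }
  return true;
}

int main() {
  std::vector<unsigned> Idx;
  // Byte offsets 0, 8, 4: sorted order is pointer 0, 2, 1.
  if (sortByOffset({0, 8, 4}, Idx))
    for (unsigned I : Idx)
      std::cout << I << " ";
  std::cout << "\n";
}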
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one,...
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
OutputIt copy(R &&Range, OutputIt Out)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
gep_type_iterator gep_type_begin(const User *GEP)
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
MDNode * Scope
The tag for alias scope specification (used with noalias).
MDNode * TBAA
The tag for type-based alias analysis.
MDNode * NoAlias
The tag specifying the noalias scope.
A special type used by analysis passes to provide an address that identifies that particular analysis...
Dependece between memory access instructions.
Instruction * getDestination(const MemoryDepChecker &DepChecker) const
Return the destination instruction of the dependence.
DepType Type
The type of the dependence.
bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
Instruction * getSource(const MemoryDepChecker &DepChecker) const
Return the source instruction of the dependence.
bool isForward() const
Lexically forward dependence.
bool isBackward() const
Lexically backward dependence.
void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
DepType
The type of the dependence.
@ BackwardVectorizableButPreventsForwarding
@ ForwardButPreventsForwarding
static const char * DepName[]
String version of the types.
static VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
unsigned AddressSpace
Address space of the involved pointers.
bool addPointer(unsigned Index, RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
RuntimeCheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
bool IsWritePtr
Holds the information if this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
static const unsigned MaxVectorWidth
Maximum SIMD width.
static unsigned VectorizationFactor
VF as overridden by the user.
static unsigned RuntimeMemoryCheckThreshold
When performing memory disambiguation checks at runtime do not make more than this number of comparisons.
static bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static unsigned VectorizationInterleave
Interleave factor as overridden by the user.
static bool HoistRuntimeChecks
Function object to check whether the first component of a container supported by std::get (like std::...