98#define DEBUG_TYPE "loop-idiom"
100STATISTIC(NumMemSet,
"Number of memset's formed from loop stores");
101STATISTIC(NumMemCpy,
"Number of memcpy's formed from loop load+stores");
102STATISTIC(NumMemMove,
"Number of memmove's formed from loop load+stores");
104 NumShiftUntilBitTest,
105 "Number of uncountable loops recognized as 'shift until bitttest' idiom");
107 "Number of uncountable loops recognized as 'shift until zero' idiom");
112 cl::desc(
"Options to disable Loop Idiom Recognize Pass."),
119 cl::desc(
"Proceed with loop idiom recognize pass, but do "
120 "not convert loop(s) to memset."),
127 cl::desc(
"Proceed with loop idiom recognize pass, but do "
128 "not convert loop(s) to memcpy."),
133 "use-lir-code-size-heurs",
134 cl::desc(
"Use loop idiom recognition code size heuristics when compiling"
140class LoopIdiomRecognize {
141 Loop *CurLoop =
nullptr;
150 bool ApplyCodeSizeHeuristics;
151 std::unique_ptr<MemorySSAUpdater> MSSAU;
160 : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI),
TTI(
TTI),
DL(
DL), ORE(ORE) {
162 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
165 bool runOnLoop(
Loop *L);
171 StoreListMap StoreRefsForMemset;
172 StoreListMap StoreRefsForMemsetPattern;
173 StoreList StoreRefsForMemcpy;
175 bool HasMemsetPattern;
179 enum LegalStoreKind {
184 UnorderedAtomicMemcpy,
192 bool runOnCountableLoop();
197 LegalStoreKind isLegalStore(
StoreInst *SI);
198 enum class ForMemset {
No,
Yes };
202 template <
typename MemInst>
203 bool processLoopMemIntrinsic(
205 bool (LoopIdiomRecognize::*Processor)(MemInst *,
const SCEV *),
206 const SCEV *BECount);
210 bool processLoopStridedStore(
Value *DestPtr,
const SCEV *StoreSizeSCEV,
215 bool IsNegStride,
bool IsLoopMemset =
false);
216 bool processLoopStoreOfLoopLoad(
StoreInst *SI,
const SCEV *BECount);
217 bool processLoopStoreOfLoopLoad(
Value *DestPtr,
Value *SourcePtr,
223 const SCEV *BECount);
224 bool avoidLIRForMultiBlockLoop(
bool IsMemset =
false,
225 bool IsLoopMemset =
false);
231 bool runOnNoncountableLoop();
233 bool recognizePopcount();
236 bool recognizeAndInsertFFS();
241 bool IsCntPhiUsedOutsideLoop);
243 bool recognizeShiftUntilBitTest();
244 bool recognizeShiftUntilZero();
256 const auto *
DL = &L.getHeader()->getModule()->getDataLayout();
263 LoopIdiomRecognize LIR(&AR.
AA, &AR.
DT, &AR.
LI, &AR.
SE, &AR.
TLI, &AR.
TTI,
265 if (!LIR.runOnLoop(&L))
276 I->eraseFromParent();
285bool LoopIdiomRecognize::runOnLoop(
Loop *L) {
289 if (!
L->getLoopPreheader())
294 if (
Name ==
"memset" ||
Name ==
"memcpy")
298 ApplyCodeSizeHeuristics =
301 HasMemset = TLI->
has(LibFunc_memset);
302 HasMemsetPattern = TLI->
has(LibFunc_memset_pattern16);
303 HasMemcpy = TLI->
has(LibFunc_memcpy);
305 if (HasMemset || HasMemsetPattern || HasMemcpy)
307 return runOnCountableLoop();
309 return runOnNoncountableLoop();
312bool LoopIdiomRecognize::runOnCountableLoop() {
314 assert(!isa<SCEVCouldNotCompute>(BECount) &&
315 "runOnCountableLoop() called on a loop without a predictable"
316 "backedge-taken count");
320 if (
const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
321 if (BECst->getAPInt() == 0)
339 bool MadeChange =
false;
347 MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
371 if (!
C || isa<ConstantExpr>(
C))
380 if (
DL->isBigEndian())
396 unsigned ArraySize = 16 /
Size;
401LoopIdiomRecognize::LegalStoreKind
402LoopIdiomRecognize::isLegalStore(
StoreInst *SI) {
404 if (
SI->isVolatile())
405 return LegalStoreKind::None;
407 if (!
SI->isUnordered())
408 return LegalStoreKind::None;
411 if (
SI->getMetadata(LLVMContext::MD_nontemporal))
412 return LegalStoreKind::None;
414 Value *StoredVal =
SI->getValueOperand();
415 Value *StorePtr =
SI->getPointerOperand();
420 return LegalStoreKind::None;
428 return LegalStoreKind::None;
434 dyn_cast<SCEVAddRecExpr>(SE->
getSCEV(StorePtr));
436 return LegalStoreKind::None;
439 if (!isa<SCEVConstant>(StoreEv->
getOperand(1)))
440 return LegalStoreKind::None;
451 bool UnorderedAtomic =
SI->isUnordered() && !
SI->isSimple();
460 return LegalStoreKind::Memset;
467 return LegalStoreKind::MemsetPattern;
475 unsigned StoreSize =
DL->getTypeStoreSize(
SI->getValueOperand()->getType());
476 if (StoreSize != Stride && StoreSize != -Stride)
477 return LegalStoreKind::None;
480 LoadInst *LI = dyn_cast<LoadInst>(
SI->getValueOperand());
484 return LegalStoreKind::None;
487 return LegalStoreKind::None;
495 return LegalStoreKind::None;
499 return LegalStoreKind::None;
502 UnorderedAtomic = UnorderedAtomic || LI->
isAtomic();
503 return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
504 : LegalStoreKind::Memcpy;
507 return LegalStoreKind::None;
510void LoopIdiomRecognize::collectStores(
BasicBlock *BB) {
511 StoreRefsForMemset.clear();
512 StoreRefsForMemsetPattern.clear();
513 StoreRefsForMemcpy.clear();
520 switch (isLegalStore(SI)) {
521 case LegalStoreKind::None:
524 case LegalStoreKind::Memset: {
527 StoreRefsForMemset[
Ptr].push_back(SI);
529 case LegalStoreKind::MemsetPattern: {
532 StoreRefsForMemsetPattern[
Ptr].push_back(SI);
534 case LegalStoreKind::Memcpy:
535 case LegalStoreKind::UnorderedAtomicMemcpy:
536 StoreRefsForMemcpy.push_back(SI);
539 assert(
false &&
"unhandled return value");
548bool LoopIdiomRecognize::runOnLoopBlock(
558 bool MadeChange =
false;
565 for (
auto &SL : StoreRefsForMemset)
566 MadeChange |= processLoopStores(SL.second, BECount, ForMemset::Yes);
568 for (
auto &SL : StoreRefsForMemsetPattern)
569 MadeChange |= processLoopStores(SL.second, BECount, ForMemset::No);
572 for (
auto &SI : StoreRefsForMemcpy)
573 MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);
575 MadeChange |= processLoopMemIntrinsic<MemCpyInst>(
576 BB, &LoopIdiomRecognize::processLoopMemCpy, BECount);
577 MadeChange |= processLoopMemIntrinsic<MemSetInst>(
578 BB, &LoopIdiomRecognize::processLoopMemSet, BECount);
585 const SCEV *BECount, ForMemset For) {
593 for (
unsigned i = 0, e = SL.
size(); i < e; ++i) {
594 assert(SL[i]->
isSimple() &&
"Expected only non-volatile stores.");
596 Value *FirstStoredVal = SL[i]->getValueOperand();
597 Value *FirstStorePtr = SL[i]->getPointerOperand();
599 cast<SCEVAddRecExpr>(SE->
getSCEV(FirstStorePtr));
601 unsigned FirstStoreSize =
DL->getTypeStoreSize(SL[i]->getValueOperand()->
getType());
604 if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
609 Value *FirstSplatValue =
nullptr;
610 Constant *FirstPatternValue =
nullptr;
612 if (For == ForMemset::Yes)
617 assert((FirstSplatValue || FirstPatternValue) &&
618 "Expected either splat value or pattern value.");
626 for (j = i + 1;
j <
e; ++
j)
628 for (j = i;
j > 0; --
j)
631 for (
auto &k : IndexQueue) {
632 assert(SL[k]->
isSimple() &&
"Expected only non-volatile stores.");
633 Value *SecondStorePtr = SL[
k]->getPointerOperand();
635 cast<SCEVAddRecExpr>(SE->
getSCEV(SecondStorePtr));
638 if (FirstStride != SecondStride)
641 Value *SecondStoredVal = SL[
k]->getValueOperand();
642 Value *SecondSplatValue =
nullptr;
643 Constant *SecondPatternValue =
nullptr;
645 if (For == ForMemset::Yes)
650 assert((SecondSplatValue || SecondPatternValue) &&
651 "Expected either splat value or pattern value.");
654 if (For == ForMemset::Yes) {
655 if (isa<UndefValue>(FirstSplatValue))
656 FirstSplatValue = SecondSplatValue;
657 if (FirstSplatValue != SecondSplatValue)
660 if (isa<UndefValue>(FirstPatternValue))
661 FirstPatternValue = SecondPatternValue;
662 if (FirstPatternValue != SecondPatternValue)
667 ConsecutiveChain[SL[i]] = SL[
k];
676 bool Changed =
false;
687 unsigned StoreSize = 0;
690 while (Tails.
count(
I) || Heads.count(
I)) {
691 if (TransformedStores.
count(
I))
695 StoreSize +=
DL->getTypeStoreSize(
I->getValueOperand()->getType());
697 I = ConsecutiveChain[
I];
707 if (StoreSize != Stride && StoreSize != -Stride)
710 bool IsNegStride = StoreSize == -Stride;
714 if (processLoopStridedStore(StorePtr, StoreSizeSCEV,
716 HeadStore, AdjacentStores, StoreEv, BECount,
718 TransformedStores.
insert(AdjacentStores.
begin(), AdjacentStores.
end());
728template <
typename MemInst>
729bool LoopIdiomRecognize::processLoopMemIntrinsic(
731 bool (LoopIdiomRecognize::*Processor)(MemInst *,
const SCEV *),
732 const SCEV *BECount) {
733 bool MadeChange =
false;
737 if (MemInst *
MI = dyn_cast<MemInst>(Inst)) {
739 if (!(this->*Processor)(
MI, BECount))
753bool LoopIdiomRecognize::processLoopMemCpy(
MemCpyInst *MCI,
754 const SCEV *BECount) {
765 if (!Dest || !Source)
780 if ((SizeInBytes >> 32) != 0)
786 dyn_cast<SCEVConstant>(StoreEv->
getOperand(1));
788 dyn_cast<SCEVConstant>(LoadEv->
getOperand(1));
789 if (!ConstStoreStride || !ConstLoadStride)
798 if (SizeInBytes != StoreStrideValue && SizeInBytes != -StoreStrideValue) {
801 <<
ore::NV(
"Inst",
"memcpy") <<
" in "
803 <<
" function will not be hoisted: "
804 <<
ore::NV(
"Reason",
"memcpy size is not equal to stride");
809 int64_t StoreStrideInt = StoreStrideValue.
getSExtValue();
812 if (StoreStrideInt != LoadStrideInt)
815 return processLoopStoreOfLoopLoad(
822bool LoopIdiomRecognize::processLoopMemSet(
MemSetInst *MSI,
823 const SCEV *BECount) {
838 if (!Ev || Ev->
getLoop() != CurLoop)
847 if (!PointerStrideSCEV || !MemsetSizeSCEV)
850 bool IsNegStride =
false;
851 const bool IsConstantSize = isa<ConstantInt>(MSI->
getLength());
853 if (IsConstantSize) {
864 if (SizeInBytes != Stride && SizeInBytes != -Stride)
867 IsNegStride = SizeInBytes == -Stride;
875 if (
Pointer->getType()->getPointerAddressSpace() != 0) {
888 const SCEV *PositiveStrideSCEV =
891 LLVM_DEBUG(
dbgs() <<
" MemsetSizeSCEV: " << *MemsetSizeSCEV <<
"\n"
892 <<
" PositiveStrideSCEV: " << *PositiveStrideSCEV
895 if (PositiveStrideSCEV != MemsetSizeSCEV) {
898 const SCEV *FoldedPositiveStride =
900 const SCEV *FoldedMemsetSize =
904 <<
" FoldedMemsetSize: " << *FoldedMemsetSize <<
"\n"
905 <<
" FoldedPositiveStride: " << *FoldedPositiveStride
908 if (FoldedPositiveStride != FoldedMemsetSize) {
925 BECount, IsNegStride,
true);
933 const SCEV *BECount,
const SCEV *StoreSizeSCEV,
943 const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount);
944 const SCEVConstant *ConstSize = dyn_cast<SCEVConstant>(StoreSizeSCEV);
945 if (BECst && ConstSize) {
949 if (BEInt && SizeInt)
971 Type *IntPtr,
const SCEV *StoreSizeSCEV,
974 if (!StoreSizeSCEV->
isOne()) {
989 const SCEV *StoreSizeSCEV,
Loop *CurLoop,
991 const SCEV *TripCountSCEV =
1000bool LoopIdiomRecognize::processLoopStridedStore(
1004 const SCEV *BECount,
bool IsNegStride,
bool IsLoopMemset) {
1012 assert((SplatValue || PatternValue) &&
1013 "Expected either splat value or pattern value.");
1024 Type *DestInt8PtrTy = Builder.getPtrTy(DestAS);
1027 bool Changed =
false;
1035 if (!Expander.isSafeToExpand(Start))
1044 Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->
getTerminator());
1056 StoreSizeSCEV, *AA, Stores))
1059 if (avoidLIRForMultiBlockLoop(
true, IsLoopMemset))
1064 const SCEV *NumBytesS =
1065 getNumBytes(BECount, IntIdxTy, StoreSizeSCEV, CurLoop,
DL, SE);
1069 if (!Expander.isSafeToExpand(NumBytesS))
1073 Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->
getTerminator());
1080 AATags = AATags.
merge(
Store->getAAMetadata());
1081 if (
auto CI = dyn_cast<ConstantInt>(NumBytes))
1082 AATags = AATags.
extendTo(CI->getZExtValue());
1088 NewCall = Builder.CreateMemSet(
1089 BasePtr, SplatValue, NumBytes,
MaybeAlign(StoreAlignment),
1094 Type *Int8PtrTy = DestInt8PtrTy;
1096 StringRef FuncName =
"memset_pattern16";
1098 Builder.getVoidTy(), Int8PtrTy, Int8PtrTy, IntIdxTy);
1105 PatternValue,
".memset_pattern");
1108 Value *PatternPtr = GV;
1109 NewCall = Builder.CreateCall(MSP, {
BasePtr, PatternPtr, NumBytes});
1125 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1127 MSSAU->insertDef(cast<MemoryDef>(NewMemAcc),
true);
1131 <<
" from store to: " << *Ev <<
" at: " << *TheStore
1137 R <<
"Transformed loop-strided store in "
1139 <<
" function into a call to "
1142 if (!Stores.empty())
1144 for (
auto *
I : Stores) {
1145 R <<
ore::NV(
"FromBlock",
I->getParent()->getName())
1153 for (
auto *
I : Stores) {
1155 MSSAU->removeMemoryAccess(
I,
true);
1159 MSSAU->getMemorySSA()->verifyMemorySSA();
1161 ExpCleaner.markResultUsed();
1168bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
StoreInst *SI,
1169 const SCEV *BECount) {
1170 assert(
SI->isUnordered() &&
"Expected only non-volatile non-ordered stores.");
1172 Value *StorePtr =
SI->getPointerOperand();
1174 unsigned StoreSize =
DL->getTypeStoreSize(
SI->getValueOperand()->getType());
1177 LoadInst *LI = cast<LoadInst>(
SI->getValueOperand());
1187 return processLoopStoreOfLoopLoad(StorePtr, LoadPtr, StoreSizeSCEV,
1189 StoreEv, LoadEv, BECount);
1193class MemmoveVerifier {
1195 explicit MemmoveVerifier(
const Value &LoadBasePtr,
const Value &StoreBasePtr,
1198 LoadBasePtr.stripPointerCasts(), LoadOff,
DL)),
1200 StoreBasePtr.stripPointerCasts(), StoreOff,
DL)),
1201 IsSameObject(BP1 == BP2) {}
1203 bool loadAndStoreMayFormMemmove(
unsigned StoreSize,
bool IsNegStride,
1205 bool IsMemCpy)
const {
1209 if ((!IsNegStride && LoadOff <= StoreOff) ||
1210 (IsNegStride && LoadOff >= StoreOff))
1216 DL.getTypeSizeInBits(TheLoad.
getType()).getFixedValue() / 8;
1217 if (BP1 != BP2 || LoadSize != int64_t(StoreSize))
1219 if ((!IsNegStride && LoadOff < StoreOff + int64_t(StoreSize)) ||
1220 (IsNegStride && LoadOff + LoadSize > StoreOff))
1228 int64_t LoadOff = 0;
1229 int64_t StoreOff = 0;
1234 const bool IsSameObject;
1238bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
1247 if (isa<MemCpyInlineInst>(TheStore))
1259 bool Changed =
false;
1265 const SCEVConstant *ConstStoreSize = dyn_cast<SCEVConstant>(StoreSizeSCEV);
1268 assert(ConstStoreSize &&
"store size is expected to be a constant");
1271 bool IsNegStride = StoreSize == -Stride;
1284 Value *StoreBasePtr = Expander.expandCodeFor(
1285 StrStart, Builder.getPtrTy(StrAS), Preheader->
getTerminator());
1297 IgnoredInsts.
insert(TheStore);
1299 bool IsMemCpy = isa<MemCpyInst>(TheStore);
1300 const StringRef InstRemark = IsMemCpy ?
"memcpy" :
"load and store";
1302 bool LoopAccessStore =
1304 StoreSizeSCEV, *AA, IgnoredInsts);
1305 if (LoopAccessStore) {
1311 IgnoredInsts.
insert(TheLoad);
1313 BECount, StoreSizeSCEV, *AA, IgnoredInsts)) {
1317 <<
ore::NV(
"Inst", InstRemark) <<
" in "
1319 <<
" function will not be hoisted: "
1320 <<
ore::NV(
"Reason",
"The loop may access store location");
1324 IgnoredInsts.
erase(TheLoad);
1337 Value *LoadBasePtr = Expander.expandCodeFor(LdStart, Builder.getPtrTy(LdAS),
1342 MemmoveVerifier
Verifier(*LoadBasePtr, *StoreBasePtr, *
DL);
1343 if (IsMemCpy && !
Verifier.IsSameObject)
1344 IgnoredInsts.
erase(TheStore);
1346 StoreSizeSCEV, *AA, IgnoredInsts)) {
1349 <<
ore::NV(
"Inst", InstRemark) <<
" in "
1351 <<
" function will not be hoisted: "
1352 <<
ore::NV(
"Reason",
"The loop may access load location");
1357 bool UseMemMove = IsMemCpy ?
Verifier.IsSameObject : LoopAccessStore;
1359 if (!
Verifier.loadAndStoreMayFormMemmove(StoreSize, IsNegStride, *TheLoad,
1363 if (avoidLIRForMultiBlockLoop())
1368 const SCEV *NumBytesS =
1369 getNumBytes(BECount, IntIdxTy, StoreSizeSCEV, CurLoop,
DL, SE);
1372 Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->
getTerminator());
1376 AATags = AATags.
merge(StoreAATags);
1377 if (
auto CI = dyn_cast<ConstantInt>(NumBytes))
1378 AATags = AATags.
extendTo(CI->getZExtValue());
1388 NewCall = Builder.CreateMemMove(
1389 StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign, NumBytes,
1393 Builder.CreateMemCpy(StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign,
1394 NumBytes,
false, AATags.
TBAA,
1402 assert((StoreAlign && LoadAlign) &&
1403 "Expect unordered load/store to have align.");
1404 if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
1417 NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
1418 StoreBasePtr, *StoreAlign, LoadBasePtr, *LoadAlign, NumBytes, StoreSize,
1424 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1426 MSSAU->insertDef(cast<MemoryDef>(NewMemAcc),
true);
1430 <<
" from load ptr=" << *LoadEv <<
" at: " << *TheLoad
1432 <<
" from store ptr=" << *StoreEv <<
" at: " << *TheStore
1438 <<
"Formed a call to "
1440 <<
"() intrinsic from " <<
ore::NV(
"Inst", InstRemark)
1451 MSSAU->removeMemoryAccess(TheStore,
true);
1454 MSSAU->getMemorySSA()->verifyMemorySSA();
1459 ExpCleaner.markResultUsed();
1466bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(
bool IsMemset,
1467 bool IsLoopMemset) {
1468 if (ApplyCodeSizeHeuristics && CurLoop->
getNumBlocks() > 1) {
1469 if (CurLoop->
isOutermost() && (!IsMemset || !IsLoopMemset)) {
1471 <<
" : LIR " << (IsMemset ?
"Memset" :
"Memcpy")
1472 <<
" avoided: multi-block top-level loop\n");
1480bool LoopIdiomRecognize::runOnNoncountableLoop() {
1483 <<
"] Noncountable Loop %"
1486 return recognizePopcount() || recognizeAndInsertFFS() ||
1487 recognizeShiftUntilBitTest() || recognizeShiftUntilZero();
1497 bool JmpOnZero =
false) {
1506 if (!CmpZero || !CmpZero->
isZero())
1517 return Cond->getOperand(0);
1526 auto *PhiX = dyn_cast<PHINode>(VarX);
1527 if (PhiX && PhiX->getParent() == LoopEntry &&
1528 (PhiX->getOperand(0) == DefX || PhiX->
getOperand(1) == DefX))
1564 Value *VarX1, *VarX0;
1567 DefX2 = CountInst =
nullptr;
1568 VarX1 = VarX0 =
nullptr;
1569 PhiX = CountPhi =
nullptr;
1575 dyn_cast<BranchInst>(LoopEntry->
getTerminator()), LoopEntry))
1576 DefX2 = dyn_cast<Instruction>(
T);
1583 if (!DefX2 || DefX2->
getOpcode() != Instruction::And)
1588 if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->
getOperand(0))))
1592 SubOneOp = dyn_cast<BinaryOperator>(DefX2->
getOperand(1));
1594 if (!SubOneOp || SubOneOp->
getOperand(0) != VarX1)
1600 (SubOneOp->
getOpcode() == Instruction::Add &&
1613 CountInst =
nullptr;
1616 if (Inst.
getOpcode() != Instruction::Add)
1620 if (!Inc || !Inc->
isOne())
1628 bool LiveOutLoop =
false;
1630 if ((cast<Instruction>(U))->
getParent() != LoopEntry) {
1650 auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->
getTerminator());
1655 CntInst = CountInst;
1695 Value *VarX =
nullptr;
1704 dyn_cast<BranchInst>(LoopEntry->
getTerminator()), LoopEntry))
1705 DefX = dyn_cast<Instruction>(
T);
1710 if (!DefX || !DefX->
isShift())
1712 IntrinID = DefX->
getOpcode() == Instruction::Shl ? Intrinsic::cttz :
1715 if (!Shft || !Shft->
isOne())
1740 if (Inst.
getOpcode() != Instruction::Add)
1764bool LoopIdiomRecognize::recognizeAndInsertFFS() {
1776 size_t IdiomCanonicalSize = 6;
1779 CntInst, CntPhi, DefX))
1782 bool IsCntPhiUsedOutsideLoop =
false;
1784 if (!CurLoop->
contains(cast<Instruction>(U))) {
1785 IsCntPhiUsedOutsideLoop =
true;
1788 bool IsCntInstUsedOutsideLoop =
false;
1790 if (!CurLoop->
contains(cast<Instruction>(U))) {
1791 IsCntInstUsedOutsideLoop =
true;
1796 if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
1802 bool ZeroCheck =
false;
1811 if (!IsCntPhiUsedOutsideLoop) {
1815 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1840 std::distance(InstWithoutDebugIt.begin(), InstWithoutDebugIt.end());
1845 if (HeaderSize != IdiomCanonicalSize &&
1849 transformLoopToCountable(IntrinID, PH, CntInst, CntPhi, InitX, DefX,
1851 IsCntPhiUsedOutsideLoop);
1859bool LoopIdiomRecognize::recognizePopcount() {
1873 if (LoopBody->
size() >= 20) {
1883 if (!EntryBI || EntryBI->isConditional())
1891 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1892 if (!PreCondBI || PreCondBI->isUnconditional())
1901 transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
1907 Value *Ops[] = {Val};
1963void LoopIdiomRecognize::transformLoopToCountable(
1966 bool ZeroCheck,
bool IsCntPhiUsedOutsideLoop) {
1971 Builder.SetCurrentDebugLocation(
DL);
1980 if (IsCntPhiUsedOutsideLoop) {
1981 if (DefX->
getOpcode() == Instruction::AShr)
1982 InitXNext = Builder.CreateAShr(InitX, 1);
1983 else if (DefX->
getOpcode() == Instruction::LShr)
1984 InitXNext = Builder.CreateLShr(InitX, 1);
1985 else if (DefX->
getOpcode() == Instruction::Shl)
1986 InitXNext = Builder.CreateShl(InitX, 1);
1994 Count = Builder.CreateSub(
1996 Value *NewCount = Count;
1997 if (IsCntPhiUsedOutsideLoop)
2000 NewCount = Builder.CreateZExtOrTrunc(NewCount, CntInst->
getType());
2003 if (cast<ConstantInt>(CntInst->
getOperand(1))->isOne()) {
2006 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
2007 if (!InitConst || !InitConst->
isZero())
2008 NewCount = Builder.CreateAdd(NewCount, CntInitVal);
2012 NewCount = Builder.CreateSub(CntInitVal, NewCount);
2025 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
2030 Builder.SetInsertPoint(LbCond);
2031 Instruction *TcDec = cast<Instruction>(Builder.CreateSub(
2045 if (IsCntPhiUsedOutsideLoop)
2055void LoopIdiomRecognize::transformLoopToPopcount(
BasicBlock *PreCondBB,
2059 auto *PreCondBr = cast<BranchInst>(PreCondBB->
getTerminator());
2068 Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
2071 NewCount = PopCntZext =
2072 Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->
getType()));
2074 if (NewCount != PopCnt)
2075 (cast<Instruction>(NewCount))->setDebugLoc(
DL);
2082 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
2083 if (!InitConst || !InitConst->
isZero()) {
2084 NewCount = Builder.CreateAdd(NewCount, CntInitVal);
2085 (cast<Instruction>(NewCount))->setDebugLoc(
DL);
2094 ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());
2096 Value *Opnd0 = PopCntZext;
2101 ICmpInst *NewPreCond = cast<ICmpInst>(
2102 Builder.CreateICmp(PreCond->
getPredicate(), Opnd0, Opnd1));
2103 PreCondBr->setCondition(NewPreCond);
2131 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
2137 Builder.SetInsertPoint(LbCond);
2140 "tcdec",
false,
true));
2168 : SubPattern(SP), L(L) {}
2170 template <
typename ITy>
bool match(ITy *V) {
2171 return L->isLoopInvariant(V) && SubPattern.match(V);
2176template <
typename Ty>
2207 " Performing shift-until-bittest idiom detection.\n");
2217 assert(LoopPreheaderBB &&
"There is always a loop preheader.");
2219 using namespace PatternMatch;
2224 Value *CmpLHS, *CmpRHS;
2235 auto MatchVariableBitMask = [&]() {
2244 auto MatchConstantBitMask = [&]() {
2250 auto MatchDecomposableConstantBitMask = [&]() {
2258 if (!MatchVariableBitMask() && !MatchConstantBitMask() &&
2259 !MatchDecomposableConstantBitMask()) {
2265 auto *CurrXPN = dyn_cast<PHINode>(CurrX);
2266 if (!CurrXPN || CurrXPN->getParent() != LoopHeaderBB) {
2271 BaseX = CurrXPN->getIncomingValueForBlock(LoopPreheaderBB);
2273 dyn_cast<Instruction>(CurrXPN->getIncomingValueForBlock(LoopHeaderBB));
2276 "Expected BaseX to be avaliable in the preheader!");
2287 "Should only get equality predicates here.");
2297 if (TrueBB != LoopHeaderBB) {
2356bool LoopIdiomRecognize::recognizeShiftUntilBitTest() {
2357 bool MadeChange =
false;
2359 Value *
X, *BitMask, *BitPos, *XCurr;
2364 " shift-until-bittest idiom detection failed.\n");
2374 assert(LoopPreheaderBB &&
"There is always a loop preheader.");
2377 assert(SuccessorBB &&
"There is only a single successor.");
2380 Builder.SetCurrentDebugLocation(cast<Instruction>(XCurr)->
getDebugLoc());
2383 Type *Ty =
X->getType();
2397 " Intrinsic is too costly, not beneficial\n");
2413 if (
auto *BitPosI = dyn_cast<Instruction>(BitPos))
2422 return U.getUser() != BitPosFrozen;
2424 BitPos = BitPosFrozen;
2430 BitPos->
getName() +
".lowbitmask");
2432 Builder.CreateOr(LowBitMask, BitMask, BitPos->
getName() +
".mask");
2433 Value *XMasked = Builder.CreateAnd(
X, Mask,
X->getName() +
".masked");
2434 CallInst *XMaskedNumLeadingZeros = Builder.CreateIntrinsic(
2435 IntrID, Ty, {XMasked, Builder.getTrue()},
2436 nullptr, XMasked->
getName() +
".numleadingzeros");
2437 Value *XMaskedNumActiveBits = Builder.CreateSub(
2439 XMasked->
getName() +
".numactivebits",
true,
2441 Value *XMaskedLeadingOnePos =
2443 XMasked->
getName() +
".leadingonepos",
false,
2446 Value *LoopBackedgeTakenCount = Builder.CreateSub(
2447 BitPos, XMaskedLeadingOnePos, CurLoop->
getName() +
".backedgetakencount",
2451 Value *LoopTripCount =
2453 CurLoop->
getName() +
".tripcount",
true,
2460 Value *NewX = Builder.CreateShl(
X, LoopBackedgeTakenCount);
2462 if (
auto *
I = dyn_cast<Instruction>(NewX))
2463 I->copyIRFlags(XNext,
true);
2475 NewXNext = Builder.CreateShl(
X, LoopTripCount);
2484 if (
auto *
I = dyn_cast<Instruction>(NewXNext))
2485 I->copyIRFlags(XNext,
true);
2496 Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->
begin());
2497 auto *
IV = Builder.CreatePHI(Ty, 2, CurLoop->
getName() +
".iv");
2504 true, Bitwidth != 2);
2507 auto *IVCheck = Builder.CreateICmpEQ(IVNext, LoopTripCount,
2508 CurLoop->
getName() +
".ivcheck");
2509 Builder.CreateCondBr(IVCheck, SuccessorBB, LoopHeaderBB);
2514 IV->addIncoming(IVNext, LoopHeaderBB);
2525 ++NumShiftUntilBitTest;
2561 const SCEV *&ExtraOffsetExpr,
2562 bool &InvertedCond) {
2564 " Performing shift-until-zero idiom detection.\n");
2577 assert(LoopPreheaderBB &&
"There is always a loop preheader.");
2579 using namespace PatternMatch;
2588 !
match(ValShiftedIsZero,
2602 IntrinID = ValShifted->
getOpcode() == Instruction::Shl ? Intrinsic::cttz
2611 else if (
match(NBits,
2615 ExtraOffsetExpr = SE->
getSCEV(ExtraOffset);
2622 auto *IVPN = dyn_cast<PHINode>(
IV);
2623 if (!IVPN || IVPN->getParent() != LoopHeaderBB) {
2628 Start = IVPN->getIncomingValueForBlock(LoopPreheaderBB);
2629 IVNext = dyn_cast<Instruction>(IVPN->getIncomingValueForBlock(LoopHeaderBB));
2639 "Should only get equality predicates here.");
2650 if (FalseBB != LoopHeaderBB) {
2661 if (ValShifted->
getOpcode() == Instruction::AShr &&
2725bool LoopIdiomRecognize::recognizeShiftUntilZero() {
2726 bool MadeChange =
false;
2732 const SCEV *ExtraOffsetExpr;
2735 Start, Val, ExtraOffsetExpr, InvertedCond)) {
2737 " shift-until-zero idiom detection failed.\n");
2747 assert(LoopPreheaderBB &&
"There is always a loop preheader.");
2750 assert(SuccessorBB &&
"There is only a single successor.");
2753 Builder.SetCurrentDebugLocation(
IV->getDebugLoc());
2769 " Intrinsic is too costly, not beneficial\n");
2776 bool OffsetIsZero =
false;
2777 if (
auto *ExtraOffsetExprC = dyn_cast<SCEVConstant>(ExtraOffsetExpr))
2778 OffsetIsZero = ExtraOffsetExprC->isZero();
2782 CallInst *ValNumLeadingZeros = Builder.CreateIntrinsic(
2783 IntrID, Ty, {Val, Builder.getFalse()},
2784 nullptr, Val->
getName() +
".numleadingzeros");
2785 Value *ValNumActiveBits = Builder.CreateSub(
2787 Val->
getName() +
".numactivebits",
true,
2791 Expander.setInsertPoint(&*Builder.GetInsertPoint());
2792 Value *ExtraOffset = Expander.expandCodeFor(ExtraOffsetExpr);
2794 Value *ValNumActiveBitsOffset = Builder.CreateAdd(
2795 ValNumActiveBits, ExtraOffset, ValNumActiveBits->
getName() +
".offset",
2796 OffsetIsZero,
true);
2797 Value *IVFinal = Builder.CreateIntrinsic(Intrinsic::smax, {Ty},
2798 {ValNumActiveBitsOffset, Start},
2799 nullptr,
"iv.final");
2801 auto *LoopBackedgeTakenCount = cast<Instruction>(Builder.CreateSub(
2802 IVFinal, Start, CurLoop->
getName() +
".backedgetakencount",
2803 OffsetIsZero,
true));
2807 Value *LoopTripCount =
2809 CurLoop->
getName() +
".tripcount",
true,
2815 IV->replaceUsesOutsideBlock(IVFinal, LoopHeaderBB);
2820 Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->
begin());
2821 auto *CIV = Builder.CreatePHI(Ty, 2, CurLoop->
getName() +
".iv");
2827 true, Bitwidth != 2);
2830 auto *CIVCheck = Builder.CreateICmpEQ(CIVNext, LoopTripCount,
2831 CurLoop->
getName() +
".ivcheck");
2832 auto *NewIVCheck = CIVCheck;
2834 NewIVCheck = Builder.CreateNot(CIVCheck);
2835 NewIVCheck->takeName(ValShiftedIsZero);
2839 auto *IVDePHId = Builder.CreateAdd(CIV, Start,
"",
false,
2841 IVDePHId->takeName(
IV);
2845 Builder.CreateCondBr(CIVCheck, SuccessorBB, LoopHeaderBB);
2850 CIV->addIncoming(CIVNext, LoopHeaderBB);
2858 IV->replaceAllUsesWith(IVDePHId);
2859 IV->eraseFromParent();
2868 ++NumShiftUntilZero;
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements a class to represent arbitrary precision integral constant values and operations...
static const Function * getParent(const Value *V)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L, const SCEV *BECount, unsigned StoreSize, AliasAnalysis &AA, SmallPtrSetImpl< Instruction * > &Ignored)
mayLoopAccessLocation - Return true if the specified loop might access the specified pointer location...
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
static Value * matchCondition(BranchInst *BI, BasicBlock *LoopEntry, bool JmpOnZero=false)
Check if the given conditional branch is based on the comparison between a variable and zero,...
static PHINode * getRecurrenceVar(Value *VarX, Instruction *DefX, BasicBlock *LoopEntry)
static cl::opt< bool, true > DisableLIRPMemset("disable-" DEBUG_TYPE "-memset", cl::desc("Proceed with loop idiom recognize pass, but do " "not convert loop(s) to memset."), cl::location(DisableLIRP::Memset), cl::init(false), cl::ReallyHidden)
static cl::opt< bool > UseLIRCodeSizeHeurs("use-lir-code-size-heurs", cl::desc("Use loop idiom recognition code size heuristics when compiling" "with -Os/-Oz"), cl::init(true), cl::Hidden)
static CallInst * createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val, const DebugLoc &DL, bool ZeroCheck, Intrinsic::ID IID)
static bool detectShiftUntilBitTestIdiom(Loop *CurLoop, Value *&BaseX, Value *&BitMask, Value *&BitPos, Value *&CurrX, Instruction *&NextX)
Return true if the idiom is detected in the loop.
static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB, Instruction *&CntInst, PHINode *&CntPhi, Value *&Var)
Return true iff the idiom is detected in the loop.
static Constant * getMemSetPatternValue(Value *V, const DataLayout *DL)
getMemSetPatternValue - If a strided store of the specified value is safe to turn into a memset_patte...
static cl::opt< bool, true > DisableLIRPMemcpy("disable-" DEBUG_TYPE "-memcpy", cl::desc("Proceed with loop idiom recognize pass, but do " "not convert loop(s) to memcpy."), cl::location(DisableLIRP::Memcpy), cl::init(false), cl::ReallyHidden)
static CallInst * createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val, const DebugLoc &DL)
static const SCEV * getNumBytes(const SCEV *BECount, Type *IntPtr, const SCEV *StoreSizeSCEV, Loop *CurLoop, const DataLayout *DL, ScalarEvolution *SE)
Compute the number of bytes as a SCEV from the backedge taken count.
static bool detectShiftUntilZeroIdiom(Loop *CurLoop, const DataLayout &DL, Intrinsic::ID &IntrinID, Value *&InitX, Instruction *&CntInst, PHINode *&CntPhi, Instruction *&DefX)
Return true if the idiom is detected in the loop.
static const SCEV * getStartForNegStride(const SCEV *Start, const SCEV *BECount, Type *IntPtr, const SCEV *StoreSizeSCEV, ScalarEvolution *SE)
static APInt getStoreStride(const SCEVAddRecExpr *StoreEv)
match_LoopInvariant< Ty > m_LoopInvariant(const Ty &M, const Loop *L)
Matches if the value is loop-invariant.
static cl::opt< bool, true > DisableLIRPAll("disable-" DEBUG_TYPE "-all", cl::desc("Options to disable Loop Idiom Recognize Pass."), cl::location(DisableLIRP::All), cl::init(false), cl::ReallyHidden)
static void deleteDeadInstruction(Instruction *I)
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
This file implements a map that provides insertion order iteration.
This file provides utility analysis objects describing memory locations.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
Module.h This file contains the declarations for the Module class.
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
This header defines various interfaces for pass management in LLVM.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isSimple(Instruction *I)
verify safepoint Safepoint IR Verifier
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
static const uint32_t IV[8]
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Check whether or not an instruction may read or write the optionally specified memory location.
Class for arbitrary precision integers.
std::optional< uint64_t > tryZExtValue() const
Get zero extended value if possible.
unsigned getBitWidth() const
Return the number of bits in the APInt.
int64_t getSExtValue() const
Get sign extended value.
A container for analyses that lazily runs them and caches their results.
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const Instruction & front() const
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
This class represents a function call, abstracting a target machine's calling convention.
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLE
signed less or equal
@ ICMP_UGT
unsigned greater than
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static Constant * getExactLogBase2(Constant *C)
If C is a scalar/fixed width vector of known powers of 2, then this function returns a new scalar/fix...
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
bool isOne() const
This is just a convenience method to make client code smaller for a common case.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
static ConstantInt * getBool(LLVMContext &Context, bool V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
A parsed version of the target data layout string in and methods for querying it.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This class represents a freeze function that returns random concrete value if an operand is either a ...
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalObject.
void setUnnamedAddr(UnnamedAddr Val)
Module * getParent()
Get the module that this global value is contained inside of...
@ PrivateLinkage
Like Internal, but omit from symbol table.
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
Return true if this predicate is either EQ or NE.
ConstantInt * getInt1(bool V)
Get a constant value representing either true or false.
BasicBlock * GetInsertBlock() const
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
const BasicBlock * getParent() const
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const Function * getFunction() const
Return the function this instruction belongs to.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
std::optional< InstListType::iterator > getInsertionPointAfterDef()
Get the first insertion point at which the result of this instruction is defined.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
This class provides an interface for updating the loop pass manager based on mutations to the loop ne...
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Align getAlign() const
Return the alignment of the access that is being performed.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize afterPointer()
Any location after the base pointer (but still within the underlying object).
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
bool isOutermost() const
Return true if the loop does not have a parent (natural) loop.
unsigned getNumBlocks() const
Get the number of blocks in this loop in constant time.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
BlockT * getExitBlock() const
If getExitBlocks would return exactly one block, return that block.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
void getUniqueExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all unique successor blocks of this loop.
block_iterator block_begin() const
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
StringRef getName() const
This class implements a map that also provides access to all stored values in a deterministic order.
This class wraps the llvm.memcpy intrinsic.
Value * getLength() const
Value * getDest() const
This is just like getRawDest, but it strips off any cast instructions (including addrspacecast) that ...
MaybeAlign getDestAlign() const
This class wraps the llvm.memset and llvm.memset.inline intrinsics.
MaybeAlign getSourceAlign() const
Value * getSource() const
This is just like getRawSource, but it strips off any cast instructions that feed it,...
Representation for a specific memory location.
An analysis that produces MemorySSA for a function.
Encapsulates MemorySSA, including all data associated with memory accesses.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Value * getIncomingValueForBlock(const BasicBlock *BB) const
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStart() const
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
const Loop * getLoop() const
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
Helper to remove instructions inserted during SCEV expansion, unless they are marked as used.
This class uses information about analyze scalars to rewrite expressions in canonical form.
const SCEV * getOperand(unsigned i) const
This class represents an analyzed expression in the program.
bool isOne() const
Return true if the expression is a constant one.
bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
The main scalar evolution driver.
bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
A vector that has set insertion semantics.
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
Simple and conservative implementation of LoopSafetyInfo that can give false-positive answers to its ...
void computeLoopSafetyInfo(const Loop *CurLoop) override
Computes safety information for a loop checks loop body & header for the possibility of may throw exc...
bool anyBlockMayThrow() const override
Returns true iff any block of the loop for which this info is contains an instruction that may throw ...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Value * getValueOperand()
Value * getPointerOperand()
StringRef - Represent a constant reference to a string, i.e.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
A Use represents the edge between a Value definition and its users.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
void replaceUsesOutsideBlock(Value *V, BasicBlock *BB)
replaceUsesOutsideBlock - Go through the uses list for this definition and make each use point to "V"...
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
Value handle that is nullable, but tries to track the Value.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eg/ne/...) to Threshold.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
DiagnosticInfoOptimizationBase::setExtraArgs setExtraArgs
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
bool inferNonMandatoryLibFuncAttrs(Module *M, StringRef Name, const TargetLibraryInfo &TLI)
Analyze the name and prototype of the given function and set any applicable attributes.
bool isLibFuncEmittable(const Module *M, const TargetLibraryInfo *TLI, LibFunc TheLibFunc)
Check whether the library function is available on target and also that it in the current Module is a...
bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isModOrRefSet(const ModRefInfo MRI)
FunctionCallee getOrInsertLibFunc(Module *M, const TargetLibraryInfo &TLI, LibFunc TheLibFunc, FunctionType *T, AttributeList AttributeList)
Calls getOrInsertFunction() and then makes sure to add mandatory argument attributes.
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
@ ModRef
The access may reference and may modify the value stored in memory.
@ Mod
The access may modify the value stored in memory.
bool VerifyMemorySSA
Enables verification of MemorySSA.
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
PreservedAnalyses getLoopPassPreservedAnalyses()
Returns the minimum set of Analyses that all loop passes must preserve.
Value * isBytewiseValue(Value *V, const DataLayout &DL)
If the specified value can be set by repeating the same byte in memory, return the i8 value that it i...
bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred, Value *&X, APInt &Mask, bool LookThroughTrunc=true)
Decompose an icmp into the form ((X & Mask) pred 0) if possible.
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
MDNode * TBAAStruct
The tag for type-based alias analysis (tbaa struct).
MDNode * Scope
The tag for alias scope specification (used with noalias).
MDNode * TBAA
The tag for type-based alias analysis.
AAMDNodes merge(const AAMDNodes &Other) const
Given two sets of AAMDNodes applying to potentially different locations, determine the best AAMDNodes...
MDNode * NoAlias
The tag specifying the noalias scope.
AAMDNodes extendTo(ssize_t Len) const
Create a new AAMDNode that describes this AAMDNode after extending it to apply to a series of bytes o...
This struct is a compact representation of a valid (non-zero power of two) alignment.
static bool Memset
When true, Memset is disabled.
static bool All
When true, the entire pass is disabled.
static bool Memcpy
When true, Memcpy is disabled.
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
TargetTransformInfo & TTI
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Match loop-invariant value.
match_LoopInvariant(const SubPattern_t &SP, const Loop *L)