#define DEBUG_TYPE "scalarrepl"
STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumAdjusted,  "Number of scalar allocas adjusted to allow promotion");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
SROA(int T, bool hasDT, char &ID, int ST, int AT, int SLT)
    : FunctionPass(ID), HasDomTree(hasDT) {
  // A parameter of -1 selects the built-in default for that threshold.
  if (T == -1)
    SRThreshold = 128;
  else
    SRThreshold = T;
  if (ST == -1)
    StructMemberThreshold = 32;
  else
    StructMemberThreshold = ST;
  if (AT == -1)
    ArrayElementThreshold = 8;
  else
    ArrayElementThreshold = AT;
  if (SLT == -1)
    ScalarLoadThreshold = -1; // Do not limit the scalar integer load size.
  else
    ScalarLoadThreshold = SLT;
}
// ... (inside struct AllocaInfo, the per-alloca analysis record) ...

/// True if this alloca is the source of a memcpy/memmove.
bool isMemCpySrc : 1;

/// True if this alloca is the destination of a memcpy/memmove.
bool isMemCpyDst : 1;

/// True if some element of the alloca is accessed individually.
bool hasSubelementAccess : 1;

/// True if the whole alloca is loaded or stored.
bool hasALoadOrStore : 1;

explicit AllocaInfo(AllocaInst *ai)
    : AI(ai), isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false),
      hasSubelementAccess(false), hasALoadOrStore(false) {}
// ... (cutoffs stored on the SROA pass itself) ...

/// Maximum size (in bytes) of an alloca for which scalar replacement is
/// attempted.
unsigned SRThreshold;

/// Maximum number of struct members in an alloca that will be split.
unsigned StructMemberThreshold;

/// Maximum number of array elements in an alloca that will be split.
unsigned ArrayElementThreshold;

/// Maximum size (in bits) permitted when converting a whole alloca into a
/// single scalar integer load/store; -1 means no limit.
unsigned ScalarLoadThreshold;
/// MarkUnsafe - Record that the given instruction prevents scalar
/// replacement of the alloca being analyzed.
static void MarkUnsafe(AllocaInfo &I, Instruction *User) {
  I.isUnsafe = true;
  DEBUG(dbgs() << "  Transformation preventing inst: " << *User << '\n');
}
// ... (private helpers of the SROA pass) ...
bool isSafeAllocaToScalarRepl(AllocaInst *AI);
void isSafeForScalarRepl(Instruction *I, uint64_t Offset, AllocaInfo &Info);
void isSafePHISelectUseForScalarRepl(Instruction *User, uint64_t Offset,
                                     AllocaInfo &Info);
void isSafeGEP(GetElementPtrInst *GEPI, uint64_t &Offset, AllocaInfo &Info);
void isSafeMemAccess(uint64_t Offset, uint64_t MemSize, Type *MemOpType,
                     bool isStore, AllocaInfo &Info, Instruction *TheAccess,
                     bool AllowWholeAccess);
bool TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size,
                      const DataLayout &DL);
uint64_t FindElementAndOffset(Type *&T, uint64_t &Offset, Type *&IdxTy,
                              const DataLayout &DL);
void DoScalarReplacement(AllocaInst *AI, std::vector<AllocaInst*> &WorkList);
void DeleteDeadInstructions();
// SROA_DT - Perform SROA, promoting allocas with the DominatorTree-based
// mem2reg machinery.
struct SROA_DT : public SROA {
  static char ID;

  SROA_DT(int T = -1, int ST = -1, int AT = -1, int SLT = -1)
      : SROA(T, true, ID, ST, AT, SLT) {
    initializeSROA_DTPass(*PassRegistry::getPassRegistry());
  }
  // ...
};
// SROA_SSAUp - Perform SROA, promoting allocas with SSAUpdater instead of
// requiring a DominatorTree.
struct SROA_SSAUp : public SROA {
  static char ID;

  SROA_SSAUp(int T = -1, int ST = -1, int AT = -1, int SLT = -1)
      : SROA(T, false, ID, ST, AT, SLT) {
    initializeSROA_SSAUpPass(*PassRegistry::getPassRegistry());
  }
  // ...
};
230 "Scalar Replacement of Aggregates (DT)",
false,
false)
// Public interface to the ScalarReplAggregates pass.
FunctionPass *llvm::createScalarReplAggregatesPass(int Threshold,
                                                   bool UseDomTree,
                                                   int StructMemberThreshold,
                                                   int ArrayElementThreshold,
                                                   int ScalarLoadThreshold) {
  if (UseDomTree)
    return new SROA_DT(Threshold, StructMemberThreshold, ArrayElementThreshold,
                       ScalarLoadThreshold);
  return new SROA_SSAUp(Threshold, StructMemberThreshold,
                        ArrayElementThreshold, ScalarLoadThreshold);
}
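// Usage sketch (assumption, not from this file): every parameter defaults to
// -1, meaning "use the built-in default".  With the legacy pass manager this
// might look like:
//
//   legacy::PassManager PM;
//   PM.add(createScalarReplAggregatesPass());                 // DT-based
//   PM.add(createScalarReplAggregatesPass(128, /*UseDomTree=*/false));
//   PM.run(M);                                                // M: Module &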
/// ConvertToScalarInfo - This class keeps track of the information needed to
/// convert a whole alloca into a single scalar (integer or vector) value.
class ConvertToScalarInfo {
  /// AllocaSize - The size of the alloca being considered, in bytes.
  unsigned AllocaSize;
  const DataLayout &DL;
  unsigned ScalarLoadThreshold;
  // ...

  /// HadNonMemTransferAccess - True if there is at least one access to the
  /// alloca that is not a MemTransferInst; we don't want to turn an alloca
  /// used only by mem intrinsics into a large integer.
  bool HadNonMemTransferAccess;

  /// HadDynamicAccess - True if some element of this alloca was dynamically
  /// indexed.
  bool HadDynamicAccess;

public:
  explicit ConvertToScalarInfo(unsigned Size, const DataLayout &DL,
                               unsigned SLT)
      : AllocaSize(Size), DL(DL), ScalarLoadThreshold(SLT),
        IsNotTrivial(false), ScalarKind(Unknown), VectorTy(nullptr),
        HadNonMemTransferAccess(false), HadDynamicAccess(false) {}

  AllocaInst *TryConvert(AllocaInst *AI);
private:
  bool CanConvertToScalar(Value *V, uint64_t Offset, Value *NonConstantIdx);
  void MergeInTypeForLoadOrStore(Type *In, uint64_t Offset);
  bool MergeInVectorType(VectorType *VInTy, uint64_t Offset);
  void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset,
                           Value *NonConstantIdx);

  Value *ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
                                    uint64_t Offset, Value *NonConstantIdx,
                                    IRBuilder<> &Builder);
  Value *ConvertScalar_InsertValue(Value *SV, Value *Old, uint64_t Offset,
                                   Value *NonConstantIdx,
                                   IRBuilder<> &Builder);
};
AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
  // If we can't convert this scalar, or if mem2reg can trivially do it, bail
  // out.
  if (!CanConvertToScalar(AI, 0, nullptr) || !IsNotTrivial)
    return nullptr;

  // If an alloca has only memset/memcpy uses, its ScalarKind is still
  // Unknown; treat it as an integer below.
  if (ScalarKind == Unknown)
    ScalarKind = Integer;

  if (ScalarKind == Vector && VectorTy->getBitWidth() != AllocaSize * 8)
    ScalarKind = Integer;

  Type *NewTy;
  if (ScalarKind == Vector) {
    assert(VectorTy && "Missing type for vector scalar.");
    DEBUG(dbgs() << "CONVERT TO VECTOR: " << *AI << "\n  TYPE = "
                 << *VectorTy << '\n');
    NewTy = VectorTy; // Use the vector type.
  } else {
    unsigned BitWidth = AllocaSize * 8;

    // Do not convert to a scalar integer when the alloca is larger than the
    // scalar load threshold.
    if (BitWidth > ScalarLoadThreshold)
      return nullptr;

    if ((ScalarKind == ImplicitVector || ScalarKind == Integer) &&
        !HadNonMemTransferAccess && !DL.fitsInLegalInteger(BitWidth))
      return nullptr;
    // Dynamic accesses on an integer would require shifting by a dynamic
    // amount, which could be prohibitively expensive.
    if (ScalarKind == Integer && HadDynamicAccess)
      return nullptr;

    DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");
    NewTy = IntegerType::get(AI->getContext(), BitWidth);
  }
  // ... create NewAI, a fresh alloca of NewTy, and rewrite all uses of AI ...
  ConvertUsesToScalar(AI, NewAI, 0, nullptr);
  return NewAI;
}
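// Illustrative sketch (not from this file): an alloca whose accesses all fit
// a single vector is rewritten so mem2reg can finish the job.
//
//   %u = alloca { float, float }   ; used only via float loads/stores
//   ; TryConvert picks <2 x float>:
//   %u.conv = alloca <2 x float>   ; element accesses become
//                                  ; extractelement / insertelement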
void ConvertToScalarInfo::MergeInTypeForLoadOrStore(Type *In,
                                                    uint64_t Offset) {
  // If we already decided to turn this into a blob of integer memory, there
  // is nothing left to do.
  if (ScalarKind == Integer)
    return;

  // If the In type is a vector that is the same size as the alloca, see if
  // it matches the existing VecTy.
  if (VectorType *VInTy = dyn_cast<VectorType>(In)) {
    if (MergeInVectorType(VInTy, Offset))
      return;
  } else if (In->isFloatTy() || In->isDoubleTy() ||
             (In->isIntegerTy() && In->getPrimitiveSizeInBits() >= 8 &&
              isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
    // Full-width accesses can be ignored; they can always be turned into
    // bitcasts.
    unsigned EltSize = In->getPrimitiveSizeInBits()/8;
    if (EltSize == AllocaSize)
      return;

    // If we're accessing something that could be an element of a vector, see
    // if the implied vector agrees with what we already have and if Offset
    // is compatible with it.
    if (Offset % EltSize == 0 && AllocaSize % EltSize == 0 &&
        (!VectorTy || EltSize == VectorTy->getElementType()
                                         ->getPrimitiveSizeInBits()/8)) {
      if (!VectorTy) {
        ScalarKind = ImplicitVector;
        VectorTy = VectorType::get(In, AllocaSize/EltSize);
      }
      return;
    }
  }

  // Otherwise, this case can't be handled with an optimized vector form;
  // fall back to treating the alloca as a large integer.
  ScalarKind = Integer;
}
bool ConvertToScalarInfo::MergeInVectorType(VectorType *VInTy,
                                            uint64_t Offset) {
  // If we're loading/storing a vector of exactly the alloca's size at offset
  // zero, remember (or keep) the vector type; differing vector types of the
  // same size can be bitcast later.
  if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
    if (!VectorTy)
      VectorTy = VInTy;
    ScalarKind = Vector;
    return true;
  }
  return false;
}
bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
                                             Value *NonConstantIdx) {
  for (User *U : V->users()) {
    Instruction *UI = cast<Instruction>(U);

    if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      // Don't break volatile/atomic loads, and don't touch MMX operations.
      if (!LI->isSimple())
        return false;
      if (LI->getType()->isX86_MMXTy())
        return false;
      HadNonMemTransferAccess = true;
      MergeInTypeForLoadOrStore(LI->getType(), Offset);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      // Storing the pointer itself, or a volatile/atomic store?  Reject.
      if (SI->getOperand(0) == V || !SI->isSimple())
        return false;
      if (SI->getOperand(0)->getType()->isX86_MMXTy())
        return false;
      HadNonMemTransferAccess = true;
      MergeInTypeForLoadOrStore(SI->getOperand(0)->getType(), Offset);
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(UI)) {
      if (!onlyUsedByLifetimeMarkers(BCI))
        IsNotTrivial = true; // Can't be mem2reg'd.
      if (!CanConvertToScalar(BCI, Offset, NonConstantIdx))
        return false;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UI)) {
      // Compute the offset that this GEP adds to the pointer.  At most one
      // trailing non-constant index is tolerated, and only into a vector.
      PointerType *PtrTy = dyn_cast<PointerType>(GEP->getPointerOperandType());
      if (!PtrTy)
        return false;
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      Value *GEPNonConstantIdx = nullptr;
      if (!GEP->hasAllConstantIndices()) {
        if (!isa<VectorType>(PtrTy->getElementType()))
          return false;
        if (NonConstantIdx)
          return false;
        GEPNonConstantIdx = Indices.pop_back_val();
        if (!GEPNonConstantIdx->getType()->isIntegerTy(32))
          return false;
        HadDynamicAccess = true;
      } else
        GEPNonConstantIdx = NonConstantIdx;
      uint64_t GEPOffset = DL.getIndexedOffset(PtrTy, Indices);
      // See if all uses of the GEP can be converted too.
      if (!CanConvertToScalar(GEP, Offset+GEPOffset, GEPNonConstantIdx))
        return false;
      IsNotTrivial = true; // Can't be mem2reg'd.
      HadNonMemTransferAccess = true;
      continue;
    }

    // A constant-sized memset of a constant value (e.g. 0) can be handled.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(UI)) {
      if (NonConstantIdx)
        return false;
      if (!isa<ConstantInt>(MSI->getValue()))
        return false;
      ConstantInt *Len = dyn_cast<ConstantInt>(MSI->getLength());
      if (!Len)
        return false;
      // If the size differs from the alloca, we can only convert the alloca
      // to an integer bag-of-bits.
      if (Len->getZExtValue() != AllocaSize || Offset != 0)
        ScalarKind = Integer;
      IsNotTrivial = true; // Can't be mem2reg'd.
      HadNonMemTransferAccess = true;
      continue;
    }

    // A memcpy/memmove into or out of the whole allocation behaves like a
    // load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(UI)) {
      ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength());
      if (!Len || Len->getZExtValue() != AllocaSize || Offset != 0)
        return false;
      IsNotTrivial = true; // Can't be mem2reg'd.
      continue;
    }

    // Lifetime markers are harmless.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(UI)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;
    }

    // Otherwise, we cannot handle this use.
    return false;
  }

  return true;
}
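// Illustrative sketch (assumption, not from this file): a use that defeats
// the conversion.  A volatile store is not "simple", so CanConvertToScalar
// returns false and the alloca is left untouched:
//
//   %a = alloca i64
//   store volatile i64 %v, i64* %a   ; !SI->isSimple() -> reject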
void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
                                              uint64_t Offset,
                                              Value *NonConstantIdx) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->user_back());

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset, NonConstantIdx);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Fold the GEP's constant offset (converted to bits) into Offset and
      // recurse into the GEP's users.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      Value *GEPNonConstantIdx = nullptr;
      if (!GEP->hasAllConstantIndices()) {
        assert(!NonConstantIdx &&
               "Dynamic GEP reading from dynamic GEP unsupported");
        GEPNonConstantIdx = Indices.pop_back_val();
      } else
        GEPNonConstantIdx = NonConstantIdx;
      uint64_t GEPOffset = DL.getIndexedOffset(GEP->getPointerOperandType(),
                                               Indices);
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8, GEPNonConstantIdx);
      GEP->eraseFromParent();
      continue;
    }

    IRBuilder<> Builder(User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load becomes a bit-extract from the whole-alloca scalar.
      Value *LoadedVal = Builder.CreateLoad(NewAI);
      Value *NewLoadVal
        = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset,
                                     NonConstantIdx, Builder);
      LI->replaceAllUsesWith(NewLoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      // Load the whole scalar, insert the stored bits, and store it back.
      Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             NonConstantIdx, Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();

      // If the load we just inserted is now dead, then the store overwrote
      // the entire value.
      if (Old->use_empty())
        Old->eraseFromParent();
      continue;
    }

    // A constant-sized memset of a constant byte becomes a store of the byte
    // splatted out to the full width.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      assert(!NonConstantIdx && "Cannot replace dynamic memset with insert");
      int64_t SNumBytes = cast<ConstantInt>(MSI->getLength())->getSExtValue();
      if (SNumBytes > 0 && (SNumBytes >> 32) == 0) {
        unsigned NumBytes = static_cast<unsigned>(SNumBytes);
        unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

        // Compute the value replicated the right number of times.
        APInt APVal(NumBytes*8, Val);

        // Splat the value if non-zero.
        if (Val)
          for (unsigned i = 1; i != NumBytes; ++i)
            APVal |= APVal << 8;

        Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
        Value *New = ConvertScalar_InsertValue(
                         ConstantInt::get(User->getContext(), APVal),
                         Old, Offset, nullptr, Builder);
        Builder.CreateStore(New, NewAI);
        // ...
      }
      MSI->eraseFromParent();
      continue;
    }

    // A memcpy/memmove of the whole allocation becomes one load plus one
    // store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      assert(Offset == 0 && "must be store to start of alloca");
      assert(!NonConstantIdx && "Cannot replace dynamic transfer with insert");
      // ...
      if (MTI->getRawDest() == Ptr) {
        // The transfer writes into the alloca: load from the source and
        // store the scalar into NewAI.
        assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
        Value *SrcPtr = MTI->getSource();
        PointerType *SPTy = cast<PointerType>(SrcPtr->getType());
        // ... AIPTy = NewAI's pointer type, in SPTy's address space ...
        SrcPtr = Builder.CreateBitCast(SrcPtr, AIPTy);
        LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
        // ...
        Builder.CreateStore(SrcVal, NewAI);
      } else {
        // The transfer reads out of the alloca: load NewAI and store it to
        // the destination.
        assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
        LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");
        PointerType *DPTy = cast<PointerType>(MTI->getDest()->getType());
        // ... AIPTy = NewAI's pointer type, in DPTy's address space ...
        Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), AIPTy);
        StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
        // ...
      }
      MTI->eraseFromParent();
      continue;
    }

    // Lifetime markers need not be preserved on the converted scalar.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        II->eraseFromParent();
        continue;
      }
    }

    llvm_unreachable("Unsupported operation!");
  }
}
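// Worked example (illustrative): memset(%a, 0xAB, 4) on an alloca converted
// to i32.  The splat loop above runs three times:
//   APInt(32, 0xAB) -> 0xABAB -> 0xABABAB -> 0xABABABAB
// so the memset is replaced by a plain store of i32 0xABABABAB into %newai.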
/// ConvertScalar_ExtractValue - Extract a value of type ToType from the
/// scalar FromVal at the given bit offset.
Value *ConvertToScalarInfo::
ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
                           uint64_t Offset, Value *NonConstantIdx,
                           IRBuilder<> &Builder) {
  // If the load is of the whole alloca, no conversion is needed.
  Type *FromType = FromVal->getType();
  if (FromType == ToType && Offset == 0)
    return FromVal;

  // If the alloca was converted to a vector, this is either an element
  // access or a bitcast to another vector type of the same size.
  if (VectorType *VTy = dyn_cast<VectorType>(FromType)) {
    unsigned FromTypeSize = DL.getTypeAllocSize(FromType);
    unsigned ToTypeSize = DL.getTypeAllocSize(ToType);
    if (FromTypeSize == ToTypeSize) {
      // ... same size: return a bitcast/shuffle of the whole vector ...
    }

    // Otherwise it must be an element access.
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = DL.getTypeAllocSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    }
    Value *Idx;
    if (NonConstantIdx) {
      if (Elt)
        Idx = Builder.CreateAdd(NonConstantIdx, Builder.getInt32(Elt),
                                "dyn.offset");
      else
        Idx = NonConstantIdx;
    } else
      Idx = Builder.getInt32(Elt);
    Value *V = Builder.CreateExtractElement(FromVal, Idx);
    if (V->getType() != ToType)
      V = Builder.CreateBitCast(V, ToType);
    return V;
  }

  // If ToType is a first-class aggregate, extract each element and rebuild
  // the aggregate with insertvalue.
  if (StructType *ST = dyn_cast<StructType>(ToType)) {
    assert(!NonConstantIdx &&
           "Dynamic indexing into struct types not supported");
    const StructLayout &Layout = *DL.getStructLayout(ST);
    Value *Res = UndefValue::get(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
                                     Offset+Layout.getElementOffsetInBits(i),
                                              nullptr, Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i);
    }
    return Res;
  }

  if (ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
    assert(!NonConstantIdx &&
           "Dynamic indexing into array types not supported");
    uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());
    Value *Res = UndefValue::get(AT);
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
                                              Offset+i*EltSize, nullptr,
                                              Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i);
    }
    return Res;
  }

  // Otherwise the alloca was converted to an integer: shift the desired bits
  // down, then truncate and cast.
  IntegerType *NTy = cast<IntegerType>(FromVal->getType());
  int ShAmt;
  if (DL.isBigEndian()) {
    // On big-endian machines the low bit sits at the offset given by the
    // store size, which matters for types whose width is not a multiple of
    // eight bits.
    ShAmt = DL.getTypeStoreSizeInBits(NTy) -
            DL.getTypeStoreSizeInBits(ToType) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Negative shift amounts (shl) are supported to handle loads that run off
  // the end of a structure, where only some of the bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateLShr(FromVal,
                                 ConstantInt::get(FromVal->getType(), ShAmt));
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateShl(FromVal,
                                ConstantInt::get(FromVal->getType(), -ShAmt));

  // Finally, truncate (or extend) the integer to the right width, then
  // bitcast or inttoptr it to ToType as needed.
  unsigned LIBitWidth = DL.getTypeSizeInBits(ToType);
  // ...
  assert(FromVal->getType() == ToType && "Didn't convert right?");
  return FromVal;
}
/// ConvertScalar_InsertValue - Insert the value SV into the existing scalar
/// Old at the given bit offset, returning the combined value.
Value *ConvertToScalarInfo::
ConvertScalar_InsertValue(Value *SV, Value *Old,
                          uint64_t Offset, Value *NonConstantIdx,
                          IRBuilder<> &Builder) {
  // Convert the stored type to the actual type, shift it left to insert,
  // then 'or' it into place.
  Type *AllocaType = Old->getType();
  LLVMContext &Context = Old->getContext();

  if (VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    uint64_t VecSize = DL.getTypeAllocSizeInBits(VTy);
    uint64_t ValSize = DL.getTypeAllocSizeInBits(SV->getType());

    // Replacing the whole vector?
    if (ValSize == VecSize) {
      // ... return a bitcast/shuffle of SV to the vector type ...
    }

    // Must be an element insertion.
    Type *EltTy = VTy->getElementType();
    if (SV->getType() != EltTy)
      SV = Builder.CreateBitCast(SV, EltTy);
    uint64_t EltSize = DL.getTypeAllocSizeInBits(EltTy);
    unsigned Elt = Offset/EltSize;
    Value *Idx;
    if (NonConstantIdx) {
      if (Elt)
        Idx = Builder.CreateAdd(NonConstantIdx, Builder.getInt32(Elt),
                                "dyn.offset");
      else
        Idx = NonConstantIdx;
    } else
      Idx = Builder.getInt32(Elt);
    return Builder.CreateInsertElement(Old, SV, Idx);
  }

  // If SV is a first-class aggregate, insert each element recursively.
  if (StructType *ST = dyn_cast<StructType>(SV->getType())) {
    assert(!NonConstantIdx &&
           "Dynamic indexing into struct types not supported");
    const StructLayout &Layout = *DL.getStructLayout(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i);
      Old = ConvertScalar_InsertValue(Elt, Old,
                                      Offset+Layout.getElementOffsetInBits(i),
                                      nullptr, Builder);
    }
    return Old;
  }

  if (ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
    assert(!NonConstantIdx &&
           "Dynamic indexing into array types not supported");
    uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i);
      Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, nullptr,
                                      Builder);
    }
    return Old;
  }

  // Convert the stored value to an integer of the same width, then shift and
  // mask it into place.
  unsigned SrcWidth = DL.getTypeSizeInBits(SV->getType());
  unsigned DestWidth = DL.getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = DL.getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = DL.getTypeStoreSizeInBits(AllocaType);
  // ... bitcast fp/vector values and ptrtoint pointers to integers ...

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (/* SV is narrower than the alloca type */) {
      SV = Builder.CreateZExt(SV, AllocaType);
    } else {
      // Truncation may be needed if storing more than the alloca can hold.
      SV = Builder.CreateTrunc(SV, AllocaType);
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }

  // If this is a big-endian system and the store is narrower than the full
  // alloca type, shift to place the bits correctly.
  int ShAmt;
  if (DL.isBigEndian()) {
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(), ShAmt));
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(), -ShAmt));
    Mask = Mask.lshr(-ShAmt);
  }

  // Mask out the bits we are about to insert from the old value, and 'or'
  // in the new bits.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask");
    SV = Builder.CreateOr(Old, SV, "ins");
  }
  return SV;
}
bool SROA::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  // Alternate promotion and scalar replacement until neither makes progress.
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange)
      break; // No need to repromote if no scalarrepl.
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange)
      break; // No need to re-scalarrepl if no promotion.
  }
  return Changed;
}
// ... (inside AllocaPromoter::run: collect the alloca's debug intrinsics,
// promote it, then clean up) ...
for (User *U : DINode->users())
  if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
    DDIs.push_back(DDI);
  else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
    DVIs.push_back(DVI);
// ...
for (SmallVectorImpl<DbgDeclareInst *>::iterator I = DDIs.begin(),
       E = DDIs.end(); I != E; ++I) {
  (*I)->eraseFromParent();
}
for (SmallVectorImpl<DbgValueInst *>::iterator I = DVIs.begin(),
       E = DVIs.end(); I != E; ++I) {
  (*I)->eraseFromParent();
}
// ...

bool isInstInList(Instruction *I,
                  const SmallVectorImpl<Instruction *> &Insts) const override {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getOperand(0) == AI;
  return cast<StoreInst>(I)->getPointerOperand() == AI;
}
void updateDebugInfo(Instruction *Inst) const override {
  // Rewrite each dbg.declare as a dbg.value at this load or store.
  for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(),
         E = DDIs.end(); I != E; ++I) {
    DbgDeclareInst *DDI = *I;
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      ConvertDebugDeclareToDebugValue(DDI, SI, *DIB);
    else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      ConvertDebugDeclareToDebugValue(DDI, LI, *DIB);
  }
  // Re-point each dbg.value at the promoted value.
  for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
         E = DVIs.end(); I != E; ++I) {
    DbgValueInst *DVI = *I;
    Value *Arg = nullptr;
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // If the stored value is a zext/sext of an argument, refer to the
      // argument directly; the extension may be zapped by a later pass.
      if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
        Arg = dyn_cast<Argument>(ZExt->getOperand(0));
      if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
        Arg = dyn_cast<Argument>(SExt->getOperand(0));
      if (!Arg)
        Arg = SI->getOperand(0);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      Arg = LI->getOperand(0);
    } else {
      continue;
    }
    DIB->insertDbgValueIntrinsic(Arg, 0, DVI->getVariable(),
                                 DVI->getExpression(), DVI->getDebugLoc(),
                                 Inst);
  }
}
/// isSafeSelectToSpeculate - Select instructions that use an alloca and are
/// subsequently loaded can be rewritten to load both input pointers and then
/// select between the results, allowing the alloca load to be promoted.
static bool isSafeSelectToSpeculate(SelectInst *SI) {
  // ...
  for (User *U : SI->users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;
    // ... both select operands must be unconditionally dereferenceable ...
  }
  return true;
}

/// isSafePHIToSpeculate - PHI instructions that use an alloca and are
/// subsequently loaded can be rewritten to load from the predecessor blocks,
/// provided nothing between the PHI and the load can write to memory.
static bool isSafePHIToSpeculate(PHINode *PN) {
  BasicBlock *BB = PN->getParent();
  // ...
  unsigned MaxAlign = 0;
  for (User *U : PN->users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // For now we only allow loads in the same block as the PHI.  This is a
    // common case that happens when instcombine merges two loads through a
    // PHI.
    if (LI->getParent() != BB)
      return false;

    // Ensure that there are no instructions between the PHI and the load
    // that could store.
    for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
      if (BBI->mayWriteToMemory())
        return false;

    MaxAlign = std::max(MaxAlign, LI->getAlignment());
    // ...
  }
  // ... each incoming pointer must be safe to load unconditionally ...
  return true;
}
static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout &DL) {
  SetVector<Instruction*, SmallVector<Instruction*, 4>,
            SmallPtrSet<Instruction*, 4> > InstsToRewrite;
  for (User *U : AI->users()) {
    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (!LI->isSimple())
        return false;
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Don't allow a store OF the AI, only INTO the AI.
      if (SI->getOperand(0) == AI || !SI->isSimple())
        return false;
      continue;
    }

    if (SelectInst *SI = dyn_cast<SelectInst>(U)) {
      // If the condition being selected on is a constant, fold the select.
      // This does (rarely) happen early on.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition())) {
        Value *Result = SI->getOperand(1+CI->isZero());
        SI->replaceAllUsesWith(Result);
        SI->eraseFromParent();
        // This is very rare and we just scrambled the use list of AI; start
        // over completely.
        return tryToMakeAllocaBePromotable(AI, DL);
      }
      // If "load (select c, AI, ptr)" can be turned into a select of two
      // loads, rewrite the select.
      if (!isSafeSelectToSpeculate(SI))
        return false;
      InstsToRewrite.insert(SI);
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(U)) {
      if (PN->use_empty()) { // Dead PHIs can be stripped.
        InstsToRewrite.insert(PN);
        continue;
      }
      // If "load (phi [AI, ptr, ...])" can be turned into a PHI of loads in
      // the predecessor blocks, rewrite the PHI.
      if (!isSafePHIToSpeculate(PN))
        return false;
      InstsToRewrite.insert(PN);
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      if (onlyUsedByLifetimeMarkers(BCI)) {
        InstsToRewrite.insert(BCI);
        continue;
      }
    }

    return false;
  }

  // If there are no instructions to rewrite, then all uses are load/stores
  // and we're done!
  if (InstsToRewrite.empty())
    return true;

  // Rewrite the instructions that block promotion.
  for (unsigned i = 0, e = InstsToRewrite.size(); i != e; ++i) {
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(InstsToRewrite[i])) {
      // This can only be a bitcast used by nothing but lifetime intrinsics;
      // erase the markers and then the cast.
      for (BitCastInst::user_iterator I = BCI->user_begin(),
             E = BCI->user_end(); I != E;)
        cast<Instruction>(*I++)->eraseFromParent();
      BCI->eraseFromParent();
      continue;
    }

    if (SelectInst *SI = dyn_cast<SelectInst>(InstsToRewrite[i])) {
      // Selects in InstsToRewrite only have load uses.  Rewrite each as two
      // loads with a new select.
      while (!SI->use_empty()) {
        LoadInst *LI = cast<LoadInst>(SI->user_back());

        IRBuilder<> Builder(LI);
        LoadInst *TrueLoad =
          Builder.CreateLoad(SI->getTrueValue(), LI->getName()+".t");
        LoadInst *FalseLoad =
          Builder.CreateLoad(SI->getFalseValue(), LI->getName()+".f");
        // ... transfer alignment and AA metadata to both loads ...

        Value *V = Builder.CreateSelect(SI->getCondition(), TrueLoad,
                                        FalseLoad);
        V->takeName(LI);
        LI->replaceAllUsesWith(V);
        LI->eraseFromParent();
      }

      // Now that all the loads are gone, the select is gone too.
      SI->eraseFromParent();
      continue;
    }

    // Otherwise, a PHI node: push the loads up into the predecessors.
    PHINode *PN = cast<PHINode>(InstsToRewrite[i]);
    if (PN->use_empty()) {
      PN->eraseFromParent();
      continue;
    }

    Type *LoadTy = cast<PointerType>(PN->getType())->getElementType();
    // ... create a PHI of LoadTy and one load per predecessor ...

    // Get the AA tags and alignment to use from one of the loads.  It does
    // not matter which one we get, and if any differ, it does not matter.
    LoadInst *SomeLoad = cast<LoadInst>(PN->user_back());
    AAMDNodes AATags;
    SomeLoad->getAAMetadata(AATags);
    unsigned Align = SomeLoad->getAlignment();
    // ...
  }

  return true;
}
bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  const DataLayout &DL = F.getParent()->getDataLayout();
  DominatorTree *DT = nullptr;
  if (HasDomTree)
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  AssumptionCache &AC =
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

  BasicBlock &BB = F.getEntryBlock(); // Allocas always live in the entry block.
  DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
  bool Changed = false;
  SmallVector<Instruction*, 64> Insts;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca?
        if (tryToMakeAllocaBePromotable(AI, DL))
          Allocas.push_back(AI);

    if (Allocas.empty())
      break;

    if (HasDomTree)
      PromoteMemToReg(Allocas, *DT, nullptr, &AC);
    else {
      SSAUpdater SSA;
      for (unsigned i = 0, e = Allocas.size(); i != e; ++i) {
        AllocaInst *AI = Allocas[i];

        // Build the list of instructions to promote.
        for (User *U : AI->users())
          Insts.push_back(cast<Instruction>(U));
        AllocaPromoter(Insts, SSA, &DIB).run(AI, Insts);
        Insts.clear();
      }
    }
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}
/// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for
/// SROA: it must be a struct or an array with few enough elements.
bool SROA::ShouldAttemptScalarRepl(AllocaInst *AI) {
  Type *T = AI->getAllocatedType();
  // Do not split any struct with more than StructMemberThreshold members.
  if (StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements() <= StructMemberThreshold;
  // Do not split any array with more than ArrayElementThreshold elements.
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return AT->getNumElements() <= ArrayElementThreshold;
  return false;
}
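// Example (illustrative): with the default thresholds set in the SROA
// constructor above,
//   %ok = alloca [8 x i32]   ; split: 8 <= ArrayElementThreshold (8)
//   %no = alloca [9 x i32]   ; left alone: 9 > ArrayElementThreshold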
// performScalarRepl - A simple worklist-driven algorithm over the allocas in
// the entry block.
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocaInst*> WorkList;
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Scan the entry basic block, adding allocas to the worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocaInst *A = dyn_cast<AllocaInst>(I))
      WorkList.push_back(A);

  // Process the worklist.
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocaInst *AI = WorkList.back();
    WorkList.pop_back();

    // ... skip dead allocas, array allocations, and unsized types ...

    uint64_t AllocaSize = DL.getTypeAllocSize(AI->getAllocatedType());

    // Do not promote [0 x %struct].
    if (AllocaSize == 0)
      continue;

    // Do not promote any struct whose size is too big.
    if (AllocaSize > SRThreshold)
      continue;

    // If the alloca looks like a good candidate for scalar replacement, and
    // if all its users can be transformed, split the aggregate into its
    // separate elements.
    if (ShouldAttemptScalarRepl(AI) && isSafeAllocaToScalarRepl(AI)) {
      DoScalarReplacement(AI, WorkList);
      Changed = true;
      continue;
    }

    // Otherwise, try to turn the whole aggregate (potentially through casts)
    // into a single scalar that mem2reg can promote.
    if (AllocaInst *NewAI =
            ConvertToScalarInfo((unsigned)AllocaSize, DL, ScalarLoadThreshold)
                .TryConvert(AI)) {
      NewAI->takeName(AI);
      AI->eraseFromParent();
      ++NumConverted;
      Changed = true;
    }
  }

  return Changed;
}
void SROA::DoScalarReplacement(AllocaInst *AI,
                               std::vector<AllocaInst*> &WorkList) {
  DEBUG(dbgs() << "Found inst to SROA: " << *AI << '\n');
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      // ... NA = new alloca of member type i, named after AI ...
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA); // Add to worklist for recursive processing.
    }
  } else {
    ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      // ... NA = new alloca of the array element type ...
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA); // Add to worklist for recursive processing.
    }
  }

  // Now that we have created the new alloca instructions, rewrite all the
  // uses of the old alloca.
  RewriteForScalarRepl(AI, AI, 0, ElementAllocas);

  // Erase any instructions that were made dead while rewriting the alloca.
  DeleteDeadInstructions();
  AI->eraseFromParent();

  ++NumReplaced;
}
/// DeleteDeadInstructions - Erase the instructions on the DeadInsts list,
/// recursively including any of their operands that become trivially dead.
void SROA::DeleteDeadInstructions() {
  while (!DeadInsts.empty()) {
    Instruction *I = cast<Instruction>(DeadInsts.pop_back_val());

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        // Zero out the operand and see if it becomes trivially dead.
        *OI = nullptr;
        if (isInstructionTriviallyDead(U))
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
  }
}
/// isSafeForScalarRepl - Check that all uses of I (a pointer derived from
/// the alloca at byte offset Offset) are safe to rewrite, recording the
/// results in Info.
void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
                               AllocaInfo &Info) {
  const DataLayout &DL = I->getModule()->getDataLayout();
  for (Use &U : I->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());

    if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
      isSafeForScalarRepl(BC, Offset, Info);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      uint64_t GEPOffset = Offset;
      isSafeGEP(GEPI, GEPOffset, Info);
      if (!Info.isUnsafe)
        isSafeForScalarRepl(GEPI, GEPOffset, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      if (!Length || Length->isNegative())
        return MarkUnsafe(Info, User);
      isSafeMemAccess(Offset, Length->getZExtValue(), nullptr,
                      U.getOperandNo() == 0, Info, MI,
                      true /*AllowWholeAccess*/);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (!LI->isSimple())
        return MarkUnsafe(Info, User);
      Type *LIType = LI->getType();
      isSafeMemAccess(Offset, DL.getTypeAllocSize(LIType), LIType, false,
                      Info, LI, true /*AllowWholeAccess*/);
      Info.hasALoadOrStore = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // A store is ok if it is storing INTO the pointer, not the pointer
      // itself.
      if (!SI->isSimple() || SI->getOperand(0) == I)
        return MarkUnsafe(Info, User);
      Type *SIType = SI->getOperand(0)->getType();
      isSafeMemAccess(Offset, DL.getTypeAllocSize(SIType), SIType, true,
                      Info, SI, true /*AllowWholeAccess*/);
      Info.hasALoadOrStore = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
      if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
          II->getIntrinsicID() != Intrinsic::lifetime_end)
        return MarkUnsafe(Info, User);
    } else if (isa<PHINode>(User) || isa<SelectInst>(User)) {
      isSafePHISelectUseForScalarRepl(User, Offset, Info);
    } else {
      return MarkUnsafe(Info, User);
    }
    if (Info.isUnsafe)
      return;
  }
}
/// isSafePHISelectUseForScalarRepl - The same check as above, but reached
/// through a PHI or select; whole-alloca accesses are not allowed here.
void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
                                           AllocaInfo &Info) {
  // If we've already checked this PHI, don't do it again.
  if (PHINode *PN = dyn_cast<PHINode>(I))
    if (!Info.CheckedPHIs.insert(PN).second)
      return;

  const DataLayout &DL = I->getModule()->getDataLayout();
  for (User *U : I->users()) {
    Instruction *UI = cast<Instruction>(U);

    if (BitCastInst *BC = dyn_cast<BitCastInst>(UI)) {
      isSafePHISelectUseForScalarRepl(BC, Offset, Info);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
      // Only allow "bitcast" GEPs for simplicity.  We could generalize this,
      // but we would have to prove we stay inside the element being
      // promoted.
      if (!GEPI->hasAllZeroIndices())
        return MarkUnsafe(Info, UI);
      isSafePHISelectUseForScalarRepl(GEPI, Offset, Info);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      if (!LI->isSimple())
        return MarkUnsafe(Info, UI);
      Type *LIType = LI->getType();
      isSafeMemAccess(Offset, DL.getTypeAllocSize(LIType), LIType, false,
                      Info, LI, false /*AllowWholeAccess*/);
      Info.hasALoadOrStore = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      // A store is ok if it is storing INTO the pointer, not the pointer
      // itself.
      if (!SI->isSimple() || SI->getOperand(0) == I)
        return MarkUnsafe(Info, UI);
      Type *SIType = SI->getOperand(0)->getType();
      isSafeMemAccess(Offset, DL.getTypeAllocSize(SIType), SIType, true,
                      Info, SI, false /*AllowWholeAccess*/);
      Info.hasALoadOrStore = true;
    } else if (isa<PHINode>(UI) || isa<SelectInst>(UI)) {
      isSafePHISelectUseForScalarRepl(UI, Offset, Info);
    } else {
      return MarkUnsafe(Info, UI);
    }
    if (Info.isUnsafe)
      return;
  }
}
void SROA::isSafeGEP(GetElementPtrInst *GEPI,
                     uint64_t &Offset, AllocaInfo &Info) {
  gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI);
  if (GEPIt == E)
    return;
  bool NonConstant = false;
  unsigned NonConstantIdxSize = 0;
  const DataLayout &DL = GEPI->getModule()->getDataLayout();

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; GEPIt != E; ++GEPIt) {
    // Ignore struct elements; no extra checking is needed for these.
    if ((*GEPIt)->isStructTy())
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand());
    if (!IdxVal) {
      // Only vectors can be dynamically indexed; anything else is unsafe.
      if (!(*GEPIt)->isVectorTy())
        return MarkUnsafe(Info, GEPI);
      NonConstant = true;
      NonConstantIdxSize = DL.getTypeAllocSize(*GEPIt);
    }
  }

  // Compute the offset due to this GEP and check if the alloca has a
  // component element at that offset.
  SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
  // A trailing dynamic vector index does not affect the constant offset.
  if (NonConstant)
    Indices.pop_back();
  Offset += DL.getIndexedOffset(GEPI->getPointerOperandType(), Indices);
  if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset,
                        NonConstantIdxSize, DL))
    MarkUnsafe(Info, GEPI);
}
/// isHomogeneousAggregate - Check if type T is a struct or an array
/// containing elements of the same type (which is always true for arrays).
/// If so, return true with NumElts and EltTy set to the number of elements
/// and the element type, respectively.
static bool isHomogeneousAggregate(Type *T, unsigned &NumElts, Type *&EltTy) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
    NumElts = AT->getNumElements();
    EltTy = (NumElts == 0 ? nullptr : AT->getElementType());
    return true;
  }
  if (StructType *ST = dyn_cast<StructType>(T)) {
    NumElts = ST->getNumContainedTypes();
    EltTy = (NumElts == 0 ? nullptr : ST->getContainedType(0));
    for (unsigned n = 1; n < NumElts; ++n) {
      if (ST->getContainedType(n) != EltTy)
        return false;
    }
    return true;
  }
  return false;
}

/// isCompatibleAggregate - Check if T1 and T2 are either the same type or
/// are "homogeneous" aggregates with the same element type and number of
/// elements.
static bool isCompatibleAggregate(Type *T1, Type *T2) {
  if (T1 == T2)
    return true;

  unsigned NumElts1, NumElts2;
  Type *EltTy1, *EltTy2;
  if (isHomogeneousAggregate(T1, NumElts1, EltTy1) &&
      isHomogeneousAggregate(T2, NumElts2, EltTy2) &&
      NumElts1 == NumElts2 && EltTy1 == EltTy2)
    return true;

  return false;
}
void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
                           Type *MemOpType, bool isStore,
                           AllocaInfo &Info, Instruction *TheAccess,
                           bool AllowWholeAccess) {
  const DataLayout &DL = TheAccess->getModule()->getDataLayout();
  // Check if this is a load/store of the entire alloca.
  if (Offset == 0 && AllowWholeAccess &&
      MemSize == DL.getTypeAllocSize(Info.AI->getAllocatedType())) {
    // Whole-alloca access is safe for mem intrinsics (MemOpType is null) and
    // for integer loads/stores, which behave like mem intrinsics here.
    if (!MemOpType || MemOpType->isIntegerTy()) {
      if (isStore)
        Info.isMemCpyDst = true;
      else
        Info.isMemCpySrc = true;
      return;
    }
    // It is also safe for a type compatible with the alloca's type, so that
    // loads/stores can be rewritten using insertvalue/extractvalue.
    if (isCompatibleAggregate(MemOpType, Info.AI->getAllocatedType())) {
      Info.hasSubelementAccess = true;
      return;
    }
  }
  // Otherwise the offset/size must correspond to a component within the
  // alloca type.
  Type *T = Info.AI->getAllocatedType();
  if (TypeHasComponent(T, Offset, MemSize, DL)) {
    Info.hasSubelementAccess = true;
    return;
  }

  return MarkUnsafe(Info, TheAccess);
}
/// TypeHasComponent - Return true if T has a component type with the
/// specified offset and size.  If Size is zero, do not check the size.
bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size,
                            const DataLayout &DL) {
  Type *EltTy;
  uint64_t EltSize;
  if (StructType *ST = dyn_cast<StructType>(T)) {
    const StructLayout *Layout = DL.getStructLayout(ST);
    unsigned EltIdx = Layout->getElementContainingOffset(Offset);
    EltTy = ST->getContainedType(EltIdx);
    EltSize = DL.getTypeAllocSize(EltTy);
    Offset -= Layout->getElementOffset(EltIdx);
  } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
    EltTy = AT->getElementType();
    EltSize = DL.getTypeAllocSize(EltTy);
    if (Offset >= AT->getNumElements() * EltSize)
      return false;
    Offset %= EltSize;
  } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
    EltTy = VT->getElementType();
    EltSize = DL.getTypeAllocSize(EltTy);
    if (Offset >= VT->getNumElements() * EltSize)
      return false;
    Offset %= EltSize;
  } else {
    return false;
  }
  if (Offset == 0 && (Size == 0 || EltSize == Size))
    return true;
  // Check if the component spans multiple elements.
  if (Offset + Size > EltSize)
    return false;
  return TypeHasComponent(EltTy, Offset, Size, DL);
}
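// Worked trace (illustrative): T = { i32, [4 x i8] }, Offset = 6, Size = 2.
//   Struct step: offset 6 lands in the [4 x i8] member at byte 4, so we
//   recurse with Offset = 2.  Array step: EltSize(i8) = 1, and the 2-byte
//   access cannot fit in a single i8 element (Offset + Size > EltSize), so
//   the result is false: the access straddles elements.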
/// RewriteForScalarRepl - Rewrite every use of I (a pointer into the alloca
/// AI at byte offset Offset) to use the new element allocas instead.
void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI,
                                uint64_t Offset,
                                SmallVectorImpl<AllocaInst *> &NewElts) {
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;) {
    Use &TheUse = *UI++;
    Instruction *User = cast<Instruction>(TheUse.getUser());

    if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
      RewriteBitCast(BC, AI, Offset, NewElts);
      continue;
    }

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      RewriteGEP(GEPI, AI, Offset, NewElts);
      continue;
    }

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      // A whole-aggregate mem intrinsic is split per element; otherwise it
      // can only touch a single element, and only its address operand needs
      // updating.
      // ...
      RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);
      continue;
    }

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        RewriteLifetimeIntrinsic(II, AI, Offset, NewElts);
      }
      continue;
    }

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Type *LIType = LI->getType();
      if (isCompatibleAggregate(LIType, AI->getAllocatedType())) {
        // An aggregate load becomes one load per element plus insertvalues.
        Value *Insert = UndefValue::get(LIType);
        IRBuilder<> Builder(LI);
        for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
          Value *Load = Builder.CreateLoad(NewElts[i], "load");
          Insert = Builder.CreateInsertValue(Insert, Load, i, "insert");
        }
        LI->replaceAllUsesWith(Insert);
        DeadInsts.push_back(LI);
      } else if (/* LIType is an integer covering the whole alloca */) {
        RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      }
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      Type *SIType = Val->getType();
      if (isCompatibleAggregate(SIType, AI->getAllocatedType())) {
        // An aggregate store becomes one extractvalue/store per element.
        IRBuilder<> Builder(SI);
        for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
          Value *Extract = Builder.CreateExtractValue(Val, i, Val->getName());
          Builder.CreateStore(Extract, NewElts[i]);
        }
        DeadInsts.push_back(SI);
      } else if (/* SIType is an integer covering the whole alloca */) {
        RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      }
      continue;
    }

    if (isa<SelectInst>(User) || isa<PHINode>(User)) {
      // A PHI/select user of the alloca itself must be rewritten; GEP and
      // bitcast users are RAUW'd to the new pointer instead.
      if (!isa<AllocaInst>(I))
        continue;

      assert(Offset == 0 && NewElts[0] &&
             "Direct alloca use should have a zero offset");
      // ...
    }
  }
}
/// RewriteBitCast - Update a bitcast of the alloca to refer to the new
/// element allocas.
void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
                          SmallVectorImpl<AllocaInst *> &NewElts) {
  RewriteForScalarRepl(BC, AI, Offset, NewElts);
  // ...
  // The bitcast references the original alloca; replace its uses with
  // references to the element alloca containing offset zero.
  Type *T = AI->getAllocatedType();
  uint64_t EltOffset = 0;
  Type *IdxTy;
  uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy, DL);
  Instruction *Val = NewElts[Idx];
  if (Val->getType() != BC->getDestTy()) {
    Val = new BitCastInst(Val, BC->getDestTy(), "", BC);
    Val->takeName(BC);
  }
  BC->replaceAllUsesWith(Val);
  DeadInsts.push_back(BC);
}
/// FindElementAndOffset - Return the index of the element containing Offset
/// within the specified type, which must be a struct, array, or vector.
/// Sets T to the element's type, Offset to the offset within that element,
/// and IdxTy to the index type to use in a getelementptr instruction.
uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset, Type *&IdxTy,
                                    const DataLayout &DL) {
  uint64_t Idx = 0;

  if (StructType *ST = dyn_cast<StructType>(T)) {
    const StructLayout *Layout = DL.getStructLayout(ST);
    Idx = Layout->getElementContainingOffset(Offset);
    T = ST->getContainedType(Idx);
    Offset -= Layout->getElementOffset(Idx);
    IdxTy = Type::getInt32Ty(T->getContext());
    return Idx;
  }

  if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
    T = AT->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(T);
    Idx = Offset / EltSize;
    Offset -= Idx * EltSize;
    IdxTy = Type::getInt64Ty(T->getContext());
    return Idx;
  }

  VectorType *VT = cast<VectorType>(T);
  T = VT->getElementType();
  uint64_t EltSize = DL.getTypeAllocSize(T);
  Idx = Offset / EltSize;
  Offset -= Idx * EltSize;
  IdxTy = Type::getInt64Ty(T->getContext());
  return Idx;
}
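// Worked example (illustrative): T = [10 x i32], Offset = 22.
//   EltSize = 4, so Idx = 22 / 4 = 5 and Offset becomes 22 - 20 = 2:
//   the byte lives two bytes into element 5, and T is updated to i32.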
void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI,
                      uint64_t Offset,
                      SmallVectorImpl<AllocaInst *> &NewElts) {
  uint64_t OldOffset = Offset;
  // ... fold GEPI's constant indices into Offset; a trailing dynamic vector
  // index, if any, is kept aside and re-added at the end ...
  Value *NonConstantIdx = nullptr;
  if (!GEPI->hasAllConstantIndices())
    NonConstantIdx = Indices.pop_back_val();
  Offset += DL.getIndexedOffset(GEPI->getPointerOperandType(), Indices);

  RewriteForScalarRepl(GEPI, AI, Offset, NewElts);

  Type *T = AI->getAllocatedType();
  Type *IdxTy;
  uint64_t OldIdx = FindElementAndOffset(T, OldOffset, IdxTy, DL);
  // ...
  T = AI->getAllocatedType();
  uint64_t EltOffset = Offset;
  uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy, DL);

  // If the GEP does not move the pointer across elements of the alloca being
  // split, it does not need to be rewritten.  Otherwise build a new index
  // list, descending until the remaining offset is zero.
  // ...
  while (EltOffset != 0) {
    uint64_t EltIdx = FindElementAndOffset(T, EltOffset, IdxTy, DL);
    NewArgs.push_back(ConstantInt::get(IdxTy, EltIdx));
  }
  if (NonConstantIdx) {
    // Add "i32 0" indices through any structs or arrays until we reach the
    // vector being dynamically indexed.
    Type *GepTy = T;
    while (!isa<VectorType>(GepTy)) {
      NewArgs.push_back(Constant::getNullValue(i32Ty));
      GepTy = cast<CompositeType>(GepTy)->getTypeAtIndex(0U);
    }
    NewArgs.push_back(NonConstantIdx);
  }
  if (NewArgs.size() > 1) {
    // ... replace GEPI with an inbounds GEP of NewElts[Idx] using NewArgs ...
  }
  DeadInsts.push_back(GEPI);
}
/// RewriteLifetimeIntrinsic - II is a lifetime.start/lifetime.end marker;
/// rewrite it to cover the new element allocas that overlap the marked
/// region.
void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
                                    uint64_t Offset,
                                    SmallVectorImpl<AllocaInst *> &NewElts) {
  // ... Size is the marker's byte count ...
  // Locate the first element alloca covered by the marker.
  uint64_t NewOffset = Offset;
  Type *IdxTy;
  uint64_t Idx = FindElementAndOffset(AIType, NewOffset, IdxTy, DL);
  // ...
  IdxTy = NewElts[Idx]->getAllocatedType();
  // ...
  if (EltSize > Size) {
    // The marker ends inside this element: emit a marker truncated to Size.
    // ...
  }
  // Emit a marker for each element the region covers.
  for (; Idx != NewElts.size() && Size; ++Idx) {
    IdxTy = NewElts[Idx]->getAllocatedType();
    // ...
    if (EltSize > Size) {
      // ...
    }
    // ...
  }
  DeadInsts.push_back(II);
}
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
                                        AllocaInst *AI,
                                        SmallVectorImpl<AllocaInst *> &NewElts) {
  // For memcpy/memmove, OtherPtr is the pointer operand unrelated to the
  // alloca being split; for memset it stays null.
  Value *OtherPtr = nullptr;
  unsigned MemAlignment = MI->getAlignment();
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
    if (Inst == MTI->getRawDest())
      OtherPtr = MTI->getRawSource();
    else {
      assert(Inst == MTI->getRawSource());
      OtherPtr = MTI->getRawDest();
    }
  }

  if (OtherPtr) {
    unsigned AddrSpace =
      cast<PointerType>(OtherPtr->getType())->getAddressSpace();
    OtherPtr = OtherPtr->stripPointerCasts();

    // Copying the alloca to itself is a no-op: just delete it.  This code
    // runs once per operand of a no-op memcpy, so put only one reference to
    // MI on the DeadInsts list.
    if (OtherPtr == AI || OtherPtr == NewElts[0]) {
      for (SmallVectorImpl<Value *>::const_iterator I = DeadInsts.begin(),
             E = DeadInsts.end(); I != E; ++I)
        if (*I == MI)
          return;
      DeadInsts.push_back(MI);
      return;
    }

    // Cast OtherPtr to the same pointer type as AI so we can GEP through it.
    Type *NewTy = PointerType::get(AI->getType()->getElementType(), AddrSpace);
    if (OtherPtr->getType() != NewTy)
      OtherPtr = new BitCastInst(OtherPtr, NewTy, OtherPtr->getName(), MI);
  }

  // Process each element of the aggregate.
  bool SROADest = MI->getRawDest() == Inst;
  Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext()));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // For memcpy/memmove, compute the matching element address on the other
    // side, and the alignment guaranteed for it.
    Value *OtherElt = nullptr;
    unsigned OtherEltAlign = MemAlignment;
    if (OtherPtr) {
      Value *Idx[2] = { Zero,
                    ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) };
      // ... OtherElt = inbounds GEP of OtherPtr by Idx; OtherTy = pointee ...
      uint64_t EltOffset;
      if (StructType *ST = dyn_cast<StructType>(OtherTy)) {
        EltOffset = DL.getStructLayout(ST)->getElementOffset(i);
      } else {
        Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();
        EltOffset = DL.getTypeAllocSize(EltTy)*i;
      }
      // The guaranteed alignment shrinks with the element's offset.
      OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
    }

    Value *EltPtr = NewElts[i];
    Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

    // If we got down to a scalar, insert a load or store as appropriate.
    if (EltTy->isSingleValueType()) {
      if (isa<MemTransferInst>(MI)) {
        if (SROADest) {
          // From Other to Alloca.
          Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
          new StoreInst(Elt, EltPtr, MI);
        } else {
          // From Alloca to Other.
          Value *Elt = new LoadInst(EltPtr, "tmp", MI);
          new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
        }
        continue;
      }
      assert(isa<MemSetInst>(MI));

      // For memset of a non-zero constant byte CI, splat the byte across the
      // element's scalar type ValTy and store the resulting constant.
      // ...
      unsigned EltSize = DL.getTypeSizeInBits(ValTy);
      APInt OneVal(EltSize, CI->getZExtValue());
      APInt TotalVal(OneVal);
      // Set each byte.
      for (unsigned i = 0; 8*i < EltSize; ++i) {
        TotalVal = TotalVal.shl(8);
        TotalVal |= OneVal;
      }
      // ... StoreVal = TotalVal as ValTy (via inttoptr/bitcast if needed) ...
      assert(StoreVal->getType() == ValTy && "Type mismatch!");
      // If a vector element was requested, splat the scalar.
      if (EltTy->isVectorTy()) {
        unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
        // ... StoreVal = ConstantVector::getSplat(NumElts, StoreVal) ...
      }
      // ...
      continue;
    }

    // Otherwise the element is itself an aggregate: emit a per-element mem
    // intrinsic of the right size.
    unsigned EltSize = DL.getTypeAllocSize(EltTy);
    IRBuilder<> Builder(MI);
    if (isa<MemSetInst>(MI)) {
      Builder.CreateMemSet(EltPtr, MI->getArgOperand(1), EltSize,
                           MI->isVolatile());
    } else {
      assert(isa<MemTransferInst>(MI));
      Value *Dst = SROADest ? EltPtr : OtherElt; // Dest ptr
      Value *Src = SROADest ? OtherElt : EltPtr; // Src ptr
      if (isa<MemCpyInst>(MI))
        Builder.CreateMemCpy(Dst, Src, EltSize, OtherEltAlign,
                             MI->isVolatile());
      else
        Builder.CreateMemMove(Dst, Src, EltSize, OtherEltAlign,
                              MI->isVolatile());
    }
  }
  DeadInsts.push_back(MI);
}
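// Illustrative sketch (assumption): after %a = alloca {i32, i32} is split
// into %a.0/%a.1, a whole-struct memcpy from %other into %a becomes, per
// element, roughly:
//   %src.0 = getelementptr inbounds %other, 0, 0   ; schematic GEP
//   %tmp.0 = load i32* %src.0
//   store i32 %tmp.0, i32* %a.0
//   ... and likewise for element 1 ...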
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                         SmallVectorImpl<AllocaInst *> &NewElts) {
  // Extract each element out of the integer according to its structure
  // offset and store the element value into the individual alloca.
  Value *SrcVal = SI->getOperand(0);
  Type *AllocaEltTy = AI->getAllocatedType();
  uint64_t AllocaSizeBits = DL.getTypeAllocSizeInBits(AllocaEltTy);

  IRBuilder<> Builder(SI);
  // ... handle tail padding by zero-extending SrcVal to AllocaSizeBits ...

  DEBUG(dbgs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << '\n' << *SI
               << '\n');

  // AI may be a struct or an array; the element offsets are computed
  // differently for the two cases.
  if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    const StructLayout *Layout = DL.getStructLayout(EltSTy);
    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Shift SrcVal down so the field's bits sit at the bottom.
      Type *FieldTy = EltSTy->getElementType(i);
      uint64_t Shift = Layout->getElementOffsetInBits(i);
      if (DL.isBigEndian())
        Shift = AllocaSizeBits - Shift - DL.getTypeAllocSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = Builder.CreateLShr(EltVal, ShiftVal, "sroa.store.elt");
      }

      // Truncate down to an integer of the right size.  Zero-sized fields
      // like {} contain no data and are skipped.
      uint64_t FieldSizeBits = DL.getTypeSizeInBits(FieldTy);
      if (FieldSizeBits == 0)
        continue;
      if (FieldSizeBits != AllocaSizeBits)
        EltVal = Builder.CreateTrunc(EltVal,
                     IntegerType::get(SI->getContext(), FieldSizeBits));

      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size; just do it.
      } else if (FieldTy->isFloatingPointTy() || FieldTy->isVectorTy()) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = Builder.CreateBitCast(EltVal, FieldTy);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        // ...
      }
      new StoreInst(EltVal, DestField, SI);
    }
  } else {
    ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    Type *ArrayEltTy = ATy->getElementType();
    uint64_t ElementOffset = DL.getTypeAllocSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = DL.getTypeSizeInBits(ArrayEltTy);

    uint64_t Shift;
    if (DL.isBigEndian())
      Shift = AllocaSizeBits - ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Zero-sized fields like {} contain no data and are skipped.
      if (ElementSizeBits == 0)
        continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = Builder.CreateLShr(EltVal, ShiftVal, "sroa.store.elt");
      }
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = Builder.CreateTrunc(EltVal,
                     IntegerType::get(SI->getContext(), ElementSizeBits));

      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer field of this size; just do it.
      } else if (ArrayEltTy->isFloatingPointTy() || ArrayEltTy->isVectorTy()) {
        EltVal = Builder.CreateBitCast(EltVal, ArrayEltTy);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        // ...
      }
      new StoreInst(EltVal, DestField, SI);

      if (DL.isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  DeadInsts.push_back(SI);
}
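// Worked example (illustrative, little-endian): storing i64 %v to a split
// { i32, i32 } alloca:
//   store i32 (trunc i64 %v to i32),            i32* %a.0   ; Shift = 0
//   store i32 (trunc (lshr i64 %v, 32) to i32), i32* %a.1   ; Shift = 32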
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                        SmallVectorImpl<AllocaInst *> &NewElts) {
  // Extract each element out of the NewElts according to its structure
  // offset and form the result value.
  Type *AllocaEltTy = AI->getAllocatedType();
  uint64_t AllocaSizeBits = DL.getTypeAllocSizeInBits(AllocaEltTy);

  DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
               << '\n');

  // AI may be a struct or an array; the element offsets are computed
  // differently for the two cases.
  const StructLayout *Layout = nullptr;
  uint64_t ArrayEltBitOffset = 0;
  if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = DL.getStructLayout(EltSTy);
  } else {
    Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = DL.getTypeAllocSizeInBits(ArrayEltTy);
  }

  Value *ResultVal =
    Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca.  If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
    Value *SrcField = NewElts[i];
    Type *FieldTy = cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = DL.getTypeSizeInBits(FieldTy);

    // Zero-sized fields like {} contain no data and are skipped.
    if (FieldSizeBits == 0)
      continue;

    IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
                                               FieldSizeBits);
    // ... bitcast the pointer first if FieldTy is not int/fp/vector ...
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is an fp or vector of the right size but not an integer
    // type, bitcast to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // ... zero extend SrcField to the width of ResultVal ...

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout) // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else // Array case.
      Shift = i*ArrayEltBitOffset;

    if (DL.isBigEndian())
      Shift = AllocaSizeBits - Shift - FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    // Don't create an 'or x, 0' on the first iteration.
    if (!isa<Constant>(ResultVal) ||
        !cast<Constant>(ResultVal)->isNullValue())
      ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
    else
      ResultVal = SrcField;
  }

  // ... handle tail padding by truncating ResultVal to LI's type ...

  LI->replaceAllUsesWith(ResultVal);
  DeadInsts.push_back(LI);
}
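// Worked example (illustrative, little-endian): loading i64 from the split
// { i32, i32 } alloca rebuilds the integer with zext/shl/or:
//   %lo  = zext i32 (load i32* %a.0) to i64
//   %hi  = shl (zext i32 (load i32* %a.1) to i64), 32
//   %res = or i64 %hi, %lo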
/// HasPadding - Return true if the specified type has any structure or
/// alignment padding between the elements that would be split apart by SROA.
static bool HasPadding(Type *Ty, const DataLayout &DL) {
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    // An array has padding iff its element type does (size != alloc size).
    Ty = ATy->getElementType();
    return DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty);
  }

  // Otherwise a struct: look for gaps between consecutive fields and for
  // tail padding.
  StructType *STy = cast<StructType>(Ty);
  const StructLayout *SL = DL.getStructLayout(STy);
  unsigned PrevFieldBitOffset = 0;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
    unsigned FieldBitOffset = SL->getElementOffsetInBits(i);
    if (i) {
      unsigned PrevFieldEnd =
        PrevFieldBitOffset + DL.getTypeSizeInBits(STy->getElementType(i-1));
      if (PrevFieldEnd < FieldBitOffset)
        return true;
    }
    PrevFieldBitOffset = FieldBitOffset;
  }
  if (unsigned EltCount = STy->getNumElements()) {
    unsigned PrevFieldEnd = PrevFieldBitOffset +
      DL.getTypeSizeInBits(STy->getElementType(EltCount-1));
    if (PrevFieldEnd < SL->getSizeInBits())
      return true;
  }
  return false;
}
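// Example (illustrative): { i8, i32 } has padding.  The i8 field ends at bit
// 8 but the i32 field starts at bit 32 (PrevFieldEnd 8 < FieldBitOffset 32),
// so HasPadding returns true; a memcpy of the whole struct would also move
// the pad bytes, which is why such allocas are not split.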
bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all
  // of the users are safe to transform.
  AllocaInfo Info(AI);

  isSafeForScalarRepl(AI, 0, Info);
  if (Info.isUnsafe) {
    DEBUG(dbgs() << "Cannot transform: " << *AI << '\n');
    return false;
  }

  // If the alloca is both a memcpy source and destination, the memcpy could
  // be moving bytes that live in structure padding of the LLVM type but are
  // actually used; refuse to split in that case.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getAllocatedType(), DL))
    return false;

  // If the alloca is never accessed piecewise but is loaded/stored as a
  // whole, prefer converting it to a single scalar over splitting it, unless
  // it has only one element anyway.
  if (!Info.hasSubelementAccess && Info.hasALoadOrStore) {
    if (StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
      if (ST->getNumElements() > 1)
        return false;
    } else {
      // ... likewise for arrays with more than one element ...
    }
  }

  return true;
}