51 using namespace llvm::PatternMatch;
53 #define DEBUG_TYPE "codegenprepare"
55 STATISTIC(NumBlocksElim,
"Number of blocks eliminated");
56 STATISTIC(NumPHIsElim,
"Number of trivial PHIs eliminated");
57 STATISTIC(NumGEPsElim,
"Number of GEPs converted to casts");
58 STATISTIC(NumCmpUses,
"Number of uses of Cmp expressions replaced with uses of "
60 STATISTIC(NumCastUses,
"Number of uses of Cast expressions replaced with uses "
62 STATISTIC(NumMemoryInsts,
"Number of memory instructions whose address "
63 "computations were sunk");
64 STATISTIC(NumExtsMoved,
"Number of [s|z]ext instructions combined with loads");
65 STATISTIC(NumExtUses,
"Number of uses of [s|z]ext instructions optimized");
66 STATISTIC(NumRetsDup,
"Number of return instructions duplicated");
67 STATISTIC(NumDbgValueMoved,
"Number of debug value instructions moved");
68 STATISTIC(NumSelectsExpanded,
"Number of selects turned into branches");
69 STATISTIC(NumAndCmpsMoved,
"Number of and/cmp's pushed into branches");
70 STATISTIC(NumStoreExtractExposed,
"Number of store(extractelement) exposed");
74 cl::desc(
"Disable branch optimizations in CodeGenPrepare"));
78 cl::desc(
"Disable GC optimizations in CodeGenPrepare"));
82 cl::desc(
"Disable select to branch conversion."));
86 cl::desc(
"Address sinking in CGP using GEPs."));
90 cl::desc(
"Enable sinkinig and/cmp into branches."));
94 cl::desc(
"Disable store(extract) optimizations in CodeGenPrepare"));
98 cl::desc(
"Stress test store(extract) optimizations in CodeGenPrepare"));
102 cl::desc(
"Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
107 cl::desc(
"Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
108 "optimization in CodeGenPrepare"));
115 TypeIsSExt(
Type *Ty,
bool IsSExt) : Ty(Ty), IsSExt(IsSExt) {}
118 class TypePromotionTransaction;
139 SetOfInstrs InsertedInsts;
142 InstrToOrigTy PromotedInsts;
159 bool runOnFunction(
Function &
F)
override;
// Legacy-PassManager hook: the human-readable pass name printed by
// tools such as -debug-pass / -time-passes.
161 const char *getPassName()
const override {
return "CodeGen Prepare"; }
171 bool EliminateMostlyEmptyBlocks(
Function &
F);
173 void EliminateMostlyEmptyBlock(
BasicBlock *BB);
174 bool OptimizeBlock(
BasicBlock &BB,
bool& ModifiedDT);
177 Type *AccessTy,
unsigned AS);
178 bool OptimizeInlineAsmInst(
CallInst *CS);
179 bool OptimizeCallInst(
CallInst *CI,
bool& ModifiedDT);
184 bool OptimizeExtractElementInst(
Instruction *Inst);
185 bool DupRetToEnableTailCallOpts(
BasicBlock *BB);
188 bool ExtLdPromotion(TypePromotionTransaction &TPT,
LoadInst *&LI,
191 unsigned CreatedInstCost);
199 "Optimize for code generation",
false,
false)
202 return new CodeGenPrepare(TM);
205 bool CodeGenPrepare::runOnFunction(
Function &
F) {
206 if (skipOptnoneFunction(F))
211 bool EverMadeChange =
false;
213 InsertedInsts.clear();
214 PromotedInsts.clear();
218 TLI =
TM->getSubtargetImpl(F)->getTargetLowering();
219 TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
220 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
225 if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
227 TLI->getBypassSlowDivWidths();
234 EverMadeChange |= EliminateMostlyEmptyBlocks(F);
239 EverMadeChange |= PlaceDbgValues(F);
246 EverMadeChange |= sinkAndCmp(F);
247 EverMadeChange |= splitBranchCondition(F);
250 bool MadeChange =
true;
255 bool ModifiedDTOnIteration =
false;
256 MadeChange |= OptimizeBlock(*BB, ModifiedDTOnIteration);
259 if (ModifiedDTOnIteration)
262 EverMadeChange |= MadeChange;
273 if (!MadeChange)
continue;
276 II = Successors.begin(),
IE = Successors.end(); II !=
IE; ++II)
282 MadeChange |= !WorkList.
empty();
283 while (!WorkList.
empty()) {
291 II = Successors.begin(),
IE = Successors.end(); II !=
IE; ++II)
298 if (EverMadeChange || MadeChange)
299 MadeChange |= EliminateFallThrough(F);
301 EverMadeChange |= MadeChange;
310 for (
auto &
I : Statepoints)
311 EverMadeChange |= simplifyOffsetableRelocate(*
I);
314 return EverMadeChange;
320 bool CodeGenPrepare::EliminateFallThrough(
Function &F) {
321 bool Changed =
false;
330 if (!SinglePred || SinglePred == BB || BB->
hasAddressTaken())
continue;
335 DEBUG(
dbgs() <<
"To merge:\n"<< *SinglePred <<
"\n\n\n");
356 bool CodeGenPrepare::EliminateMostlyEmptyBlocks(
Function &F) {
357 bool MadeChange =
false;
370 if (BBI != BB->
begin()) {
372 while (isa<DbgInfoIntrinsic>(BBI)) {
373 if (BBI == BB->
begin())
377 if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
386 if (!CanMergeBlocks(BB, DestBB))
389 EliminateMostlyEmptyBlock(BB);
398 bool CodeGenPrepare::CanMergeBlocks(
const BasicBlock *BB,
404 while (
const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
407 if (UI->
getParent() != DestBB || !isa<PHINode>(UI))
413 if (
const PHINode *UPN = dyn_cast<PHINode>(UI))
414 for (
unsigned I = 0, E = UPN->getNumIncomingValues();
I != E; ++
I) {
417 Insn->
getParent() != UPN->getIncomingBlock(
I))
428 if (!DestBBPN)
return true;
432 if (
const PHINode *BBPN = dyn_cast<PHINode>(BB->
begin())) {
434 for (
unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
435 BBPreds.
insert(BBPN->getIncomingBlock(i));
443 if (BBPreds.
count(Pred)) {
444 BBI = DestBB->
begin();
445 while (
const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
446 const Value *V1 = PN->getIncomingValueForBlock(Pred);
447 const Value *
V2 = PN->getIncomingValueForBlock(BB);
450 if (
const PHINode *V2PN = dyn_cast<PHINode>(V2))
451 if (V2PN->getParent() == BB)
452 V2 = V2PN->getIncomingValueForBlock(Pred);
455 if (V1 != V2)
return false;
466 void CodeGenPrepare::EliminateMostlyEmptyBlock(
BasicBlock *BB) {
470 DEBUG(
dbgs() <<
"MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);
475 if (SinglePred != DestBB) {
484 DEBUG(
dbgs() <<
"AFTER:\n" << *DestBB <<
"\n\n\n");
500 if (InValPhi && InValPhi->
getParent() == BB) {
509 for (
unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
524 DEBUG(
dbgs() <<
"AFTER:\n" << *DestBB <<
"\n\n\n");
537 for (
auto &U : AllRelocateCalls) {
542 RelocateIdxMap.
insert(std::make_pair(K, I));
544 for (
auto &Item : RelocateIdxMap) {
545 std::pair<unsigned, unsigned> Key = Item.first;
546 if (Key.first == Key.second)
551 auto BaseKey = std::make_pair(Key.first, Key.first);
554 auto MaybeBase = RelocateIdxMap.find(BaseKey);
555 if (MaybeBase == RelocateIdxMap.end())
560 RelocateInstMap[MaybeBase->second].push_back(I);
571 if (!Op || Op->getZExtValue() > 20)
585 bool MadeChange =
false;
586 for (
auto &ToReplace : Targets) {
591 "Not relocating a derived object of the original base object");
599 if (!Derived || Derived->getPointerOperand() != Base)
607 assert(RelocatedBase->
getNextNode() &&
"Should always have one since it's not a terminator");
636 ActualRelocatedBase =
637 cast<Instruction>(Builder.CreateBitCast(RelocatedBase, Base->
getType()));
639 Value *Replacement = Builder.CreateGEP(
640 Derived->getSourceElementType(), ActualRelocatedBase,
makeArrayRef(OffsetV));
641 Instruction *ReplacementInst = cast<Instruction>(Replacement);
646 if (ReplacementInst->
getType() != ToReplace->getType()) {
648 cast<Instruction>(Builder.CreateBitCast(ReplacementInst, ToReplace->
getType()));
650 ToReplace->replaceAllUsesWith(ActualReplacement);
651 ToReplace->eraseFromParent();
675 bool CodeGenPrepare::simplifyOffsetableRelocate(
Instruction &
I) {
676 bool MadeChange =
false;
679 for (
auto *U : I.
users())
686 if (AllRelocateCalls.
size() < 2)
693 if (RelocateInstMap.
empty())
696 for (
auto &Item : RelocateInstMap)
710 bool MadeChange =
false;
713 Use &TheUse = UI.getUse();
719 if (
PHINode *PN = dyn_cast<PHINode>(User)) {
727 if (UserBB == DefBB)
continue;
730 CastInst *&InsertedCast = InsertedCasts[UserBB];
740 TheUse = InsertedCast;
773 if (SrcVT.
bitsLT(DstVT))
return false;
804 if (!isa<IntegerType>(Ty))
818 assert(*AddI->
user_begin() == CI &&
"expected!");
824 auto *InsertPt = AddI->
hasOneUse() ? CI : AddI;
826 auto *UAddWithOverflow =
851 bool MadeChange =
false;
854 Use &TheUse = UI.getUse();
861 if (isa<PHINode>(User))
868 if (UserBB == DefBB)
continue;
871 CmpInst *&InsertedCmp = InsertedCmps[UserBB];
882 TheUse = InsertedCmp;
912 if (!isa<TruncInst>(User)) {
917 const APInt &Cimm = cast<ConstantInt>(User->
getOperand(1))->getValue();
919 if ((Cimm & (Cimm + 1)).getBoolValue())
934 bool MadeChange =
false;
938 TruncUI != TruncE;) {
940 Use &TruncTheUse = TruncUI.getUse();
941 Instruction *TruncUser = cast<Instruction>(*TruncUI);
960 if (isa<PHINode>(TruncUser))
965 if (UserBB == TruncUserBB)
969 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
971 if (!InsertedShift && !InsertedTrunc) {
974 if (ShiftI->
getOpcode() == Instruction::AShr)
976 BinaryOperator::CreateAShr(ShiftI->
getOperand(0), CI,
"", InsertPt);
979 BinaryOperator::CreateLShr(ShiftI->
getOperand(0), CI,
"", InsertPt);
986 TruncI->
getType(),
"", TruncInsertPt);
990 TruncTheUse = InsertedTrunc;
1023 bool MadeChange =
false;
1026 Use &TheUse = UI.getUse();
1032 if (isa<PHINode>(User))
1040 if (UserBB == DefBB) {
1055 if (isa<TruncInst>(User) && shiftIsLegal
1068 if (!InsertedShift) {
1071 if (ShiftI->
getOpcode() == Instruction::AShr)
1073 BinaryOperator::CreateAShr(ShiftI->
getOperand(0), CI,
"", InsertPt);
1076 BinaryOperator::CreateLShr(ShiftI->
getOperand(0), CI,
"", InsertPt);
1082 TheUse = InsertedShift;
1134 assert(VecType &&
"Unexpected return type of masked load intrinsic");
1141 Builder.SetInsertPoint(InsertPt);
1143 Builder.SetCurrentDebugLocation(CI->
getDebugLoc());
1148 Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
1152 Value *VResult = UndefVal;
1155 Value *PrevPhi = UndefVal;
1158 for (
unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
1168 Phi = Builder.CreatePHI(VecType, 2,
"res.phi.else");
1175 Value *
Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
1186 Builder.SetInsertPoint(InsertPt);
1189 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
1191 VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));
1195 Builder.SetInsertPoint(InsertPt);
1199 PrevIfBlock = IfBlock;
1200 IfBlock = NewIfBlock;
1203 Phi = Builder.CreatePHI(VecType, 2,
"res.phi.select");
1206 Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
1247 assert(VecType &&
"Unexpected data type in masked store intrinsic");
1252 Builder.SetInsertPoint(InsertPt);
1253 Builder.SetCurrentDebugLocation(CI->
getDebugLoc());
1258 Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
1261 for (
unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
1269 Value *
Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
1280 Builder.SetInsertPoint(InsertPt);
1282 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
1284 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
1285 Builder.CreateStore(OneElt, Gep);
1289 Builder.SetInsertPoint(InsertPt);
1293 IfBlock = NewIfBlock;
1298 bool CodeGenPrepare::OptimizeCallInst(
CallInst *CI,
bool& ModifiedDT) {
1305 if (TLI->ExpandInlineAsm(CI)) {
1307 CurInstIterator = BB->
begin();
1314 if (OptimizeInlineAsmInst(CI))
1320 unsigned MinSize, PrefAlign;
1321 if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
1327 if (!Arg->getType()->isPointerTy())
1329 APInt Offset(
DL->getPointerSizeInBits(
1330 cast<PointerType>(Arg->getType())->getAddressSpace()),
1333 uint64_t Offset2 = Offset.getLimitedValue();
1334 if ((Offset2 & (PrefAlign-1)) != 0)
1337 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->
getAlignment() < PrefAlign &&
1357 if (Align >
MI->getAlignment())
1366 case Intrinsic::objectsize: {
1368 bool Min = (cast<ConstantInt>(II->
getArgOperand(1))->getZExtValue() == 1);
1375 WeakVH IterHandle(CurInstIterator);
1382 if (IterHandle != CurInstIterator) {
1383 CurInstIterator = BB->
begin();
1388 case Intrinsic::masked_load: {
1390 if (!TTI->isLegalMaskedLoad(CI->
getType(), 1)) {
1397 case Intrinsic::masked_store: {
1405 case Intrinsic::aarch64_stlxr:
1406 case Intrinsic::aarch64_stxr: {
1415 InsertedInsts.insert(ExtVal);
1424 unsigned AddrSpace = ~0u;
1427 if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy, AddrSpace))
1428 while (!PtrOps.
empty())
1429 if (OptimizeMemoryInst(II, PtrOps.
pop_back_val(), AccessTy, AddrSpace))
1442 if (
Value *V = Simplifier.optimizeCall(CI)) {
1481 bool CodeGenPrepare::DupRetToEnableTailCallOpts(
BasicBlock *BB) {
1517 do { ++BI; }
while (isa<DbgInfoIntrinsic>(BI));
1525 while (isa<DbgInfoIntrinsic>(BI)) ++BI;
1538 TLI->mayBeEmittedAsTailCall(CI))
1544 if (!VisitedBBs.
insert(*PI).second)
1550 do { ++RI; }
while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
1555 if (CI && CI->
use_empty() && TLI->mayBeEmittedAsTailCall(CI))
1560 bool Changed =
false;
1561 for (
unsigned i = 0, e = TailCalls.
size(); i != e; ++i) {
1583 ModifiedDT = Changed =
true;
// Default-construct an empty addressing mode: no base register and no
// scaled register.  (Remaining members presumably rely on in-class or
// base-class initializers — not visible here, confirm in full source.)
1605 ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
1609 bool operator==(
const ExtAddrMode& O)
const {
1610 return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
1611 (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
1612 (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
1624 bool NeedPlus =
false;
1627 OS << (NeedPlus ?
" + " :
"")
1629 BaseGV->printAsOperand(OS,
false);
1634 OS << (NeedPlus ?
" + " :
"")
1640 OS << (NeedPlus ?
" + " :
"")
1642 BaseReg->printAsOperand(OS,
false);
1646 OS << (NeedPlus ?
" + " :
"")
1648 ScaledReg->printAsOperand(OS,
false);
1654 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1664 class TypePromotionTransaction {
1669 class TypePromotionAction {
1677 TypePromotionAction(
Instruction *Inst) : Inst(Inst) {}
// Virtual destructor: actions are stored and destroyed through
// TypePromotionAction pointers, so derived cleanup must run correctly.
1679 virtual ~TypePromotionAction() {}
1686 virtual void undo() = 0;
1691 virtual void commit() {
1697 class InsertionHandler {
1707 bool HasPrevInstruction;
1714 if (HasPrevInstruction)
1715 Point.PrevInst = --It;
1722 if (HasPrevInstruction) {
1727 Instruction *Position = Point.BB->getFirstInsertionPt();
1737 class InstructionMoveBefore :
public TypePromotionAction {
1739 InsertionHandler Position;
1744 : TypePromotionAction(Inst), Position(Inst) {
1745 DEBUG(
dbgs() <<
"Do: move: " << *Inst <<
"\nbefore: " << *Before <<
"\n");
1750 void undo()
override {
1751 DEBUG(
dbgs() <<
"Undo: moveBefore: " << *Inst <<
"\n");
1752 Position.insert(Inst);
1757 class OperandSetter :
public TypePromotionAction {
1766 : TypePromotionAction(Inst), Idx(Idx) {
1767 DEBUG(
dbgs() <<
"Do: setOperand: " << Idx <<
"\n"
1768 <<
"for:" << *Inst <<
"\n"
1769 <<
"with:" << *NewVal <<
"\n");
1775 void undo()
override {
1776 DEBUG(
dbgs() <<
"Undo: setOperand:" << Idx <<
"\n"
1777 <<
"for: " << *Inst <<
"\n"
1778 <<
"with: " << *Origin <<
"\n");
1785 class OperandsHider :
public TypePromotionAction {
1791 OperandsHider(
Instruction *Inst) : TypePromotionAction(Inst) {
1792 DEBUG(
dbgs() <<
"Do: OperandsHider: " << *Inst <<
"\n");
1794 OriginalValues.reserve(NumOpnds);
1795 for (
unsigned It = 0; It < NumOpnds; ++It) {
1798 OriginalValues.push_back(Val);
1807 void undo()
override {
1808 DEBUG(
dbgs() <<
"Undo: OperandsHider: " << *Inst <<
"\n");
1809 for (
unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
1815 class TruncBuilder :
public TypePromotionAction {
1821 TruncBuilder(
Instruction *Opnd,
Type *Ty) : TypePromotionAction(Opnd) {
1823 Val = Builder.CreateTrunc(Opnd, Ty,
"promoted");
1824 DEBUG(
dbgs() <<
"Do: TruncBuilder: " << *Val <<
"\n");
// Return the value produced by the CreateTrunc call in the constructor.
// Note it is a Value*, not Instruction*: the builder may have folded the
// trunc to a constant.
1828 Value *getBuiltValue() {
return Val; }
1831 void undo()
override {
1832 DEBUG(
dbgs() <<
"Undo: TruncBuilder: " << *Val <<
"\n");
1833 if (
Instruction *IVal = dyn_cast<Instruction>(Val))
1834 IVal->eraseFromParent();
1839 class SExtBuilder :
public TypePromotionAction {
1846 : TypePromotionAction(InsertPt) {
1848 Val = Builder.CreateSExt(Opnd, Ty,
"promoted");
1849 DEBUG(
dbgs() <<
"Do: SExtBuilder: " << *Val <<
"\n");
// Return the value produced by the CreateSExt call in the constructor.
// May be a folded constant rather than a sext instruction.
1853 Value *getBuiltValue() {
return Val; }
1856 void undo()
override {
1857 DEBUG(
dbgs() <<
"Undo: SExtBuilder: " << *Val <<
"\n");
1858 if (
Instruction *IVal = dyn_cast<Instruction>(Val))
1859 IVal->eraseFromParent();
1864 class ZExtBuilder :
public TypePromotionAction {
1871 : TypePromotionAction(InsertPt) {
1873 Val = Builder.CreateZExt(Opnd, Ty,
"promoted");
1874 DEBUG(
dbgs() <<
"Do: ZExtBuilder: " << *Val <<
"\n");
// Return the value produced by the CreateZExt call in the constructor.
// May be a folded constant rather than a zext instruction.
1878 Value *getBuiltValue() {
return Val; }
1881 void undo()
override {
1882 DEBUG(
dbgs() <<
"Undo: ZExtBuilder: " << *Val <<
"\n");
1883 if (
Instruction *IVal = dyn_cast<Instruction>(Val))
1884 IVal->eraseFromParent();
1889 class TypeMutator :
public TypePromotionAction {
1896 : TypePromotionAction(Inst), OrigTy(Inst->
getType()) {
1897 DEBUG(
dbgs() <<
"Do: MutateType: " << *Inst <<
" with " << *NewTy
1903 void undo()
override {
1904 DEBUG(
dbgs() <<
"Undo: MutateType: " << *Inst <<
" with " << *OrigTy
1911 class UsesReplacer :
public TypePromotionAction {
1913 struct InstructionAndIdx {
1918 InstructionAndIdx(
Instruction *Inst,
unsigned Idx)
1919 : Inst(Inst), Idx(Idx) {}
1929 DEBUG(
dbgs() <<
"Do: UsersReplacer: " << *Inst <<
" with " << *New
1933 Instruction *UserI = cast<Instruction>(U.getUser());
1934 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
1941 void undo()
override {
1942 DEBUG(
dbgs() <<
"Undo: UsersReplacer: " << *Inst <<
"\n");
1943 for (use_iterator UseIt = OriginalUses.begin(),
1944 EndIt = OriginalUses.end();
1945 UseIt != EndIt; ++UseIt) {
1946 UseIt->Inst->setOperand(UseIt->Idx, Inst);
1952 class InstructionRemover :
public TypePromotionAction {
1957 OperandsHider Hider;
1959 UsesReplacer *Replacer;
1966 : TypePromotionAction(Inst),
Inserter(Inst), Hider(Inst),
1969 Replacer =
new UsesReplacer(Inst, New);
1970 DEBUG(
dbgs() <<
"Do: InstructionRemover: " << *Inst <<
"\n");
// Free the optional heap-allocated UsesReplacer (deleting a null
// Replacer is safe when no replacement value was supplied).
1974 ~InstructionRemover()
override {
delete Replacer; }
// Make the removal permanent by freeing the instruction.
// NOTE(review): this assumes Inst was already unlinked from its parent
// block when the action was recorded — confirm against the constructor
// in the full source.
1977 void commit()
override {
delete Inst; }
1981 void undo()
override {
1982 DEBUG(
dbgs() <<
"Undo: InstructionRemover: " << *Inst <<
"\n");
1994 typedef const TypePromotionAction *ConstRestorationPt;
1998 void rollback(ConstRestorationPt Point);
2000 ConstRestorationPt getRestorationPoint()
const;
2028 void TypePromotionTransaction::setOperand(
Instruction *Inst,
unsigned Idx,
2031 make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal));
2034 void TypePromotionTransaction::eraseInstruction(
Instruction *Inst,
2037 make_unique<TypePromotionTransaction::InstructionRemover>(Inst, NewVal));
2040 void TypePromotionTransaction::replaceAllUsesWith(
Instruction *Inst,
2042 Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
2045 void TypePromotionTransaction::mutateType(
Instruction *Inst,
Type *NewTy) {
2046 Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
2051 std::unique_ptr<TruncBuilder> Ptr(
new TruncBuilder(Opnd, Ty));
2052 Value *Val = Ptr->getBuiltValue();
2053 Actions.push_back(std::move(Ptr));
2059 std::unique_ptr<SExtBuilder> Ptr(
new SExtBuilder(Inst, Opnd, Ty));
2060 Value *Val = Ptr->getBuiltValue();
2061 Actions.push_back(std::move(Ptr));
2067 std::unique_ptr<ZExtBuilder> Ptr(
new ZExtBuilder(Inst, Opnd, Ty));
2068 Value *Val = Ptr->getBuiltValue();
2069 Actions.push_back(std::move(Ptr));
2073 void TypePromotionTransaction::moveBefore(
Instruction *Inst,
2076 make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before));
2079 TypePromotionTransaction::ConstRestorationPt
2080 TypePromotionTransaction::getRestorationPoint()
const {
2081 return !Actions.empty() ? Actions.back().get() :
nullptr;
2084 void TypePromotionTransaction::commit() {
2085 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
2091 void TypePromotionTransaction::rollback(
2092 TypePromotionTransaction::ConstRestorationPt Point) {
2093 while (!Actions.empty() && Point != Actions.back().get()) {
2094 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
2102 class AddressingModeMatcher {
2119 const SetOfInstrs &InsertedInsts;
2121 InstrToOrigTy &PromotedInsts;
2123 TypePromotionTransaction &TPT;
2128 bool IgnoreProfitability;
2133 const SetOfInstrs &InsertedInsts,
2134 InstrToOrigTy &PromotedInsts,
2135 TypePromotionTransaction &TPT)
2136 : AddrModeInsts(AMI), TM(TM),
2138 ->getTargetLowering()),
2139 DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
2140 MemoryInst(MI),
AddrMode(AM), InsertedInsts(InsertedInsts),
2141 PromotedInsts(PromotedInsts), TPT(TPT) {
2142 IgnoreProfitability =
false;
2153 static ExtAddrMode Match(
Value *V,
Type *AccessTy,
unsigned AS,
2157 const SetOfInstrs &InsertedInsts,
2158 InstrToOrigTy &PromotedInsts,
2159 TypePromotionTransaction &TPT) {
2162 bool Success = AddressingModeMatcher(AddrModeInsts, TM, AccessTy, AS,
2163 MemoryInst, Result, InsertedInsts,
2164 PromotedInsts, TPT).MatchAddr(V, 0);
2165 (void)Success; assert(Success &&
"Couldn't select *anything*?");
2169 bool MatchScaledValue(
Value *ScaleReg, int64_t Scale,
unsigned Depth);
2170 bool MatchAddr(
Value *V,
unsigned Depth);
2171 bool MatchOperationAddr(
User *Operation,
unsigned Opcode,
unsigned Depth,
2172 bool *MovedAway =
nullptr);
2173 bool IsProfitableToFoldIntoAddressingMode(
Instruction *I,
2174 ExtAddrMode &AMBefore,
2175 ExtAddrMode &AMAfter);
2176 bool ValueAlreadyLiveAtInst(
Value *Val,
Value *KnownLive1,
Value *KnownLive2);
2177 bool IsPromotionProfitable(
unsigned NewCost,
unsigned OldCost,
2178 Value *PromotedOperand)
const;
2184 bool AddressingModeMatcher::MatchScaledValue(
Value *ScaleReg, int64_t Scale,
2189 return MatchAddr(ScaleReg, Depth);
2200 ExtAddrMode TestAddrMode =
AddrMode;
2204 TestAddrMode.Scale += Scale;
2205 TestAddrMode.ScaledReg = ScaleReg;
2208 if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
2218 if (isa<Instruction>(ScaleReg) &&
2220 TestAddrMode.ScaledReg = AddLHS;
2221 TestAddrMode.BaseOffs += CI->
getSExtValue()*TestAddrMode.Scale;
2225 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
2226 AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
2242 case Instruction::BitCast:
2243 case Instruction::AddrSpaceCast:
2248 case Instruction::PtrToInt:
2251 case Instruction::IntToPtr:
2254 case Instruction::Add:
2256 case Instruction::Mul:
2257 case Instruction::Shl:
2260 case Instruction::GetElementPtr:
2271 static bool isPromotedInstructionLegal(
const TargetLowering &TLI,
2286 class TypePromotionHelper {
2298 static bool canGetThrough(
const Instruction *Inst,
Type *ConsideredExtType,
2299 const InstrToOrigTy &PromotedInsts,
bool IsSExt);
2303 static bool shouldExtOperand(
const Instruction *Inst,
int OpIdx) {
2304 if (isa<SelectInst>(Inst) && OpIdx == 0)
2318 static Value *promoteOperandForTruncAndAnyExt(
2320 InstrToOrigTy &PromotedInsts,
unsigned &CreatedInstsCost,
2334 TypePromotionTransaction &TPT,
2335 InstrToOrigTy &PromotedInsts,
2336 unsigned &CreatedInstsCost,
2342 static Value *signExtendOperandForOther(
2344 InstrToOrigTy &PromotedInsts,
unsigned &CreatedInstsCost,
2347 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
2348 Exts, Truncs, TLI,
true);
2352 static Value *zeroExtendOperandForOther(
2354 InstrToOrigTy &PromotedInsts,
unsigned &CreatedInstsCost,
2357 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
2358 Exts, Truncs, TLI,
false);
2364 InstrToOrigTy &PromotedInsts,
2365 unsigned &CreatedInstsCost,
2378 static Action getAction(
Instruction *
Ext,
const SetOfInstrs &InsertedInsts,
2380 const InstrToOrigTy &PromotedInsts);
2383 bool TypePromotionHelper::canGetThrough(
const Instruction *Inst,
2384 Type *ConsideredExtType,
2385 const InstrToOrigTy &PromotedInsts,
2394 if (isa<ZExtInst>(Inst))
2398 if (IsSExt && isa<SExtInst>(Inst))
2404 if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
2411 if (!isa<TruncInst>(Inst))
2434 const Type *OpndType;
2435 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
2436 if (It != PromotedInsts.end() && It->second.IsSExt == IsSExt)
2437 OpndType = It->second.Ty;
2438 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
2450 TypePromotionHelper::Action TypePromotionHelper::getAction(
2453 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
2454 "Unexpected instruction type");
2457 bool IsSExt = isa<SExtInst>(
Ext);
2461 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
2467 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
2472 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
2473 isa<ZExtInst>(ExtOpnd))
2474 return promoteOperandForTruncAndAnyExt;
2480 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
2483 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
2485 InstrToOrigTy &PromotedInsts,
unsigned &CreatedInstsCost,
2491 Value *ExtVal = SExt;
2492 bool HasMergedNonFreeExt =
false;
2493 if (isa<ZExtInst>(SExtOpnd)) {
2496 HasMergedNonFreeExt = !TLI.
isExtFree(SExtOpnd);
2499 TPT.replaceAllUsesWith(SExt, ZExt);
2500 TPT.eraseInstruction(SExt);
2505 TPT.setOperand(SExt, 0, SExtOpnd->
getOperand(0));
2507 CreatedInstsCost = 0;
2511 TPT.eraseInstruction(SExtOpnd);
2519 CreatedInstsCost = !TLI.
isExtFree(ExtInst) && !HasMergedNonFreeExt;
2527 TPT.eraseInstruction(ExtInst, NextVal);
2531 Value *TypePromotionHelper::promoteOperandForOther(
2533 InstrToOrigTy &PromotedInsts,
unsigned &CreatedInstsCost,
2540 CreatedInstsCost = 0;
2546 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->
getType());
2547 if (
Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
2548 ITrunc->removeFromParent();
2550 ITrunc->insertAfter(ExtOpnd);
2555 TPT.replaceAllUsesWith(ExtOpnd, Trunc);
2558 TPT.setOperand(Ext, 0, ExtOpnd);
2568 PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>(
2569 ExtOpnd, TypeIsSExt(ExtOpnd->
getType(), IsSExt)));
2571 TPT.mutateType(ExtOpnd, Ext->
getType());
2573 TPT.replaceAllUsesWith(Ext, ExtOpnd);
2577 DEBUG(
dbgs() <<
"Propagate Ext to operands\n");
2578 for (
int OpIdx = 0, EndOpIdx = ExtOpnd->
getNumOperands(); OpIdx != EndOpIdx;
2582 !shouldExtOperand(ExtOpnd, OpIdx)) {
2583 DEBUG(
dbgs() <<
"No need to propagate\n");
2588 if (
const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
2591 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
2592 : Cst->getValue().zext(BitWidth);
2597 if (isa<UndefValue>(Opnd)) {
2607 DEBUG(
dbgs() <<
"More operands to ext\n");
2608 Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->
getType())
2609 : TPT.createZExt(Ext, Opnd, Ext->
getType());
2610 if (!isa<Instruction>(ValForExtOpnd)) {
2611 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
2614 ExtForOpnd = cast<Instruction>(ValForExtOpnd);
2618 TPT.setOperand(ExtForOpnd, 0, Opnd);
2621 TPT.moveBefore(ExtForOpnd, ExtOpnd);
2622 TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
2623 CreatedInstsCost += !TLI.
isExtFree(ExtForOpnd);
2625 ExtForOpnd =
nullptr;
2627 if (ExtForOpnd == Ext) {
2628 DEBUG(
dbgs() <<
"Extension is useless now\n");
2629 TPT.eraseInstruction(Ext);
2643 bool AddressingModeMatcher::IsPromotionProfitable(
2644 unsigned NewCost,
unsigned OldCost,
Value *PromotedOperand)
const {
2645 DEBUG(
dbgs() <<
"OldCost: " << OldCost <<
"\tNewCost: " << NewCost <<
'\n');
2649 if (NewCost > OldCost)
2651 if (NewCost < OldCost)
2656 return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
2670 bool AddressingModeMatcher::MatchOperationAddr(
User *AddrInst,
unsigned Opcode,
2674 if (Depth >= 5)
return false;
2681 case Instruction::PtrToInt:
2683 return MatchAddr(AddrInst->
getOperand(0), Depth);
2684 case Instruction::IntToPtr: {
2689 return MatchAddr(AddrInst->
getOperand(0), Depth);
2692 case Instruction::BitCast:
2701 return MatchAddr(AddrInst->
getOperand(0), Depth);
2703 case Instruction::AddrSpaceCast: {
2708 return MatchAddr(AddrInst->
getOperand(0), Depth);
2711 case Instruction::Add: {
2713 ExtAddrMode BackupAddrMode =
AddrMode;
2714 unsigned OldSize = AddrModeInsts.size();
2719 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
2720 TPT.getRestorationPoint();
2722 if (MatchAddr(AddrInst->
getOperand(1), Depth+1) &&
2728 AddrModeInsts.resize(OldSize);
2729 TPT.rollback(LastKnownGood);
2732 if (MatchAddr(AddrInst->
getOperand(0), Depth+1) &&
2738 AddrModeInsts.resize(OldSize);
2739 TPT.rollback(LastKnownGood);
2745 case Instruction::Mul:
2746 case Instruction::Shl: {
2752 if (Opcode == Instruction::Shl)
2753 Scale = 1LL << Scale;
2755 return MatchScaledValue(AddrInst->
getOperand(0), Scale, Depth);
2757 case Instruction::GetElementPtr: {
2760 int VariableOperand = -1;
2761 unsigned VariableScale = 0;
2763 int64_t ConstantOffset = 0;
2765 for (
unsigned i = 1, e = AddrInst->
getNumOperands(); i != e; ++i, ++GTI) {
2766 if (
StructType *STy = dyn_cast<StructType>(*GTI)) {
2769 cast<ConstantInt>(AddrInst->
getOperand(i))->getZExtValue();
2775 }
else if (TypeSize) {
2777 if (VariableOperand != -1)
2781 VariableOperand = i;
2782 VariableScale = TypeSize;
2789 if (VariableOperand == -1) {
2790 AddrMode.BaseOffs += ConstantOffset;
2791 if (ConstantOffset == 0 ||
2794 if (MatchAddr(AddrInst->
getOperand(0), Depth+1))
2797 AddrMode.BaseOffs -= ConstantOffset;
2802 ExtAddrMode BackupAddrMode =
AddrMode;
2803 unsigned OldSize = AddrModeInsts.size();
2806 AddrMode.BaseOffs += ConstantOffset;
2809 if (!MatchAddr(AddrInst->
getOperand(0), Depth+1)) {
2813 AddrModeInsts.resize(OldSize);
2821 if (!MatchScaledValue(AddrInst->
getOperand(VariableOperand), VariableScale,
2826 AddrModeInsts.resize(OldSize);
2831 AddrMode.BaseOffs += ConstantOffset;
2832 if (!MatchScaledValue(AddrInst->
getOperand(VariableOperand),
2833 VariableScale, Depth)) {
2836 AddrModeInsts.resize(OldSize);
2843 case Instruction::SExt:
2844 case Instruction::ZExt: {
2851 TypePromotionHelper::Action TPH =
2852 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
2856 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
2857 TPT.getRestorationPoint();
2858 unsigned CreatedInstsCost = 0;
2860 Value *PromotedOperand =
2861 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost,
nullptr,
nullptr, TLI);
2876 assert(PromotedOperand &&
2877 "TypePromotionHelper should have filtered out those cases");
2879 ExtAddrMode BackupAddrMode =
AddrMode;
2880 unsigned OldSize = AddrModeInsts.size();
2882 if (!MatchAddr(PromotedOperand, Depth) ||
2887 !IsPromotionProfitable(CreatedInstsCost,
2888 ExtCost + (AddrModeInsts.size() - OldSize),
2891 AddrModeInsts.resize(OldSize);
2892 DEBUG(
dbgs() <<
"Sign extension does not pay off: rollback\n");
2893 TPT.rollback(LastKnownGood);
2907 bool AddressingModeMatcher::MatchAddr(
Value *Addr,
unsigned Depth) {
2910 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
2911 TPT.getRestorationPoint();
2912 if (
ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
2918 }
else if (
GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
2926 }
else if (
Instruction *I = dyn_cast<Instruction>(Addr)) {
2927 ExtAddrMode BackupAddrMode =
AddrMode;
2928 unsigned OldSize = AddrModeInsts.size();
2931 bool MovedAway =
false;
2932 if (MatchOperationAddr(I, I->
getOpcode(), Depth, &MovedAway)) {
2941 IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode,
AddrMode)) {
2942 AddrModeInsts.push_back(I);
2949 AddrModeInsts.resize(OldSize);
2950 TPT.rollback(LastKnownGood);
2952 }
else if (
ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
2953 if (MatchOperationAddr(CE,
CE->getOpcode(), Depth))
2955 TPT.rollback(LastKnownGood);
2956 }
else if (isa<ConstantPointerNull>(Addr)) {
2982 TPT.rollback(LastKnownGood);
2997 for (
unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
3017 static bool FindAllMemoryUses(
3022 if (!ConsideredInsts.
insert(I).second)
3026 if (!MightBeFoldableInst(I))
3031 Instruction *UserI = cast<Instruction>(U.getUser());
3033 if (
LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
3034 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
3038 if (
StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
3039 unsigned opNo = U.getOperandNo();
3040 if (opNo == 0)
return true;
3041 MemoryUses.push_back(std::make_pair(SI, opNo));
3045 if (
CallInst *CI = dyn_cast<CallInst>(UserI)) {
3047 if (!IA)
return true;
3050 if (!IsOperandAMemoryOperand(CI, IA, I, TM))
3055 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TM))
3066 bool AddressingModeMatcher::ValueAlreadyLiveAtInst(
Value *Val,
Value *KnownLive1,
3067 Value *KnownLive2) {
3069 if (Val ==
nullptr || Val == KnownLive1 || Val == KnownLive2)
3073 if (!isa<Instruction>(Val) && !isa<Argument>(Val))
return true;
3078 if (
AllocaInst *AI = dyn_cast<AllocaInst>(Val))
3109 bool AddressingModeMatcher::
3110 IsProfitableToFoldIntoAddressingMode(
Instruction *I, ExtAddrMode &AMBefore,
3111 ExtAddrMode &AMAfter) {
3112 if (IgnoreProfitability)
return true;
3123 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
3127 if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
3129 if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
3130 ScaledReg =
nullptr;
3134 if (!BaseReg && !ScaledReg)
3143 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TM))
3151 for (
unsigned i = 0, e = MemoryUses.
size(); i != e; ++i) {
3153 unsigned OpNo = MemoryUses[i].second;
3168 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3169 TPT.getRestorationPoint();
3170 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TM, AddressAccessTy, AS,
3171 MemoryInst, Result, InsertedInsts,
3172 PromotedInsts, TPT);
3173 Matcher.IgnoreProfitability =
true;
3174 bool Success = Matcher.MatchAddr(Address, 0);
3175 (void)Success; assert(Success &&
"Couldn't select *anything*?");
3180 TPT.rollback(LastKnownGood);
3183 if (std::find(MatchedAddrModeInsts.
begin(), MatchedAddrModeInsts.
end(),
3184 I) == MatchedAddrModeInsts.
end())
3187 MatchedAddrModeInsts.
clear();
3212 bool CodeGenPrepare::OptimizeMemoryInst(
Instruction *MemoryInst,
Value *Addr,
3213 Type *AccessTy,
unsigned AddrSpace) {
3225 Value *Consensus =
nullptr;
3226 unsigned NumUsesConsensus = 0;
3227 bool IsNumUsesConsensusValid =
false;
3230 TypePromotionTransaction TPT;
3231 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3232 TPT.getRestorationPoint();
3233 while (!worklist.
empty()) {
3238 if (!Visited.
insert(V).second) {
3239 Consensus =
nullptr;
3244 if (
PHINode *
P = dyn_cast<PHINode>(V)) {
3245 for (
Value *IncValue :
P->incoming_values())
3252 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
3253 V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TM,
3254 InsertedInsts, PromotedInsts, TPT);
3262 AddrMode = NewAddrMode;
3263 AddrModeInsts = NewAddrModeInsts;
3265 }
else if (NewAddrMode == AddrMode) {
3266 if (!IsNumUsesConsensusValid) {
3268 IsNumUsesConsensusValid =
true;
3277 if (NumUses > NumUsesConsensus) {
3279 NumUsesConsensus = NumUses;
3280 AddrModeInsts = NewAddrModeInsts;
3285 Consensus =
nullptr;
3292 TPT.rollback(LastKnownGood);
3299 bool AnyNonLocal =
false;
3300 for (
unsigned i = 0, e = AddrModeInsts.
size(); i != e; ++i) {
3309 DEBUG(
dbgs() <<
"CGP: Found local addrmode: " << AddrMode <<
"\n");
3322 Value *&SunkAddr = SunkAddrs[Addr];
3324 DEBUG(
dbgs() <<
"CGP: Reusing nonlocal addrmode: " << AddrMode <<
" for "
3325 << *MemoryInst <<
"\n");
3327 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->
getType());
3334 DEBUG(
dbgs() <<
"CGP: SINKING nonlocal addrmode: " << AddrMode <<
" for "
3335 << *MemoryInst <<
"\n");
3337 Value *ResultPtr =
nullptr, *ResultIndex =
nullptr;
3340 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
3341 ResultPtr = AddrMode.BaseReg;
3342 AddrMode.BaseReg =
nullptr;
3345 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
3348 if (ResultPtr || AddrMode.Scale != 1)
3351 ResultPtr = AddrMode.ScaledReg;
3355 if (AddrMode.BaseGV) {
3359 ResultPtr = AddrMode.BaseGV;
3365 if (!ResultPtr && AddrMode.BaseReg) {
3367 Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->
getType(),
"sunkaddr");
3368 AddrMode.BaseReg =
nullptr;
3369 }
else if (!ResultPtr && AddrMode.Scale == 1) {
3371 Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->
getType(),
"sunkaddr");
3376 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
3378 }
else if (!ResultPtr) {
3390 if (AddrMode.BaseReg) {
3391 Value *V = AddrMode.BaseReg;
3393 V = Builder.CreateIntCast(V, IntPtrTy,
true,
"sunkaddr");
3399 if (AddrMode.Scale) {
3400 Value *V = AddrMode.ScaledReg;
3401 if (V->
getType() == IntPtrTy) {
3403 }
else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
3405 V = Builder.CreateTrunc(V, IntPtrTy,
"sunkaddr");
3412 Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex);
3413 if (I && (ResultIndex != AddrMode.BaseReg))
3418 if (AddrMode.Scale != 1)
3422 ResultIndex = Builder.CreateAdd(ResultIndex, V,
"sunkaddr");
3428 if (AddrMode.BaseOffs) {
3433 if (ResultPtr->
getType() != I8PtrTy)
3434 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
3435 ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex,
"sunkaddr");
3442 SunkAddr = ResultPtr;
3444 if (ResultPtr->
getType() != I8PtrTy)
3445 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
3446 SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex,
"sunkaddr");
3450 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->
getType());
3453 DEBUG(
dbgs() <<
"CGP: SINKING nonlocal addrmode: " << AddrMode <<
" for "
3454 << *MemoryInst <<
"\n");
3456 Value *Result =
nullptr;
3463 if (AddrMode.BaseReg) {
3464 Value *V = AddrMode.BaseReg;
3466 V = Builder.CreatePtrToInt(V, IntPtrTy,
"sunkaddr");
3468 V = Builder.CreateIntCast(V, IntPtrTy,
true,
"sunkaddr");
3473 if (AddrMode.Scale) {
3474 Value *V = AddrMode.ScaledReg;
3475 if (V->
getType() == IntPtrTy) {
3478 V = Builder.CreatePtrToInt(V, IntPtrTy,
"sunkaddr");
3479 }
else if (cast<IntegerType>(IntPtrTy)->
getBitWidth() <
3481 V = Builder.CreateTrunc(V, IntPtrTy,
"sunkaddr");
3488 Instruction *I = dyn_cast_or_null<Instruction>(Result);
3489 if (I && (Result != AddrMode.BaseReg))
3493 if (AddrMode.Scale != 1)
3497 Result = Builder.CreateAdd(Result, V,
"sunkaddr");
3503 if (AddrMode.BaseGV) {
3504 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy,
"sunkaddr");
3506 Result = Builder.CreateAdd(Result, V,
"sunkaddr");
3512 if (AddrMode.BaseOffs) {
3515 Result = Builder.CreateAdd(Result, V,
"sunkaddr");
3523 SunkAddr = Builder.CreateIntToPtr(Result, Addr->
getType(),
"sunkaddr");
3533 WeakVH IterHandle(CurInstIterator);
3538 if (IterHandle != CurInstIterator) {
3541 CurInstIterator = BB->
begin();
3552 bool CodeGenPrepare::OptimizeInlineAsmInst(
CallInst *CS) {
3553 bool MadeChange =
false;
3560 for (
unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
3569 MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->
getType(), ~0u);
3580 assert(!Inst->
use_empty() &&
"Input must have at least one use");
3582 bool IsSExt = isa<SExtInst>(FirstUser);
3583 Type *ExtTy = FirstUser->getType();
3584 for (
const User *U : Inst->
users()) {
3586 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
3609 if (ExtTy->getScalarType()->getIntegerBitWidth() >
3649 bool CodeGenPrepare::ExtLdPromotion(TypePromotionTransaction &TPT,
3652 unsigned CreatedInstsCost = 0) {
3654 for (
auto I : Exts) {
3656 if ((LI = dyn_cast<LoadInst>(I->
getOperand(0)))) {
3665 TypePromotionHelper::Action TPH = TypePromotionHelper::getAction(
3666 I, InsertedInsts, *TLI, PromotedInsts);
3671 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3672 TPT.getRestorationPoint();
3674 unsigned NewCreatedInstsCost = 0;
3677 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
3678 &NewExts,
nullptr, *TLI);
3679 assert(PromotedVal &&
3680 "TypePromotionHelper should have filtered out those cases");
3688 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
3689 TotalCreatedInstsCost -= ExtCost;
3691 (TotalCreatedInstsCost > 1 ||
3692 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
3694 TPT.rollback(LastKnownGood);
3699 (void)ExtLdPromotion(TPT, LI, Inst, NewExts, TotalCreatedInstsCost);
3708 TPT.rollback(LastKnownGood);
3722 bool CodeGenPrepare::MoveExtToFormExtLoad(
Instruction *&I) {
3725 TypePromotionTransaction TPT;
3726 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3727 TPT.getRestorationPoint();
3733 bool HasPromoted = ExtLdPromotion(TPT, LI, I, Exts);
3735 assert(!HasPromoted && !LI &&
"If we did not match any load instruction "
3736 "the code must remain the same");
3756 TPT.rollback(LastKnownGood);
3762 if (isa<ZExtInst>(I))
3765 assert(isa<SExtInst>(I) &&
"Unexpected ext type!");
3770 TPT.rollback(LastKnownGood);
3783 bool CodeGenPrepare::OptimizeExtUses(
Instruction *I) {
3798 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->
getParent())
3801 bool DefIsLiveOut =
false;
3802 for (User *U : I->
users()) {
3807 if (UserBB == DefBB)
continue;
3808 DefIsLiveOut =
true;
3815 for (User *U : Src->
users()) {
3818 if (UserBB == DefBB)
continue;
3821 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
3828 bool MadeChange =
false;
3830 Instruction *User = cast<Instruction>(U.getUser());
3834 if (UserBB == DefBB)
continue;
3837 Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
3839 if (!InsertedTrunc) {
3842 InsertedInsts.insert(InsertedTrunc);
3856 static bool isFormingBranchFromSelectProfitable(
SelectInst *SI) {
3878 ((isa<LoadInst>(CmpOp0) && CmpOp0->
hasOneUse()) ||
3879 (isa<LoadInst>(CmpOp1) && CmpOp1->
hasOneUse()));
3885 bool CodeGenPrepare::OptimizeSelectInst(
SelectInst *SI) {
3905 !isFormingBranchFromSelectProfitable(SI))
3937 CurInstIterator = StartBlock->
end();
3938 ++NumSelectsExpanded;
3945 for (
unsigned i = 0; i < Mask.size(); ++i) {
3946 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem)
3948 SplatElem = Mask[i];
3967 if (!isBroadcastShuffle(SVI))
3973 bool MadeChange =
false;
3974 for (User *U : SVI->
users()) {
3979 if (UserBB == DefBB)
continue;
3986 Instruction *&InsertedShuffle = InsertedShuffles[UserBB];
3988 if (!InsertedShuffle) {
4024 class VectorPromoteHelper {
4039 unsigned StoreExtractCombineCost;
4047 if (InstsToBePromoted.empty())
4049 return InstsToBePromoted.back();
4055 unsigned getTransitionOriginalValueIdx()
const {
4056 assert(isa<ExtractElementInst>(Transition) &&
4057 "Other kind of transitions are not supported yet");
4064 unsigned getTransitionIdx()
const {
4065 assert(isa<ExtractElementInst>(Transition) &&
4066 "Other kind of transitions are not supported yet");
4074 Type *getTransitionType()
const {
4075 return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
4089 bool isProfitableToPromote() {
4090 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
4091 unsigned Index = isa<ConstantInt>(ValIdx)
4092 ? cast<ConstantInt>(ValIdx)->getZExtValue()
4094 Type *PromotedType = getTransitionType();
4111 uint64_t ScalarCost =
4112 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
4113 uint64_t VectorCost = StoreExtractCombineCost;
4114 for (
const auto &Inst : InstsToBePromoted) {
4120 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
4121 isa<ConstantFP>(Arg0);
4128 ScalarCost += TTI.getArithmeticInstrCost(
4130 VectorCost += TTI.getArithmeticInstrCost(Inst->
getOpcode(), PromotedType,
4133 DEBUG(
dbgs() <<
"Estimated cost of computation to be promoted:\nScalar: "
4134 << ScalarCost <<
"\nVector: " << VectorCost <<
'\n');
4135 return ScalarCost > VectorCost;
4146 Value *getConstantVector(
Constant *Val,
bool UseSplat)
const {
4147 unsigned ExtractIdx = UINT_MAX;
4151 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
4152 if (
ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
4158 unsigned End = getTransitionType()->getVectorNumElements();
4164 for (
unsigned Idx = 0; Idx != End; ++Idx) {
4165 if (Idx == ExtractIdx)
4176 unsigned OperandIdx) {
4179 if (OperandIdx != 1)
4184 case Instruction::SDiv:
4185 case Instruction::UDiv:
4186 case Instruction::SRem:
4187 case Instruction::URem:
4189 case Instruction::FDiv:
4190 case Instruction::FRem:
4199 unsigned CombineCost)
4200 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
4201 StoreExtractCombineCost(CombineCost), CombineInst(nullptr) {
4202 assert(Transition &&
"Do not know how to promote null");
4206 bool canPromote(
const Instruction *ToBePromoted)
const {
4208 return isa<BinaryOperator>(ToBePromoted);
4213 bool shouldPromote(
const Instruction *ToBePromoted)
const {
4216 for (
const Use &U : ToBePromoted->
operands()) {
4217 const Value *Val = U.get();
4218 if (Val == getEndOfTransition()) {
4222 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
4226 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
4227 !isa<ConstantFP>(Val))
4236 ISDOpcode, TLI.
getValueType(DL, getTransitionType(),
true));
4242 bool canCombine(
const Instruction *Use) {
return isa<StoreInst>(Use); }
4245 void enqueueForPromotion(
Instruction *ToBePromoted) {
4246 InstsToBePromoted.push_back(ToBePromoted);
4250 void recordCombineInstruction(
Instruction *ToBeCombined) {
4251 assert(canCombine(ToBeCombined) &&
"Unsupported instruction to combine");
4252 CombineInst = ToBeCombined;
4262 if (InstsToBePromoted.empty() || !CombineInst)
4270 for (
auto &ToBePromoted : InstsToBePromoted)
4271 promoteImpl(ToBePromoted);
4272 InstsToBePromoted.clear();
4278 void VectorPromoteHelper::promoteImpl(
Instruction *ToBePromoted) {
4287 assert(ToBePromoted->
getType() == Transition->getType() &&
4288 "The type of the result of the transition does not match "
4293 Type *TransitionTy = getTransitionType();
4298 for (Use &U : ToBePromoted->
operands()) {
4299 Value *Val = U.get();
4300 Value *NewVal =
nullptr;
4301 if (Val == Transition)
4302 NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
4303 else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
4304 isa<ConstantFP>(Val)) {
4306 NewVal = getConstantVector(
4307 cast<Constant>(Val),
4308 isa<UndefValue>(Val) ||
4309 canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
4313 ToBePromoted->
setOperand(U.getOperandNo(), NewVal);
4315 Transition->removeFromParent();
4316 Transition->insertAfter(ToBePromoted);
4317 Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
4323 bool CodeGenPrepare::OptimizeExtractElementInst(
Instruction *Inst) {
4324 unsigned CombineCost = UINT_MAX;
4339 DEBUG(
dbgs() <<
"Found an interesting transition: " << *Inst <<
'\n');
4340 VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
4345 DEBUG(
dbgs() <<
"Use: " << *ToBePromoted <<
'\n');
4347 if (ToBePromoted->
getParent() != Parent) {
4348 DEBUG(
dbgs() <<
"Instruction to promote is in a different block ("
4350 <<
") than the transition (" << Parent->
getName() <<
").\n");
4354 if (VPH.canCombine(ToBePromoted)) {
4355 DEBUG(
dbgs() <<
"Assume " << *Inst <<
'\n'
4356 <<
"will be combined with: " << *ToBePromoted <<
'\n');
4357 VPH.recordCombineInstruction(ToBePromoted);
4358 bool Changed = VPH.promote();
4359 NumStoreExtractExposed += Changed;
4364 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
4367 DEBUG(
dbgs() <<
"Promoting is possible... Enqueue for promotion!\n");
4369 VPH.enqueueForPromotion(ToBePromoted);
4370 Inst = ToBePromoted;
4375 bool CodeGenPrepare::OptimizeInst(
Instruction *I,
bool& ModifiedDT) {
4378 if (InsertedInsts.count(I))
4381 if (
PHINode *
P = dyn_cast<PHINode>(I)) {
4386 P->replaceAllUsesWith(V);
4387 P->eraseFromParent();
4394 if (
CastInst *CI = dyn_cast<CastInst>(I)) {
4407 if (isa<ZExtInst>(I) || isa<SExtInst>(
I)) {
4416 bool MadeChange = MoveExtToFormExtLoad(I);
4417 return MadeChange | OptimizeExtUses(I);
4423 if (
CmpInst *CI = dyn_cast<CmpInst>(I))
4427 if (
LoadInst *LI = dyn_cast<LoadInst>(I)) {
4435 if (
StoreInst *SI = dyn_cast<StoreInst>(I)) {
4437 unsigned AS = SI->getPointerAddressSpace();
4438 return OptimizeMemoryInst(I, SI->
getOperand(1),
4446 if (BinOp && (BinOp->
getOpcode() == Instruction::AShr ||
4447 BinOp->
getOpcode() == Instruction::LShr)) {
4456 if (GEPI->hasAllZeroIndices()) {
4459 GEPI->getName(), GEPI);
4461 GEPI->eraseFromParent();
4463 OptimizeInst(NC, ModifiedDT);
4469 if (
CallInst *CI = dyn_cast<CallInst>(I))
4470 return OptimizeCallInst(CI, ModifiedDT);
4472 if (
SelectInst *SI = dyn_cast<SelectInst>(I))
4473 return OptimizeSelectInst(SI);
4476 return OptimizeShuffleVectorInst(SVI);
4478 if (isa<ExtractElementInst>(I))
4479 return OptimizeExtractElementInst(I);
4487 bool CodeGenPrepare::OptimizeBlock(
BasicBlock &BB,
bool& ModifiedDT) {
4489 bool MadeChange =
false;
4491 CurInstIterator = BB.
begin();
4492 while (CurInstIterator != BB.
end()) {
4493 MadeChange |= OptimizeInst(CurInstIterator++, ModifiedDT);
4497 MadeChange |= DupRetToEnableTailCallOpts(&BB);
4505 bool CodeGenPrepare::PlaceDbgValues(
Function &F) {
4506 bool MadeChange =
false;
4518 PrevNonDbgInst = Insn;
4523 if (VI && VI != PrevNonDbgInst && !VI->
isTerminator()) {
4524 DEBUG(
dbgs() <<
"Moving Debug Value before :\n" << *DVI <<
' ' << *VI);
4526 if (isa<PHINode>(VI))
4544 bool CodeGenPrepare::sinkAndCmp(
Function &F) {
4549 bool MadeChange =
false;
4564 if (!Zero || !Zero->
isZero())
4586 if (UserBB == BB)
continue;
4607 static bool extractBranchMetadata(
BranchInst *BI,
4608 uint64_t &ProbTrue, uint64_t &ProbFalse) {
4610 "Looking for probabilities on unconditional branch?");
4612 if (!ProfileData || ProfileData->getNumOperands() != 3)
4615 const auto *CITrue =
4616 mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1));
4617 const auto *CIFalse =
4618 mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(2));
4619 if (!CITrue || !CIFalse)
4622 ProbTrue = CITrue->getValue().getZExtValue();
4623 ProbFalse = CIFalse->getValue().getZExtValue();
4629 static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
4630 uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
4631 uint32_t Scale = (NewMax / UINT32_MAX) + 1;
4632 NewTrue = NewTrue / Scale;
4633 NewFalse = NewFalse / Scale;
4658 bool CodeGenPrepare::splitBranchCondition(
Function &F) {
4662 bool MadeChange =
false;
4663 for (
auto &BB : F) {
4675 Value *Cond1, *Cond2;
4689 DEBUG(
dbgs() <<
"Before branch condition splitting\n"; BB.
dump());
4693 .getNodePtrUnchecked();
4701 Br1->setCondition(Cond1);
4707 Br1->setSuccessor(0, TmpBB);
4709 Br1->setSuccessor(1, TmpBB);
4712 auto *Br2 =
IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
4713 if (
auto *I = dyn_cast<Instruction>(Cond2)) {
4731 for (
auto &I : *TBB) {
4741 for (
auto &I : *FBB) {
4771 uint64_t TrueWeight, FalseWeight;
4772 if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
4773 uint64_t NewTrueWeight = TrueWeight;
4774 uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
4775 scaleWeights(NewTrueWeight, NewFalseWeight);
4777 .createBranchWeights(TrueWeight, FalseWeight));
4779 NewTrueWeight = TrueWeight;
4780 NewFalseWeight = 2 * FalseWeight;
4781 scaleWeights(NewTrueWeight, NewFalseWeight);
4783 .createBranchWeights(TrueWeight, FalseWeight));
4804 uint64_t TrueWeight, FalseWeight;
4805 if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
4806 uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
4807 uint64_t NewFalseWeight = FalseWeight;
4808 scaleWeights(NewTrueWeight, NewFalseWeight);
4810 .createBranchWeights(TrueWeight, FalseWeight));
4812 NewTrueWeight = 2 * TrueWeight;
4813 NewFalseWeight = FalseWeight;
4814 scaleWeights(NewTrueWeight, NewFalseWeight);
4816 .createBranchWeights(TrueWeight, FalseWeight));
4826 DEBUG(
dbgs() <<
"After branch condition splitting\n"; BB.
dump();
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type (if unknown returns 0).
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
ReturnInst - Return a value (possibly void), from a function.
Value * getValueOperand()
const Value * getCalledValue() const
getCalledValue - Get a pointer to the function that is invoked by this instruction.
iplist< Instruction >::iterator eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing basic block and deletes it...
static MVT getIntegerVT(unsigned BitWidth)
void push_back(const T &Elt)
use_iterator_impl< Use > use_iterator
A parsed version of the target data layout string in and methods for querying it. ...
Type * getIndexedType() const
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
This class is the base class for the comparison instructions.
iterator_range< use_iterator > uses()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
static bool SinkCmpExpression(CmpInst *CI)
SinkCmpExpression - Sink the given CmpInst into user blocks to reduce the number of virtual registers...
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
void addIncoming(Value *V, BasicBlock *BB)
addIncoming - Add an incoming value to the end of the PHI list
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
unsigned getBasePtrIndex()
The index into the associate statepoint's argument list which contains the base pointer of the pointe...
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
STATISTIC(NumFunctions,"Total number of functions")
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
Sign extended before/after call.
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
A Module instance is used to store all the information related to an LLVM module. ...
void setAlignment(unsigned Align)
Intrinsic::ID getIntrinsicID() const
getIntrinsicID - Return the intrinsic ID of this intrinsic.
This class represents zero extension of integer types.
unsigned getNumOperands() const
void DeleteDeadBlock(BasicBlock *BB)
DeleteDeadBlock - Delete the specified block, which must have no predecessors.
CallInst - This class represents a function call, abstracting a target machine's calling convention...
size_type count(PtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT=nullptr)
MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its predecessor is known to have...
static void computeBaseDerivedRelocateMap(const SmallVectorImpl< User * > &AllRelocateCalls, DenseMap< IntrinsicInst *, SmallVector< IntrinsicInst *, 2 >> &RelocateInstMap)
ShuffleVectorInst - This instruction constructs a fixed permutation of two input vectors.
const Function * getParent() const
Return the enclosing method, or null if none.
virtual bool isZExtFree(Type *, Type *) const
Return true if any actual instruction that defines a value of type Ty1 implicitly zero-extends the va...
const APInt & getUniqueInteger() const
If C is a constant integer then return its value, otherwise C must be a vector of constant integers...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, unsigned Align=1, bool *=nullptr) const
Determine if the target supports unaligned memory accesses.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
LoadInst - an instruction for reading from memory.
FunctionType * getType(LLVMContext &Context, ID id, ArrayRef< Type * > Tys=None)
Return the function type for an intrinsic.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
void setAlignment(unsigned Align)
bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const
Return true if the attribute exists at the given index.
bool bitsLT(EVT VT) const
bitsLT - Return true if this has less bits than VT.
static Constant * getNullValue(Type *Ty)
StringRef getName() const
Return a constant reference to the value's name.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
iterator begin()
Instruction iterator methods.
reverse_iterator rbegin()
bool enableExtLdPromotion() const
Return true if the target wants to use the optimization that turns ext(promotableInst1(...(promotableInstN(load)))) into promotedInst1(...(promotedInstN(ext(load)))).
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool match(Val *V, const Pattern &P)
AnalysisUsage & addRequired()
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
bool hasNoNaNs() const
Determine whether the no-NaNs flag is set.
bool isUnconditional() const
Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
removeIncomingValue - Remove an incoming value.
SelectInst - This class represents the LLVM 'select' instruction.
bool hasMultipleConditionRegisters() const
Return true if multiple condition registers are available.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
static CallInst * Create(Value *Func, ArrayRef< Value * > Args, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
NodeTy * getNextNode()
Get the next node, or 0 for the list tail.
static cl::opt< bool > DisableExtLdPromotion("disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in ""CodeGenPrepare"))
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
UndefValue - 'undef' values are things that do not have specified contents.
T LLVM_ATTRIBUTE_UNUSED_RESULT pop_back_val()
StructType - Class to represent struct types.
static void ScalarizeMaskedLoad(CallInst *CI)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
A Use represents the edge between a Value definition and its users.
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldTerminator - If a terminator instruction is predicated on a constant value, convert it into an unconditional branch to the constant destination.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
static Constant * get(ArrayRef< Constant * > V)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
static cl::opt< bool > DisableBranchOpts("disable-cgp-branch-opts", cl::Hidden, cl::init(false), cl::desc("Disable branch optimizations in CodeGenPrepare"))
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches, switches, etc.
Number of individual test Apply this number of consecutive mutations to each input exit after the first new interesting input is found the minimized corpus is saved into the first input directory Number of jobs to run If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
Windows NT (Windows on ARM)
bool hasUniqueInitializer() const
hasUniqueInitializer - Whether the global variable has an initializer, and any changes made to the in...
Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset)
Accumulate offsets from stripInBoundsConstantOffsets().
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
static cl::opt< bool > DisableStoreExtract("disable-cgp-store-extract", cl::Hidden, cl::init(false), cl::desc("Disable store(extract) optimizations in CodeGenPrepare"))
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
bool isExtFree(const Instruction *I) const
Return true if the extension represented by I is free.
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
bool isStatepoint(const ImmutableCallSite &CS)
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, const DataLayout &DL)
OptimizeNoopCopyExpression - If the specified cast instruction is a noop copy (e.g.
user_iterator_impl< User > user_iterator
ConstantExpr - a constant value that is initialized with an expression using other constant values...
static cl::opt< bool > AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(false), cl::desc("Address sinking in CGP using GEPs."))
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
unsigned getDerivedPtrIndex()
The index into the associate statepoint's argument list which contains the pointer whose relocation t...
Value handle that is nullable, but tries to track the Value.
unsigned getAlignment() const
This contains information for each constraint that we are lowering.
BasicBlock * getSuccessor(unsigned i) const
This class represents a no-op cast from one type to another.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
StoreInst - an instruction for storing to memory.
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber.
unsigned getNumElements() const
Return the number of elements in the Vector type.
virtual bool isSelectSupported(SelectSupportKind) const
void takeName(Value *V)
Transfer the name from V to this value.
INITIALIZE_TM_PASS(CodeGenPrepare,"codegenprepare","Optimize for code generation", false, false) FunctionPass *llvm
This class implements simplifications for calls to fortified library functions (__st*cpy_chk, __memcpy_chk, __memmove_chk, __memset_chk), to, when possible, replace them with their non-checking counterparts.
Type * getElementType() const
This class represents a truncation of integer types.
Considered to not alias after call.
static unsigned getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
getKnownAlignment - Try to infer an alignment for the specified pointer.
PointerType - Class to represent pointers.
static bool simplifyRelocatesOffABase(IntrinsicInst *RelocatedBase, const SmallVectorImpl< IntrinsicInst * > &Targets)
unsigned getNumIncomingValues() const
getNumIncomingValues - Return the number of incoming edges
Interval::succ_iterator succ_end(Interval *I)
void replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
static cl::opt< bool > StressStoreExtract("stress-cgp-store-extract", cl::Hidden, cl::init(false), cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"))
FunctionPass * createCodeGenPreparePass(const TargetMachine *TM=nullptr)
createCodeGenPreparePass - Transform the code to expose more pattern matching during instruction sele...
static bool isExtractBitsCandidateUse(Instruction *User)
isExtractBitsCandidateUse - Check if the candidates could be combined with shift instruction, which includes:
uint64_t getElementOffset(unsigned Idx) const
GetElementPtrInst - an instruction for type-safe pointer arithmetic to access elements of arrays and ...
OneUse_match< T > m_OneUse(const T &SubPattern)
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
initializer< Ty > init(const Ty &Val)
static CmpInst * Create(OtherOps Op, unsigned short predicate, Value *S1, Value *S2, const Twine &Name="", Instruction *InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
static bool CombineUAddWithOverflow(CmpInst *CI)
CombineUAddWithOverflow - try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if p...
unsigned getAlignment() const
getAlignment - Return the alignment of the access that is being performed
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
static void ScalarizeMaskedStore(CallInst *CI)
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction...
LLVM Basic Block Representation.
The instances of the Type class are immutable: once they are created, they are never changed...
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
BranchInst - Conditional or Unconditional Branch instruction.
bool isVectorTy() const
isVectorTy - True if this is an instance of VectorType.
bool isIndirect
isIndirect - True if this operand is an indirect operand.
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
This is an important base class in LLVM.
const Value * getCondition() const
int64_t getSExtValue() const
Get sign extended value.
bool isGCRelocate(const Value *V)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool isMaskAndBranchFoldingLegal() const
Return if the target supports combining a chain like:
APInt Or(const APInt &LHS, const APInt &RHS)
Bitwise OR function for APInt.
unsigned getAlignment() const
getAlignment - Return the alignment of the memory that is being allocated by the instruction.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
const DebugLoc & getDebugLoc() const
getDebugLoc - Return the debug location for this node as a DebugLoc.
brc_match< Cond_t > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
virtual bool isTruncateFree(Type *, Type *) const
Return true if it's free to truncate a value of type Ty1 to type Ty2.
Represent the analysis usage information of a pass.
BasicBlock * getIncomingBlock(unsigned i) const
getIncomingBlock - Return incoming basic block number i.
This instruction compares its operands according to the predicate given to the constructor.
static bool SinkCast(CastInst *CI)
SinkCast - Sink the specified cast instruction into its user blocks.
FunctionPass class - This class is used to implement most global optimizations.
Value * getOperand(unsigned i) const
Zero extended before/after call.
Interval::pred_iterator pred_end(Interval *I)
bool isPredictableSelectExpensive() const
Return true if selects are only cheaper than branches if the branch is unlikely to be predicted right...
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Wraps a call to a gc.relocate and provides access to its operands.
Predicate getPredicate() const
Return the predicate for this instruction.
static cl::opt< bool > DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false), cl::desc("Disable GC optimizations in CodeGenPrepare"))
EVT - Extended Value Type.
bool isPointerTy() const
isPointerTy - True if this is an instance of PointerType.
static UndefValue * get(Type *T)
get() - Static factory methods - Return an 'undef' object of the specified type.
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr)
RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a trivially dead instruction...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
LLVMContext & getContext() const
All values hold a context through their type.
PointerType * getPointerTo(unsigned AddrSpace=0)
getPointerTo - Return a pointer to the current type.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
bool isUsedInBasicBlock(const BasicBlock *BB) const
Check if this value is used in the specified basic block.
bool hasNoSignedWrap() const
Determine whether the no signed wrap flag is set.
const Value * getTrueValue() const
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
void dump() const
Support for debugging, callable in GDB: V->dump()
static bool OptimizeCmpExpression(CmpInst *CI)
bool isTerminator() const
bool isConditional() const
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInf...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
std::vector< AsmOperandInfo > AsmOperandInfoVector
BinaryOps getOpcode() const
static Constant * getSplat(unsigned NumElts, Constant *Elt)
getSplat - Return a ConstantVector with the specified constant in each element.
void initializeCodeGenPreparePass(PassRegistry &)
MemIntrinsic - This is the common base class for memset/memcpy/memmove.
static bool IsNonLocalValue(Value *V, BasicBlock *BB)
IsNonLocalValue - Return true if the specified values are defined in a different basic block than BB...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
unsigned getIntegerBitWidth() const
This is the shared class of boolean and integer constants.
void setIncomingBlock(unsigned i, BasicBlock *BB)
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false...
Value * getIncomingValue(unsigned i) const
getIncomingValue - Return incoming value number x
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
static bool SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, DenseMap< BasicBlock *, BinaryOperator * > &InsertedShifts, const TargetLowering &TLI, const DataLayout &DL)
SinkShiftAndTruncate - sink both shift and truncate instruction to the use of truncate's BB...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
bool isStaticAlloca() const
isStaticAlloca - Return true if this alloca is in the entry block of the function and is a constant s...
Type * getType() const
All values are typed, get the type of this value.
Provides information about what library functions are available for the current target.
MDNode * getMetadata(unsigned KindID) const
getMetadata - Get the metadata of given kind attached to this Instruction.
virtual const TargetLowering * getTargetLowering() const
static cl::opt< bool > DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden, cl::init(false), cl::desc("Disable select to branch conversion."))
std::reverse_iterator< iterator > reverse_iterator
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Function * getCalledFunction() const
getCalledFunction - Return the function called, or null if this is an indirect function invocation...
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
const BasicBlock & getEntryBlock() const
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
bool hasExtractBitsInsn() const
Return true if the target has BitExtract instructions.
static cl::opt< AlignMode > Align(cl::desc("Load/store alignment support"), cl::Hidden, cl::init(NoStrictAlign), cl::values(clEnumValN(StrictAlign,"aarch64-strict-align","Disallow all unaligned memory accesses"), clEnumValN(NoStrictAlign,"aarch64-no-strict-align","Allow unaligned memory accesses"), clEnumValEnd))
void setOperand(unsigned i, Value *Val)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AttributeSet getAttributes() const
Return the attribute list for this Function.
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
VectorType - Class to represent vector types.
Class for arbitrary precision integers.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
bool isIntegerTy() const
isIntegerTy - True if this is an instance of IntegerType.
iterator_range< user_iterator > users()
BasicBlock * getSinglePredecessor()
Return the predecessor of this block if it has a single predecessor block.
AddrMode
ARM Addressing Modes.
LLVM_ATTRIBUTE_UNUSED_RESULT std::enable_if< !is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
static cl::opt< bool > StressExtLdPromotion("stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) ""optimization in CodeGenPrepare"))
const Type * getScalarType() const LLVM_READONLY
getScalarType - If this is a vector type, return the element type, otherwise return 'this'...
APInt And(const APInt &LHS, const APInt &RHS)
Bitwise AND function for APInt.
void removeFromParent()
removeFromParent - This method unlinks 'this' from the containing basic block, but does not delete it...
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP, SmallVectorImpl< Value * > &OffsetV)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
PointerType * getType() const
Global values are always pointers.
MemTransferInst - This class wraps the llvm.memcpy/memmove intrinsics.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
virtual bool isVectorShiftByScalarCheap(Type *Ty) const
Return true if it's significantly cheaper to shift a vector by a uniform scalar than by an amount whi...
SelectSupportKind
Enum that describes what type of support for selects the target has.
DbgValueInst - This represents the llvm.dbg.value instruction.
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
ImmutableCallSite - establish a view to a call site for examination.
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction...
iplist< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
TerminatorInst * getTerminator()
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
bool hasOneUse() const
Return true if there is exactly one user of this value.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, const TargetLowering &TLI, const DataLayout &DL)
OptimizeExtractBits - sink the shift right instruction into user blocks if the uses could potentially...
ReturnInst * FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB, BasicBlock *Pred)
FoldReturnIntoUncondBranch - This method duplicates the specified return instruction into a predecess...
VectorType * getType() const
getType - Overload to return most specific vector type.
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="")
Split the basic block into two basic blocks at the specified instruction.
bool bypassSlowDivision(Function &F, Function::iterator &I, const DenseMap< unsigned int, unsigned int > &BypassWidth)
This optimization identifies DIV instructions that can be profitably bypassed and carried out with a ...
raw_ostream & operator<<(raw_ostream &OS, const APInt &I)
void mutateType(Type *Ty)
Mutate the type of this Value to be of the specified type.
OtherOps getOpcode() const
Get the opcode casted to the right type.
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
iterator_range< op_iterator > arg_operands()
arg_operands - iteration adapter for range-for loops.
user_iterator user_begin()
LLVMContext & getContext() const
Get the context in which this basic block lives.
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
bool hasNoUnsignedWrap() const
Determine whether the no unsigned wrap flag is set.
unsigned getOpcode() const
getOpcode() returns a member of one of the enums like Instruction::Add.
static cl::opt< bool > EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true), cl::desc("Enable sinkinig and/cmp into branches."))
static const Function * getParent(const Value *V)
void moveBefore(Instruction *MovePos)
moveBefore - Unlink this instruction from its current basic block and insert it into the basic block ...
const Value * getValue() const
DbgValueInst - This represents the llvm.dbg.value instruction.
This class implements an extremely fast bulk output stream that can only output to a stream...
virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const
Return true if the target can combine store(extractelement VectorTy, Idx).
Primary interface to the complete machine description for the target machine.
const Value * getFalseValue() const
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Value * SimplifyInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
SimplifyInstruction - See if we can compute a simplified version of this instruction.
Legacy analysis pass which computes a DominatorTree.
static bool hasSameExtUse(Instruction *Inst, const TargetLowering &TLI)
Check if all the uses of Inst are equivalent (or free) zero or sign extensions.
bool operator==(uint64_t V1, const APInt &V2)
iterator getFirstInsertionPt()
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
unsigned getNumUses() const
This method computes the number of uses of this Value.
Type * getAllocatedType() const
getAllocatedType - Return the type that is being allocated by the instruction.
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
static void getShuffleMask(Constant *Mask, SmallVectorImpl< int > &Result)
getShuffleMask - Return the full mask for this instruction, where each element is the element number ...
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
int getBasicBlockIndex(const BasicBlock *BB) const
getBasicBlockIndex - Return the first index of the specified basic block in the value list for this P...
static IntegerType * getInt8Ty(LLVMContext &C)
void moveBefore(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it into the function that MovePos lives ...
const BasicBlock * getParent() const
InstListType::iterator iterator
Instruction iterators...
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, ImmutableCallSite CS) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
UAddWithOverflow_match< LHS_t, RHS_t, Sum_t > m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S)
Match an icmp instruction checking for unsigned overflow on addition.
IntrinsicInst - A useful wrapper class for inspecting calls to intrinsic functions.
This file describes how to lower LLVM code to machine code.
AllocaInst - an instruction to allocate memory on the stack.
gep_type_iterator gep_type_begin(const User *GEP)