#define DEBUG_TYPE "select-optimize"
50 "Number of select groups considered for conversion to branch");
52 "Number of select groups converted due to expensive cold operand");
54 "Number of select groups converted due to high-predictability");
56 "Number of select groups not converted due to unpredictability");
58 "Number of select groups not converted due to cold basic block");
60 "Number of select groups converted due to loop-level analysis");
61STATISTIC(NumSelectsConverted,
"Number of selects converted");
64 "cold-operand-threshold",
65 cl::desc(
"Maximum frequency of path for an operand to be considered cold."),
69 "cold-operand-max-cost-multiplier",
70 cl::desc(
"Maximum cost multiplier of TCC_expensive for the dependence "
71 "slice of a cold operand to be considered inexpensive."),
76 cl::desc(
"Gradient gain threshold (%)."),
81 cl::desc(
"Minimum gain per loop (in cycles) threshold."),
85 "select-opti-loop-relative-gain-threshold",
87 "Minimum relative gain per loop threshold (1/X). Defaults to 12.5%"),
92 cl::desc(
"Default mispredict rate (initialized to 25%)."));
97 cl::desc(
"Disable loop-level heuristics."));
class SelectOptimizeImpl {
  SelectOptimizeImpl() = default;

  struct SelectLike {
    Instruction *I;
    bool Inverted = false;
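    // Return a SelectLike for a SelectInst, or for an Or(zext(i1 X), Y)
    // instruction that acts like a select between Y|1 and Y; otherwise return
    // an empty (null) SelectLike.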
    static SelectLike match(Instruction *I) {
      if (isa<SelectInst>(I))
        return SelectLike(I);

      Value *X;
      if (PatternMatch::match(
              I, m_c_Or(m_OneUse(m_ZExt(m_Value(X))), m_Value())) &&
          X->getType()->isIntegerTy(1))
        return SelectLike(I);

      return SelectLike(nullptr);
    }
    // Mark this select as inverted: its condition is a not() of the group's
    // original condition.
    void setInverted() {
      assert(!Inverted && "Trying to invert an inverted SelectLike");
      assert(isa<Instruction>(getCondition()) &&
             cast<Instruction>(getCondition())->getOpcode() ==
                 Instruction::Xor);
      Inverted = true;
    }
    bool isInverted() const { return Inverted; }
    Value *getNonInvertedCondition() const {
      if (auto *Sel = dyn_cast<SelectInst>(I))
        return Sel->getCondition();

      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
    Value *getCondition() const {
      Value *CC = getNonInvertedCondition();
      // For an inverted condition, strip the not (xor) and return its operand.
      if (Inverted)
        return cast<Instruction>(CC)->getOperand(0);
      return CC;
    }
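    // getTrueValue/getFalseValue return the value selected when the (possibly
    // inverted) condition is true or false, respectively. For the
    // Or(zext(i1 X), Y) form, the false value is Y and the true value
    // corresponds to Y|1.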
    Value *getTrueValue(bool HonorInverts = true) const {
      if (Inverted && HonorInverts)
        return getFalseValue(/*HonorInverts=*/false);
      if (auto *Sel = dyn_cast<SelectInst>(I))
        return Sel->getTrueValue();

      if (isa<BinaryOperator>(I))
    Value *getFalseValue(bool HonorInverts = true) const {
      if (Inverted && HonorInverts)
        return getTrueValue(/*HonorInverts=*/false);
      if (auto *Sel = dyn_cast<SelectInst>(I))
        return Sel->getFalseValue();
      // For Or(zext(i1 X), Y) the false value is Y (i.e. Y|0): pick whichever
      // operand is not the zext.
      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
        Value *X;
        if (PatternMatch::match(BO->getOperand(0),
                                m_OneUse(m_ZExt(m_Value(X)))))
          return BO->getOperand(1);
        else
          return BO->getOperand(0);
      }
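      // Non-predicated cost of the true operand: the recorded cost of the
      // instruction producing it, plus, for the Or(zext) form, the cost of the
      // extra Or-with-1 that materializes the true value.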
      if (isa<SelectInst>(I))
        if (auto *I = dyn_cast<Instruction>(getTrueValue()))
          return InstCostMap.contains(I) ? InstCostMap[I].NonPredCost
                                         : Scaled64::getZero();

      if (isa<BinaryOperator>(I))
        if (auto I = dyn_cast<Instruction>(getFalseValue()))
          if (InstCostMap.contains(I)) {
            InstructionCost OrCost = TTI->getArithmeticInstrCost(
                Instruction::Or, I->getType(),
                TargetTransformInfo::TCK_Latency,
                {TargetTransformInfo::OK_AnyValue,
                 TargetTransformInfo::OP_None},
                {TTI::OK_UniformConstantValue, TTI::OP_PowerOf2});
            return InstCostMap[I].NonPredCost +
                   Scaled64::get(*OrCost.getValue());
          }
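      // Non-predicated cost of the false operand: simply the recorded cost of
      // the instruction producing it, if any.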
      if (isa<SelectInst>(I))
        if (auto *I = dyn_cast<Instruction>(getFalseValue()))
          return InstCostMap.contains(I) ? InstCostMap[I].NonPredCost
                                         : Scaled64::getZero();

      if (isa<BinaryOperator>(I))
        if (auto I = dyn_cast<Instruction>(getFalseValue()))
          if (InstCostMap.contains(I))
            return InstCostMap[I].NonPredCost;
  void optimizeSelectsBase(Function &F, SelectGroups &ProfSIGroups);
  void optimizeSelectsInnerLoops(Function &F, SelectGroups &ProfSIGroups);

  void convertProfitableSIGroups(SelectGroups &ProfSIGroups);

  void collectSelectGroups(BasicBlock &BB, SelectGroups &SIGroups);

  void findProfitableSIGroupsBase(SelectGroups &SIGroups,
                                  SelectGroups &ProfSIGroups);
  void findProfitableSIGroupsInnerLoops(const Loop *L, SelectGroups &SIGroups,
                                        SelectGroups &ProfSIGroups);

  bool isConvertToBranchProfitableBase(const SelectGroup &ASI);

  bool hasExpensiveColdOperand(const SelectGroup &ASI);

  void getExclBackwardsSlice(Instruction *I, std::stack<Instruction *> &Slice,
                             Instruction *SI, bool ForSinking = false);

  bool isSelectHighlyPredictable(const SelectLike SI);

  bool checkLoopHeuristics(const Loop *L, const CostInfo LoopDepth[2]);

  bool computeLoopCosts(const Loop *L, const SelectGroups &SIGroups,
                        DenseMap<const Instruction *, CostInfo> &InstCostMap,
                        CostInfo *LoopCost);

  SmallDenseMap<const Instruction *, SelectLike, 2>
  getSImap(const SelectGroups &SIGroups);

  std::optional<uint64_t> computeInstCost(const Instruction *I);

  Scaled64 getMispredictionCost(const SelectLike SI, const Scaled64 CondCost);

  Scaled64 getPredictedPathCost(Scaled64 TrueCost, Scaled64 FalseCost,
                                const SelectLike SI);

  bool isSelectKindSupported(const SelectLike SI);
  SelectOptimizeImpl Impl;

    return Impl.runOnFunction(F, *this);

  SelectOptimizeImpl Impl(TM);
  return Impl.run(F, FAM);

char SelectOptimize::ID = 0;
  TSI = TM->getSubtargetImpl(F);

            .getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  assert(PSI && "This pass requires module analysis pass `profile-summary`!");

  TSchedModel.init(TSI);

  bool Changed = optimizeSelects(F);

  TSI = TM->getSubtargetImpl(F);

  TSchedModel.init(TSI);

  return optimizeSelects(F);
bool SelectOptimizeImpl::optimizeSelects(Function &F) {
  // Determine candidate select groups, then convert the profitable ones.
  SelectGroups ProfSIGroups;

  optimizeSelectsBase(F, ProfSIGroups);

  optimizeSelectsInnerLoops(F, ProfSIGroups);

  convertProfitableSIGroups(ProfSIGroups);

  // Code was modified iff at least one select group was converted.
  return !ProfSIGroups.empty();
}

void SelectOptimizeImpl::optimizeSelectsBase(Function &F,
                                             SelectGroups &ProfSIGroups) {
  SelectGroups SIGroups;
  for (BasicBlock &BB : F) {
    // Base heuristics apply only to blocks outside inner-most loops; those are
    // handled by optimizeSelectsInnerLoops.
    Loop *L = LI->getLoopFor(&BB);
    if (L && L->isInnermost())
      continue;
    collectSelectGroups(BB, SIGroups);
  }

  findProfitableSIGroupsBase(SIGroups, ProfSIGroups);
}
void SelectOptimizeImpl::optimizeSelectsInnerLoops(Function &F,
                                                   SelectGroups &ProfSIGroups) {
  SmallVector<Loop *, 4> Loops(LI->begin(), LI->end());
  // Visit the loop nest and collect all loops, including children.
  for (unsigned long i = 0; i < Loops.size(); ++i)
    for (Loop *ChildL : Loops[i]->getSubLoops())
      Loops.push_back(ChildL);

  for (Loop *L : Loops) {
    if (!L->isInnermost())
      continue;

    SelectGroups SIGroups;
    for (BasicBlock *BB : L->getBlocks())
      collectSelectGroups(*BB, SIGroups);

    findProfitableSIGroupsInnerLoops(L, SIGroups, ProfSIGroups);
  }
}
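// getTrueOrFalseValue: if isTrue is true, return the true value of SI,
// otherwise its false value, looking through other selects of the same group
// (Selects) that feed it; for the Or(zext(i1 X), Y) form the true value Y|1 is
// materialized with an explicit Or.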
  for (SelectInst *DefSI = dyn_cast<SelectInst>(SI.getI());
       DefSI != nullptr && Selects.count(DefSI);
       DefSI = dyn_cast<SelectInst>(V)) {
    if (DefSI->getCondition() == SI.getCondition())
      V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
    else // Inverted select in the same group: swap true/false.
      V = (!isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
  }

  if (isa<BinaryOperator>(SI.getI())) {
    assert(SI.getI()->getOpcode() == Instruction::Or &&
           "Only currently handling Or instructions.");
    V = SI.getFalseValue();
    if (isTrue)
      V = IB.CreateOr(V, ConstantInt::get(V->getType(), 1));
  }

  assert(V && "Failed to get select true/false value");
void SelectOptimizeImpl::convertProfitableSIGroups(SelectGroups &ProfSIGroups) {
  for (SelectGroup &ASI : ProfSIGroups) {

    // Compute the sinkable dependence slices of the true and false operands of
    // every select in the group, and track the longest of each.
    typedef std::stack<Instruction *>::size_type StackSizeType;
    StackSizeType maxTrueSliceLen = 0, maxFalseSliceLen = 0;
    for (SelectLike SI : ASI) {
      if (auto *TI = dyn_cast_or_null<Instruction>(SI.getTrueValue())) {
        std::stack<Instruction *> TrueSlice;
        getExclBackwardsSlice(TI, TrueSlice, SI.getI(), /*ForSinking=*/true);
        maxTrueSliceLen = std::max(maxTrueSliceLen, TrueSlice.size());
      }
      if (auto *FI = dyn_cast_or_null<Instruction>(SI.getFalseValue())) {
        if (isa<SelectInst>(SI.getI()) || !FI->hasOneUse()) {
          std::stack<Instruction *> FalseSlice;
          getExclBackwardsSlice(FI, FalseSlice, SI.getI(), /*ForSinking=*/true);
          maxFalseSliceLen = std::max(maxFalseSliceLen, FalseSlice.size());
        }
      }
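    // Interleave the instructions of the collected true (resp. false) slices,
    // taking the top of each slice in turn, so that independent slices from
    // different selects in the group can execute in parallel once sunk.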
    for (StackSizeType IS = 0; IS < maxTrueSliceLen; ++IS) {
      for (auto &S : TrueSlices) {
        if (!S.empty()) {
          TrueSlicesInterleaved.push_back(S.top());
          S.pop();
        }
      }
    }
    for (StackSizeType IS = 0; IS < maxFalseSliceLen; ++IS) {
      for (auto &S : FalseSlices) {
        if (!S.empty()) {
          FalseSlicesInterleaved.push_back(S.top());
          S.pop();
        }
      }
    }
    SelectLike SI = ASI.front();
    SelectLike LastSI = ASI.back();

    SplitPt.setHeadBit(true);

    BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock));

    auto DIt = SI.getI()->getIterator();
    while (&*DIt != LastSI.getI()) {
      if (DIt->isDebugOrPseudoInst())

    for (auto *DI : SinkInstrs)

                   std::next(LastSI.getI()->getIterator()));
    BasicBlock *TrueBlock = nullptr, *FalseBlock = nullptr;
    BranchInst *TrueBranch = nullptr, *FalseBranch = nullptr;
    if (!TrueSlicesInterleaved.empty()) {

      TrueBranch->setDebugLoc(LastSI.getI()->getDebugLoc());
      for (Instruction *TrueInst : TrueSlicesInterleaved)
        TrueInst->moveBefore(TrueBranch);
    }
    if (!FalseSlicesInterleaved.empty()) {

      FalseBranch->setDebugLoc(LastSI.getI()->getDebugLoc());
      for (Instruction *FalseInst : FalseSlicesInterleaved)
        FalseInst->moveBefore(FalseBranch);
    }

    if (TrueBlock == FalseBlock) {
      assert(TrueBlock == nullptr &&
             "Unexpected basic block transform while optimizing select");

      FalseBranch->setDebugLoc(SI.getI()->getDebugLoc());
    }

    if (TrueBlock == nullptr) {

      TrueBlock = StartBlock;
    } else if (FalseBlock == nullptr) {

      FalseBlock = StartBlock;
    }

    auto *CondFr = IB.CreateFreeze(SI.getCondition(),
                                   SI.getCondition()->getName() + ".frozen");

    for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) {

      SI.getI()->replaceAllUsesWith(PN);

      ++NumSelectsConverted;
    }
    IB.CreateCondBr(CondFr, TT, FT, SI.getI());

    SI.getI()->eraseFromParent();
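// Collect groups of consecutive select-like instructions in BB that share the
// same condition (or its negation); such a group can be converted to branches
// using a single conditional branch.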
void SelectOptimizeImpl::collectSelectGroups(BasicBlock &BB,
                                             SelectGroups &SIGroups) {
  BasicBlock::iterator BBIt = BB.begin();
  while (BBIt != BB.end()) {
    Instruction *I = &*BBIt++;
    if (SelectLike SI = SelectLike::match(I)) {

      SIGroup.push_back(SI);
      while (BBIt != BB.end()) {

        if (!isa<SelectInst>(NI))

        SelectLike NSI = SelectLike::match(NI);
        if (NSI && SI.getCondition() == NSI.getCondition()) {
          SIGroup.push_back(NSI);
        } else if (NSI && match(NSI.getCondition(),
                                m_Not(m_Specific(SI.getCondition())))) {
          NSI.setInverted();
          SIGroup.push_back(NSI);
        }

      if (!isSelectKindSupported(SI))
        continue;

      LLVM_DEBUG({
        dbgs() << "New Select group with\n";
        for (auto SI : SIGroup)
          dbgs() << "  " << *SI.getI() << "\n";
      });

      SIGroups.push_back(SIGroup);
void SelectOptimizeImpl::findProfitableSIGroupsBase(
    SelectGroups &SIGroups, SelectGroups &ProfSIGroups) {
  for (SelectGroup &ASI : SIGroups) {
    ++NumSelectOptAnalyzed;
    if (isConvertToBranchProfitableBase(ASI))
      ProfSIGroups.push_back(ASI);
  }
}

void SelectOptimizeImpl::findProfitableSIGroupsInnerLoops(
    const Loop *L, SelectGroups &SIGroups, SelectGroups &ProfSIGroups) {
  NumSelectOptAnalyzed += SIGroups.size();

  // Bail if the loop-level cost analysis fails or indicates no overall gain.
  if (!computeLoopCosts(L, SIGroups, InstCostMap, LoopCost) ||
      !checkLoopHeuristics(L, LoopCost)) {
    return;
  }
  for (SelectGroup &ASI : SIGroups) {

    for (SelectLike SI : ASI) {
      SelectCost = std::max(SelectCost, InstCostMap[SI.getI()].PredCost);
      BranchCost = std::max(BranchCost, InstCostMap[SI.getI()].NonPredCost);
    }
    if (BranchCost < SelectCost) {

      OR << "Profitable to convert to branch (loop analysis). BranchCost="
         << BranchCost.toString() << ", SelectCost=" << SelectCost.toString()
         << ". ";

      ++NumSelectConvertedLoop;
      ProfSIGroups.push_back(ASI);
    } else {

      ORmiss << "Select is more profitable (loop analysis). BranchCost="
             << BranchCost.toString()
             << ", SelectCost=" << SelectCost.toString() << ". ";
    }
  }
bool SelectOptimizeImpl::isConvertToBranchProfitableBase(
    const SelectGroup &ASI) {
  SelectLike SI = ASI.front();

    ORmiss << "Not converted to branch because of cold basic block. ";

  if (SI.getI()->getMetadata(LLVMContext::MD_unpredictable)) {

    ORmiss << "Not converted to branch because of unpredictable branch. ";

    ++NumSelectConvertedHighPred;
    OR << "Converted to branch because of highly predictable branch. ";

  if (hasExpensiveColdOperand(ASI)) {
    ++NumSelectConvertedExpColdOperand;
    OR << "Converted to branch because of expensive cold operand.";

  ORmiss << "Not profitable to convert to branch (base heuristic).";

  return (Numerator + (Denominator / 2)) / Denominator;

  if (isa<SelectInst>(SI.getI()))
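// Returns true if some select in the group has a cold (rarely chosen) operand
// whose exclusive backwards dependence slice is expensive relative to
// TCC_Expensive, making it attractive to compute only on the cold path.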
bool SelectOptimizeImpl::hasExpensiveColdOperand(const SelectGroup &ASI) {
  bool ColdOperand = false;
  uint64_t TrueWeight, FalseWeight, TotalWeight;

    uint64_t MinWeight = std::min(TrueWeight, FalseWeight);
    TotalWeight = TrueWeight + FalseWeight;

    ORmiss << "Profile data available but missing branch-weights metadata for "
              "select instruction. ";

  for (SelectLike SI : ASI) {
    Instruction *ColdI = nullptr;
    uint64_t HotWeight;
    if (TrueWeight < FalseWeight) {
      ColdI = dyn_cast_or_null<Instruction>(SI.getTrueValue());
      HotWeight = FalseWeight;
    } else {
      ColdI = dyn_cast_or_null<Instruction>(SI.getFalseValue());
      HotWeight = TrueWeight;
    }
    if (ColdI) {
      std::stack<Instruction *> ColdSlice;
      getExclBackwardsSlice(ColdI, ColdSlice, SI.getI());

      while (!ColdSlice.empty()) {
static bool isSafeToSinkLoad(Instruction *LoadI, Instruction *SI) {
  // Only sink a load that stays in the same block with no intervening writes.
  if (LoadI->getParent() != SI->getParent())
    return false;

  while (&*It != SI) {
    if (It->mayWriteToMemory())
      return false;
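// Compute the backwards dependence slice of instruction I that is used
// exclusively by the slice (every instruction in it has a single use), pushing
// it onto Slice; when ForSinking is set, also reject instructions that are
// unsafe or unprofitable to sink (terminators, side effects, selects, PHIs,
// colder blocks).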
void SelectOptimizeImpl::getExclBackwardsSlice(Instruction *I,
                                               std::stack<Instruction *> &Slice,
                                               Instruction *SI,
                                               bool ForSinking) {

  std::queue<Instruction *> Worklist;
  Worklist.push(I);
  while (!Worklist.empty()) {

    // Only instructions used exclusively by this slice are candidates.
    if (!II->hasOneUse())
      continue;

    // When sinking, skip instructions with side effects or that would change
    // control flow.
    if (ForSinking && (II->isTerminator() || II->mayHaveSideEffects() ||
                       isa<SelectInst>(II) || isa<PHINode>(II)))
      continue;

    // Do not pull in instructions from blocks colder than I's block.
    if (BFI->getBlockFreq(II->getParent()) < BFI->getBlockFreq(I->getParent()))
      continue;

      if (auto *OpI = dyn_cast<Instruction>(Op))
        Worklist.push(OpI);
bool SelectOptimizeImpl::isSelectHighlyPredictable(const SelectLike SI) {

    uint64_t Sum = TrueWeight + FalseWeight;
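// Loop-level heuristic: only convert if branching reduces the loop's critical
// path by at least GainCycleThreshold cycles and by a sufficient relative and
// gradient margin, comparing predicated vs. non-predicated costs over the two
// modelled iterations.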
bool SelectOptimizeImpl::checkLoopHeuristics(const Loop *L,
                                             const CostInfo LoopCost[2]) {

                                   L->getHeader()->getFirstNonPHI());

  if (LoopCost[0].NonPredCost > LoopCost[0].PredCost ||
      LoopCost[1].NonPredCost >= LoopCost[1].PredCost) {
    ORmissL << "No select conversion in the loop due to no reduction of loop's "
               "critical path. ";

  }

  Scaled64 Gain[2] = {LoopCost[0].PredCost - LoopCost[0].NonPredCost,
                      LoopCost[1].PredCost - LoopCost[1].NonPredCost};

    ORmissL << "No select conversion in the loop due to small reduction of "
               "loop's critical path. Gain=" << Gain[1].toString()
            << ", RelativeGain=" << RelativeGain.toString() << "%. ";

  if (Gain[1] > Gain[0]) {
    Scaled64 GradientGain = Scaled64::get(100) * (Gain[1] - Gain[0]) /
                            (LoopCost[1].PredCost - LoopCost[0].PredCost);
    if (GradientGain < Scaled64::get(GainGradientThreshold)) {
      ORmissL << "No select conversion in the loop due to small gradient gain. "
                 "GradientGain=" << GradientGain.toString() << "%. ";
    }
  } else if (Gain[1] < Gain[0]) {
    ORmissL
        << "No select conversion in the loop due to negative gradient gain. ";
bool SelectOptimizeImpl::computeLoopCosts(
    const Loop *L, const SelectGroups &SIGroups,
    DenseMap<const Instruction *, CostInfo> &InstCostMap, CostInfo *LoopCost) {
  LLVM_DEBUG(dbgs() << "Calculating Latency / IPredCost / INonPredCost of loop "
                    << L->getHeader()->getName() << "\n");
  const auto &SImap = getSImap(SIGroups);

  const unsigned Iterations = 2;
  for (unsigned Iter = 0; Iter < Iterations; ++Iter) {

    CostInfo &MaxCost = LoopCost[Iter];

        if (I.isDebugOrPseudoInst())
          continue;

        // The cost of an instruction is the maximum cost of its operands plus
        // its own latency.
        for (const Use &U : I.operands()) {
          auto UI = dyn_cast<Instruction>(U.get());
          if (!UI)
            continue;
          if (InstCostMap.count(UI)) {
            IPredCost = std::max(IPredCost, InstCostMap[UI].PredCost);
            INonPredCost = std::max(INonPredCost, InstCostMap[UI].NonPredCost);
          }
        }
        auto ILatency = computeInstCost(&I);
        if (!ILatency) {

          ORmissL << "Invalid instruction cost preventing analysis and "
                     "optimization of the inner-most loop containing this "
                     "instruction. ";

          return false;
        }

        // For a select, the non-predicated cost is the predicted path cost
        // plus the expected misprediction cost.
        if (SImap.contains(&I)) {
          auto SI = SImap.at(&I);

          Scaled64 PredictedPathCost =
              getPredictedPathCost(TrueOpCost, FalseOpCost, SI);

          if (auto *CI = dyn_cast<Instruction>(SI.getCondition()))
            if (InstCostMap.count(CI))
              CondCost = InstCostMap[CI].NonPredCost;
          Scaled64 MispredictCost = getMispredictionCost(SI, CondCost);

          INonPredCost = PredictedPathCost + MispredictCost;
        }

                          << INonPredCost << " for " << I << "\n");

        InstCostMap[&I] = {IPredCost, INonPredCost};
        MaxCost.PredCost = std::max(MaxCost.PredCost, IPredCost);
        MaxCost.NonPredCost = std::max(MaxCost.NonPredCost, INonPredCost);

                      << " MaxCost = " << MaxCost.PredCost << " "
                      << MaxCost.NonPredCost << "\n");
SmallDenseMap<const Instruction *, SelectLike, 2>
SelectOptimizeImpl::getSImap(const SelectGroups &SIGroups) {
  SmallDenseMap<const Instruction *, SelectLike, 2> SImap;
  for (const SelectGroup &ASI : SIGroups)
    for (SelectLike SI : ASI)
      SImap.try_emplace(SI.getI(), SI);
  return SImap;
}

std::optional<uint64_t>
SelectOptimizeImpl::computeInstCost(const Instruction *I) {
  InstructionCost ICost =
      TTI->getInstructionCost(I, TargetTransformInfo::TCK_Latency);
  if (auto OC = ICost.getValue())
    return std::optional<uint64_t>(*OC);
  return std::nullopt;
}
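// Estimate the cost of mispredicting the branch that replaces this select:
// based on the target's mispredict penalty and the condition's cost, scaled by
// the assumed mispredict rate (MispredictDefaultRate, or zero when the select
// is highly predictable).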
SelectOptimizeImpl::getMispredictionCost(const SelectLike SI,
                                         const Scaled64 CondCost) {

  // Highly predictable selects are assumed to never mispredict.
  if (isSelectHighlyPredictable(SI))
    MispredictRate = 0;

  return MispredictCost;
}
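// Cost of the critical path through a select's operands if the select becomes
// a branch: a branch-weight-weighted average of the true and false operand
// costs when weights are available, otherwise a 75%/25% split biased towards
// the more expensive operand.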
SelectOptimizeImpl::getPredictedPathCost(Scaled64 TrueCost, Scaled64 FalseCost,
                                         const SelectLike SI) {

    uint64_t SumWeight = TrueWeight + FalseWeight;
    if (SumWeight != 0) {

      return PredPathCost;
    }

  PredPathCost = std::max(TrueCost * Scaled64::get(3) + FalseCost,
                          FalseCost * Scaled64::get(3) + TrueCost);

  return PredPathCost;
}
bool SelectOptimizeImpl::isSelectKindSupported(const SelectLike SI) {
  bool VectorCond = !SI.getCondition()->getType()->isIntegerTy(1);
  if (VectorCond)
    return false;

  TargetLowering::SelectSupportKind SelectKind;
  if (SI.getType()->isVectorTy())
    SelectKind = TargetLowering::ScalarCondVectorVal;
  else
    SelectKind = TargetLowering::ScalarValSelect;
  return TLI->isSelectSupported(SelectKind);
}