//===- CodeMetrics.cpp - Code cost measurements ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements code cost measurement utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/InstructionCost.h"

#define DEBUG_TYPE "code-metrics"

using namespace llvm;

static void
appendSpeculatableOperands(const Value *V,
                           SmallPtrSetImpl<const Value *> &Visited,
                           SmallVectorImpl<const Value *> &Worklist) {
  const User *U = dyn_cast<User>(V);
  if (!U)
    return;

  for (const Value *Operand : U->operands())
    if (Visited.insert(Operand).second)
      if (const auto *I = dyn_cast<Instruction>(Operand))
        if (!I->mayHaveSideEffects() && !I->isTerminator())
          Worklist.push_back(I);
}

static void completeEphemeralValues(SmallPtrSetImpl<const Value *> &Visited,
                                    SmallVectorImpl<const Value *> &Worklist,
                                    SmallPtrSetImpl<const Value *> &EphValues) {
  // Note: We don't speculate PHIs here, so we'll miss instruction chains kept
  // alive only by ephemeral values.

  // Walk the worklist using an index but without caching the size so we can
  // append more entries as we process the worklist. This forms a queue without
  // quadratic behavior by just leaving processed nodes at the head of the
  // worklist forever.
  for (int i = 0; i < (int)Worklist.size(); ++i) {
    const Value *V = Worklist[i];

    assert(Visited.count(V) &&
           "Failed to add a worklist entry to our visited set!");

    // If all uses of this value are ephemeral, then so is this value.
    if (!all_of(V->users(), [&](const User *U) { return EphValues.count(U); }))
      continue;

    EphValues.insert(V);
    LLVM_DEBUG(dbgs() << "Ephemeral Value: " << *V << "\n");

    // Append any more operands to consider.
    appendSpeculatableOperands(V, Visited, Worklist);
  }
}

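// For illustration: an "ephemeral" value is one whose only transitive users
// are assumption intrinsics, so it would vanish if the assumes were dropped.
// In IR such as
//
//   %cmp = icmp sgt i32 %x, 0
//   call void @llvm.assume(i1 %cmp)
//
// the icmp feeds nothing but the assume, so both the call and the icmp are
// collected as ephemeral below and skipped by analyzeBasicBlock when costing.
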
// Find all ephemeral values.
void CodeMetrics::collectEphemeralValues(
    const Loop *L, AssumptionCache *AC,
    SmallPtrSetImpl<const Value *> &EphValues) {
  SmallPtrSet<const Value *, 32> Visited;
  SmallVector<const Value *, 16> Worklist;

  for (auto &AssumeVH : AC->assumptions()) {
    if (!AssumeVH)
      continue;
    Instruction *I = cast<Instruction>(AssumeVH);

    // Filter out call sites outside of the loop so we don't do a function's
    // worth of work for each of its loops (and, in the common case, ephemeral
    // values in the loop are likely due to @llvm.assume calls in the loop).
    if (!L->contains(I->getParent()))
      continue;

    if (EphValues.insert(I).second)
      appendSpeculatableOperands(I, Visited, Worklist);
  }

  completeEphemeralValues(Visited, Worklist, EphValues);
}

void CodeMetrics::collectEphemeralValues(
    const Function *F, AssumptionCache *AC,
    SmallPtrSetImpl<const Value *> &EphValues) {
  SmallPtrSet<const Value *, 32> Visited;
  SmallVector<const Value *, 16> Worklist;

  for (auto &AssumeVH : AC->assumptions()) {
    if (!AssumeVH)
      continue;
    Instruction *I = cast<Instruction>(AssumeVH);
    assert(I->getParent()->getParent() == F &&
           "Found assumption for the wrong function!");

    if (EphValues.insert(I).second)
      appendSpeculatableOperands(I, Visited, Worklist);
  }

  completeEphemeralValues(Visited, Worklist, EphValues);
}

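// Usage sketch: callers such as the loop transforms typically collect the
// ephemeral values of a loop once and feed them to analyzeBasicBlock so that
// assume-only instructions are not charged to the code size. Assuming a
// Loop *L, an AssumptionCache &AC, and a TargetTransformInfo &TTI are in
// scope, the pattern is roughly:
//
//   CodeMetrics Metrics;
//   SmallPtrSet<const Value *, 32> EphValues;
//   CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
//   for (BasicBlock *BB : L->blocks())
//     Metrics.analyzeBasicBlock(BB, TTI, EphValues);
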
static bool extendsConvergenceOutsideLoop(const Instruction &I, const Loop *L) {
  if (!L)
    return false;
  if (!isa<ConvergenceControlInst>(I))
    return false;
  for (const auto *U : I.users()) {
    if (!L->contains(cast<Instruction>(U)))
      return true;
  }
  return false;
}

/// Fill in the current structure with information gleaned from the specified
/// block.
void CodeMetrics::analyzeBasicBlock(
    const BasicBlock *BB, const TargetTransformInfo &TTI,
    const SmallPtrSetImpl<const Value *> &EphValues, bool PrepareForLTO,
    const Loop *L) {
  ++NumBlocks;
  InstructionCost NumInstsBeforeThisBB = NumInsts;
  for (const Instruction &I : *BB) {
    // Skip ephemeral values.
    if (EphValues.count(&I))
      continue;

    // Special handling for calls.
    if (const auto *Call = dyn_cast<CallBase>(&I)) {
      if (const Function *F = Call->getCalledFunction()) {
        bool IsLoweredToCall = TTI.isLoweredToCall(F);
        // If a function is both internal and has a single use, then it is
        // extremely likely to get inlined in the future (it was probably
        // exposed by an interleaved devirtualization pass).
        // When preparing for LTO, liberally consider calls as inline
        // candidates.
        if (!Call->isNoInline() && IsLoweredToCall &&
            ((F->hasInternalLinkage() && F->hasOneLiveUse()) ||
             PrepareForLTO)) {
          ++NumInlineCandidates;
        }

        // If this call is to the function itself, then the function is
        // recursive. Inlining it into other functions is a bad idea, because
        // this is basically just a form of loop peeling, and our metrics
        // aren't useful for that case.
        if (F == BB->getParent())
          isRecursive = true;

        if (IsLoweredToCall)
          ++NumCalls;
      } else {
        // We don't want inline asm to count as a call - that would prevent
        // loop unrolling. The argument setup cost is still real, though.
        if (!Call->isInlineAsm())
          ++NumCalls;
      }
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
      if (!AI->isStaticAlloca())
        this->usesDynamicAlloca = true;
    }

    if (isa<ExtractElementInst>(I) || I.getType()->isVectorTy())
      ++NumVectorInsts;

    if (I.getType()->isTokenTy() && !isa<ConvergenceControlInst>(I) &&
        I.isUsedOutsideOfBlock(BB)) {
      LLVM_DEBUG(dbgs() << I
                        << "\n Cannot duplicate a token value used outside "
                           "the current block (except convergence control).\n");
      notDuplicatable = true;
    }

    if (const CallBase *CB = dyn_cast<CallBase>(&I)) {
      if (CB->cannotDuplicate())
        notDuplicatable = true;
      // Compute a meet over the visited blocks for the following partial
      // order:
      //
      // None -> { Controlled, ExtendedLoop, Uncontrolled }
      // Controlled -> ExtendedLoop
      if (Convergence <= ConvergenceKind::Controlled && CB->isConvergent()) {
        if (isa<ConvergenceControlInst>(CB) ||
            CB->getConvergenceControlToken()) {
          assert(Convergence != ConvergenceKind::Uncontrolled);
          LLVM_DEBUG(dbgs() << "Found controlled convergence:\n" << I << "\n");
          if (extendsConvergenceOutsideLoop(I, L))
            Convergence = ConvergenceKind::ExtendedLoop;
          else {
            assert(Convergence != ConvergenceKind::ExtendedLoop);
            Convergence = ConvergenceKind::Controlled;
          }
        } else {
          assert(Convergence == ConvergenceKind::None);
          Convergence = ConvergenceKind::Uncontrolled;
        }
      }
    }

    NumInsts += TTI.getInstructionCost(&I, TargetTransformInfo::TCK_CodeSize);
  }

  if (isa<ReturnInst>(BB->getTerminator()))
    ++NumRets;

  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddress's (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions
  // with indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function. And as a QOI issue,
  // if someone is using a blockaddress without an indirectbr, and that
  // reference somehow ends up in another function or global, we probably
  // don't want to inline this function.
  notDuplicatable |= isa<IndirectBrInst>(BB->getTerminator());

  // Remember NumInsts for this BB.
  InstructionCost NumInstsThisBB = NumInsts - NumInstsBeforeThisBB;
  NumBBInsts[BB] = NumInstsThisBB;
}
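
// Consumption sketch (continuing the usage example above): after every block
// of interest has been analyzed, a client reads the aggregated fields
// directly, e.g.
//
//   if (Metrics.notDuplicatable || Metrics.isRecursive ||
//       Metrics.Convergence == ConvergenceKind::Uncontrolled)
//     return false; // a duplicating transform would typically bail out here
//   InstructionCost LoopSize = Metrics.NumInsts;
//
// with the exact gating and thresholds left to each individual transform.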