#define EXPECT_OR_RET(LHS, RHS)                                                \
  auto LHS = RHS;                                                              \
  if (!LHS)                                                                    \
    return LHS.takeError();

#define RET_ON_ERR(EXPR)                                                       \
  if (auto Err = (EXPR))                                                       \
    return Err;
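// These helpers cut down on llvm::Expected / llvm::Error plumbing throughout
// the reader. Illustrative sketch only (using names that appear further down):
//   EXPECT_OR_RET(Entry, Cursor.advance()); // binds Entry, or returns the error
//   RET_ON_ERR(Cursor.SkipBlock());         // early-returns a failure, if any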
  auto [Iter, Inserted] = /* ... (emplace the new subcontext for this callsite) */;
  if (!Inserted)
    return make_error<InstrProfError>(instrprof_error::invalid_prof,
                                      "Duplicate GUID for same callsite.");
Error PGOCtxProfileReader::wrongValue(const Twine &Msg) {
  return make_error<InstrProfError>(instrprof_error::invalid_prof, Msg);
}

Error PGOCtxProfileReader::unsupported(const Twine &Msg) {
  return make_error<InstrProfError>(instrprof_error::unsupported_version, Msg);
}
bool PGOCtxProfileReader::canReadContext() {
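// canReadContext() is used by readContext() and loadContexts() below to test
// whether the next bitstream entry opens another context subblock.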
Expected<std::pair<std::optional<uint32_t>, PGOCtxProfContext>>
PGOCtxProfileReader::readContext(bool ExpectIndex) {
  std::optional<ctx_profile::GUID> Guid;
  std::optional<SmallVector<uint64_t, 16>> Counters;
  std::optional<uint32_t> CallsiteIndex;
  SmallVector<uint64_t> RecordValues;
  // Records may arrive in any order; keep reading until all the ones required
  // for this context have been seen.
  auto GotAllWeNeed = [&]() {
    return Guid.has_value() && Counters.has_value() &&
           (!ExpectIndex || CallsiteIndex.has_value());
  };
  while (!GotAllWeNeed()) {
    EXPECT_OR_RET(Entry, Cursor.advance());
    if (Entry->Kind != BitstreamEntry::Record)
      return wrongValue(
          "Expected records before encountering more subcontexts");
    EXPECT_OR_RET(ReadRecord, Cursor.readRecord(/* abbrev id elided */, RecordValues));
    switch (*ReadRecord) {
    case PGOCtxProfileRecords::Guid:
      if (RecordValues.size() != 1)
        return wrongValue("The GUID record should have exactly one value");
      Guid = RecordValues[0];
      break;
    case PGOCtxProfileRecords::Counters:
      Counters = std::move(RecordValues);
      if (Counters->empty())
        return wrongValue("Empty counters. At least the entry counter (one "
                          "value) was expected");
      break;
    case PGOCtxProfileRecords::CalleeIndex:
      if (!ExpectIndex)
        return wrongValue("The root context should not have a callee index");
      if (RecordValues.size() != 1)
        return wrongValue("The callee index should have exactly one value");
      CallsiteIndex = RecordValues[0];
      break;
    }
  }
  PGOCtxProfContext Ret(*Guid, std::move(*Counters));
  while (canReadContext()) {
    EXPECT_OR_RET(SC, readContext(/*ExpectIndex=*/true));
    auto &Targets = Ret.callsites()[*SC->first];
    auto [_, Inserted] =
        Targets.insert({SC->second.guid(), std::move(SC->second)});
    if (!Inserted)
      return wrongValue(
          "Unexpected duplicate target (callee) at the same callsite.");
  }
  return std::make_pair(CallsiteIndex, std::move(Ret));
}
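// A context subblock thus consists of the context's own records (GUID,
// counters, and, for non-root contexts, the callee index) followed by nested
// subblocks for its callees; readContext mirrors that nesting recursively.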
Error PGOCtxProfileReader::readMetadata() {
  // ... (verify the container magic and enter the profile block)
    return unsupported("Expected Block ID");
  // ... (each of the header records is validated in turn)
    return unsupported("Expected Version record");
  // ...
    return unsupported("Expected Version record");
  // ...
    return unsupported("Expected Version record");
  // ... (finally, the recorded version must not exceed what this reader supports)
    return unsupported("Version " + Twine(*Code) +
                       " is higher than supported version " /* + ... */);
Expected<std::map<GlobalValue::GUID, PGOCtxProfContext>>
PGOCtxProfileReader::loadContexts() {
  std::map<GlobalValue::GUID, PGOCtxProfContext> Ret;
  RET_ON_ERR(readMetadata());
  while (canReadContext()) {
    EXPECT_OR_RET(E, readContext(/*ExpectIndex=*/false));
    auto Key = E->second.guid();
    if (!Ret.insert({Key, std::move(E->second)}).second)
      return wrongValue("Duplicate roots");
  }
  return std::move(Ret);
}
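// Illustrative usage sketch (the constructor argument and error handling are
// assumptions, not taken from this file):
//   PGOCtxProfileReader Reader(Buffer); // Buffer: the bitstream payload
//   auto Profiles = Reader.loadContexts();
//   if (!Profiles)
//     return Profiles.takeError();
//   for (const auto &[Guid, Ctx] : *Profiles)
//     errs() << Guid << " has " << Ctx.counters().size() << " counters\n";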
void toYaml(yaml::Output &Out,
            const PGOCtxProfContext::CallTargetMapTy &CallTargets) {
  Out.beginSequence();
  size_t Index = 0;
  void *SaveData = nullptr;
  for (const auto &[_, Ctx] : CallTargets) {
    Out.preflightElement(Index++, SaveData);
    toYaml(Out, Ctx);
    Out.postflightElement(nullptr);
  }
  Out.endSequence();
}
void toYaml(yaml::Output &Out,
            const PGOCtxProfContext::CallsiteMapTy &Callsites) {
  auto AllCS = make_first_range(Callsites);
  auto MaxIt = max_element(AllCS);
  assert(MaxIt != AllCS.end() && "We should have a max value because the "
                                 "callsites collection is not empty.");
  void *SaveData = nullptr;
  Out.beginSequence();
  for (auto I = 0U; I <= *MaxIt; ++I) {
    Out.preflightElement(I, SaveData);
    auto It = Callsites.find(I);
    if (It == Callsites.end()) {
      // Callsite with no recorded targets: emit an empty flow sequence.
      Out.beginFlowSequence();
      Out.endFlowSequence();
    } else {
      toYaml(Out, It->second);
    }
    Out.postflightElement(nullptr);
  }
  Out.endSequence();
}
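// Note: the callsites map is sparse (keyed by callsite index), but it is
// emitted as a dense sequence from 0 through the largest index, so a callsite
// with no recorded targets appears as an empty list at its position.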
void toYaml(yaml::Output &Out, const PGOCtxProfContext &Ctx) {
  yaml::EmptyContext Empty;
  Out.beginMapping();
  void *SaveInfo = nullptr;
  bool UseDefault = false;
  {
    Out.preflightKey("Guid", /*Required=*/true, /*SameAsDefault=*/false,
                     UseDefault, SaveInfo);
    auto Guid = Ctx.guid();
    yaml::yamlize(Out, Guid, true, Empty);
    Out.postflightKey(nullptr);
  }
  {
    Out.preflightKey("Counters", true, false, UseDefault, SaveInfo);
    Out.beginFlowSequence();
    for (size_t I = 0U, E = Ctx.counters().size(); I < E; ++I) {
      Out.preflightFlowElement(I, SaveInfo);
      uint64_t V = Ctx.counters()[I];
      yaml::yamlize(Out, V, true, Empty);
      Out.postflightFlowElement(SaveInfo);
    }
    Out.endFlowSequence();
    Out.postflightKey(nullptr);
  }
  if (!Ctx.callsites().empty()) {
    Out.preflightKey("Callsites", true, false, UseDefault, SaveInfo);
    toYaml(Out, Ctx.callsites());
    Out.postflightKey(nullptr);
  }
  Out.endMapping();
}
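// Illustrative output shape (an assumption based on the keys above, not an
// excerpt from a real profile):
//   - Guid:     1000
//     Counters: [ 10, 7 ]
//     Callsites:
//       - - Guid:     2000
//           Counters: [ 4 ]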
void llvm::convertCtxProfToYaml(raw_ostream &OS,
                                const PGOCtxProfContext::CallTargetMapTy &Profiles) {
  yaml::Output Out(OS);
  toYaml(Out, Profiles);
}
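// Illustrative usage sketch (assumed, not from this file): emit loaded
// contexts as YAML, e.g. on stdout.
//   if (auto Profiles = Reader.loadContexts())
//     convertCtxProfToYaml(outs(), *Profiles);
//   else
//     logAllUnhandledErrors(Profiles.takeError(), errs());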