//===- InstrProfWriter.cpp - Instrumented profiling writer ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing profiling data for clang's
// instrumentation based PGO and coverage.
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/InstrProfWriter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/ProfileCommon.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;

// A struct to define how the data stream should be patched. For Indexed
// profiling, only the uint64_t data type is needed.
struct PatchItem {
  uint64_t Pos;         // Where to patch.
  ArrayRef<uint64_t> D; // An array of source data.
};

namespace llvm {

// A wrapper class to abstract the writer stream, with support for byte
// back patching.
class ProfOStream {
public:
  ProfOStream(raw_fd_ostream &FD)
      : IsFDOStream(true), OS(FD), LE(FD, llvm::endianness::little) {}
  ProfOStream(raw_string_ostream &STR)
      : IsFDOStream(false), OS(STR), LE(STR, llvm::endianness::little) {}

  [[nodiscard]] uint64_t tell() const { return OS.tell(); }
  void write(uint64_t V) { LE.write<uint64_t>(V); }
  void write32(uint32_t V) { LE.write<uint32_t>(V); }
  void writeByte(uint8_t V) { LE.write<uint8_t>(V); }

  // \c patch can only be called when all data is written and flushed.
  // For raw_string_ostream, the patch is done on the target string
  // directly and it won't be reflected in the stream's internal buffer.
  void patch(ArrayRef<PatchItem> P) {
    using namespace support;

    if (IsFDOStream) {
      raw_fd_ostream &FDOStream = static_cast<raw_fd_ostream &>(OS);
      const uint64_t LastPos = FDOStream.tell();
      for (const auto &K : P) {
        FDOStream.seek(K.Pos);
        for (uint64_t Elem : K.D)
          write(Elem);
      }
      // Reset the stream to the last position after patching so that users
      // don't accidentally overwrite data. This makes it consistent with
      // the string stream below which replaces the data directly.
      FDOStream.seek(LastPos);
    } else {
      raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS);
      std::string &Data = SOStream.str(); // with flush
      for (const auto &K : P) {
        for (int I = 0, E = K.D.size(); I != E; I++) {
          uint64_t Bytes =
              endian::byte_swap<uint64_t, llvm::endianness::little>(K.D[I]);
          Data.replace(K.Pos + I * sizeof(uint64_t), sizeof(uint64_t),
                       (const char *)&Bytes, sizeof(uint64_t));
        }
      }
    }
  }

  // If \c OS is an instance of \c raw_fd_ostream, this field will be
  // true. Otherwise, \c OS will be a raw_string_ostream.
  bool IsFDOStream;
  raw_ostream &OS;
  support::endian::Writer LE;
};
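
// Usage sketch for ProfOStream (illustrative only, not part of this file;
// SlotPos and PayloadStart are hypothetical names): the writer reserves
// fixed-width slots, emits the payload, and then back patches the slots once
// the final offsets are known.
//
//   ProfOStream OS(FD);
//   uint64_t SlotPos = OS.tell();
//   OS.write(0);                        // Reserve an 8-byte slot.
//   ...                                 // Emit the payload.
//   uint64_t Offsets[] = {PayloadStart};
//   OS.patch({{SlotPos, Offsets}});     // Fill the slot in place.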

class InstrProfRecordWriterTrait {
public:
  using key_type = StringRef;
  using key_type_ref = StringRef;

  using data_type = const InstrProfWriter::ProfilingData *const;
  using data_type_ref = const InstrProfWriter::ProfilingData *const;

  using hash_value_type = uint64_t;
  using offset_type = uint64_t;

  llvm::endianness ValueProfDataEndianness = llvm::endianness::little;
  InstrProfSummaryBuilder *SummaryBuilder;
  InstrProfSummaryBuilder *CSSummaryBuilder;

  InstrProfRecordWriterTrait() = default;

  static hash_value_type ComputeHash(key_type_ref K) {
    return IndexedInstrProf::ComputeHash(K);
  }

  static std::pair<offset_type, offset_type>
  EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) {
    using namespace support;

    endian::Writer LE(Out, llvm::endianness::little);

    offset_type N = K.size();
    LE.write<offset_type>(N);

    offset_type M = 0;
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      M += sizeof(uint64_t); // The function hash
      M += sizeof(uint64_t); // The size of the Counts vector
      M += ProfRecord.Counts.size() * sizeof(uint64_t);
      M += sizeof(uint64_t); // The size of the Bitmap vector
      M += ProfRecord.BitmapBytes.size() * sizeof(uint64_t);

      // Value data
      M += ValueProfData::getSize(ProfileData.second);
    }
    LE.write<offset_type>(M);

    return std::make_pair(N, M);
  }

  void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N) {
    Out.write(K.data(), N);
  }

  void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V, offset_type) {
    using namespace support;

    endian::Writer LE(Out, ValueProfDataEndianness);
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      if (NamedInstrProfRecord::hasCSFlagInHash(ProfileData.first))
        CSSummaryBuilder->addRecord(ProfRecord);
      else
        SummaryBuilder->addRecord(ProfRecord);

      LE.write<uint64_t>(ProfileData.first); // Function hash
      LE.write<uint64_t>(ProfRecord.Counts.size());
      for (uint64_t I : ProfRecord.Counts)
        LE.write<uint64_t>(I);

      LE.write<uint64_t>(ProfRecord.BitmapBytes.size());
      for (uint64_t I : ProfRecord.BitmapBytes)
        LE.write<uint64_t>(I);

      // Write value data
      std::unique_ptr<ValueProfData> VDataPtr =
          ValueProfData::serializeFrom(ProfileData.second);
      uint32_t S = VDataPtr->getSize();
      VDataPtr->swapBytesFromHost(ValueProfDataEndianness);
      Out.write((const char *)VDataPtr.get(), S);
    }
  }
};

} // end namespace llvm

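// The trait above follows the OnDiskChainedHashTableGenerator protocol from
// llvm/Support/OnDiskHashTable.h: EmitKeyDataLength computes the (key, data)
// byte sizes up front, and EmitKey/EmitData must then write exactly that many
// bytes. A rough sketch of how the generator drives it (illustrative only;
// FuncName, ProfData, and Trait are hypothetical names):
//
//   OnDiskChainedHashTableGenerator<InstrProfRecordWriterTrait> Generator;
//   Generator.insert(FuncName, &ProfData);  // Buffered; nothing written yet.
//   uint64_t TableStart = Generator.Emit(OS, Trait); // Buckets + payload.
//
// The offset returned by Emit is what gets back patched into the header so
// the reader can locate the table.
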
InstrProfWriter::InstrProfWriter(
    bool Sparse, uint64_t TemporalProfTraceReservoirSize,
    uint64_t MaxTemporalProfTraceLength, bool WritePrevVersion,
    memprof::IndexedVersion MemProfVersionRequested, bool MemProfFullSchema)
    : Sparse(Sparse), MaxTemporalProfTraceLength(MaxTemporalProfTraceLength),
      TemporalProfTraceReservoirSize(TemporalProfTraceReservoirSize),
      InfoObj(new InstrProfRecordWriterTrait()),
      WritePrevVersion(WritePrevVersion),
      MemProfVersionRequested(MemProfVersionRequested),
      MemProfFullSchema(MemProfFullSchema) {}

InstrProfWriter::~InstrProfWriter() { delete InfoObj; }

// Internal interface for testing purposes only.
void InstrProfWriter::setValueProfDataEndianness(llvm::endianness Endianness) {
  InfoObj->ValueProfDataEndianness = Endianness;
}

void InstrProfWriter::setOutputSparse(bool Sparse) {
  this->Sparse = Sparse;
}

void InstrProfWriter::addRecord(NamedInstrProfRecord &&I, uint64_t Weight,
                                function_ref<void(Error)> Warn) {
  auto Name = I.Name;
  auto Hash = I.Hash;
  addRecord(Name, Hash, std::move(I), Weight, Warn);
}
212
214 OverlapStats &Overlap,
215 OverlapStats &FuncLevelOverlap,
216 const OverlapFuncFilters &FuncFilter) {
217 auto Name = Other.Name;
218 auto Hash = Other.Hash;
219 Other.accumulateCounts(FuncLevelOverlap.Test);
220 if (!FunctionData.contains(Name)) {
221 Overlap.addOneUnique(FuncLevelOverlap.Test);
222 return;
223 }
224 if (FuncLevelOverlap.Test.CountSum < 1.0f) {
225 Overlap.Overlap.NumEntries += 1;
226 return;
227 }
228 auto &ProfileDataMap = FunctionData[Name];
229 bool NewFunc;
231 std::tie(Where, NewFunc) =
232 ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
233 if (NewFunc) {
234 Overlap.addOneMismatch(FuncLevelOverlap.Test);
235 return;
236 }
237 InstrProfRecord &Dest = Where->second;
238
239 uint64_t ValueCutoff = FuncFilter.ValueCutoff;
240 if (!FuncFilter.NameFilter.empty() && Name.contains(FuncFilter.NameFilter))
241 ValueCutoff = 0;
242
243 Dest.overlap(Other, Overlap, FuncLevelOverlap, ValueCutoff);
244}

void InstrProfWriter::addRecord(StringRef Name, uint64_t Hash,
                                InstrProfRecord &&I, uint64_t Weight,
                                function_ref<void(Error)> Warn) {
  auto &ProfileDataMap = FunctionData[Name];

  bool NewFunc;
  ProfilingData::iterator Where;
  std::tie(Where, NewFunc) =
      ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
  InstrProfRecord &Dest = Where->second;

  auto MapWarn = [&](instrprof_error E) {
    Warn(make_error<InstrProfError>(E));
  };

  if (NewFunc) {
    // We've never seen a function with this name and hash; add it.
    Dest = std::move(I);
    if (Weight > 1)
      Dest.scale(Weight, 1, MapWarn);
  } else {
    // We're updating a function we've seen before.
    Dest.merge(I, Weight, MapWarn);
  }

  Dest.sortValueData();
}
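
// A worked example of the merge semantics above (illustrative only): adding a
// record with counts {1, 2} and Weight 2 for a previously unseen (Name, Hash)
// stores counts {2, 4} via scale(2, 1). Adding a second record with counts
// {3, 4} and Weight 1 for the same key then merges to {5, 8}. Mismatched
// counter vector sizes instead surface instrprof_error::count_mismatch
// through the Warn callback.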

void InstrProfWriter::addMemProfRecord(
    const GlobalValue::GUID Id, const memprof::IndexedMemProfRecord &Record) {
  auto [Iter, Inserted] = MemProfData.Records.insert({Id, Record});
  // If we inserted a new record then we are done.
  if (Inserted) {
    return;
  }
  memprof::IndexedMemProfRecord &Existing = Iter->second;
  Existing.merge(Record);
}

bool InstrProfWriter::addMemProfFrame(const memprof::FrameId Id,
                                      const memprof::Frame &Frame,
                                      function_ref<void(Error)> Warn) {
  auto [Iter, Inserted] = MemProfData.Frames.insert({Id, Frame});
  // If a mapping already exists for the current frame id and it does not
  // match the new mapping provided, then warn and bail out. We don't support
  // merging memprof data whose Frame -> Id mapping is inconsistent across
  // profiles.
  if (!Inserted && Iter->second != Frame) {
    Warn(make_error<InstrProfError>(instrprof_error::malformed,
                                    "frame to id mapping mismatch"));
    return false;
  }
  return true;
}

bool InstrProfWriter::addMemProfCallStack(
    const memprof::CallStackId CSId,
    const llvm::SmallVector<memprof::FrameId> &CallStack,
    function_ref<void(Error)> Warn) {
  auto [Iter, Inserted] = MemProfData.CallStacks.insert({CSId, CallStack});
  // If a mapping already exists for the current call stack id and it does not
  // match the new mapping provided, then warn and bail out. We don't support
  // merging memprof data whose CallStack -> Id mapping is inconsistent across
  // profiles.
  if (!Inserted && Iter->second != CallStack) {
    Warn(make_error<InstrProfError>(instrprof_error::malformed,
                                    "call stack to id mapping mismatch"));
    return false;
  }
  return true;
}

void InstrProfWriter::addBinaryIds(ArrayRef<llvm::object::BuildID> BIs) {
  llvm::append_range(BinaryIds, BIs);
}

void InstrProfWriter::addTemporalProfileTrace(TemporalProfTraceTy Trace) {
  assert(Trace.FunctionNameRefs.size() <= MaxTemporalProfTraceLength);
  assert(!Trace.FunctionNameRefs.empty());
  if (TemporalProfTraceStreamSize < TemporalProfTraceReservoirSize) {
    // Simply append the trace if we have not yet hit our reservoir size limit.
    TemporalProfTraces.push_back(std::move(Trace));
  } else {
    // Otherwise, replace a random trace in the stream.
    std::uniform_int_distribution<uint64_t> Distribution(
        0, TemporalProfTraceStreamSize);
    uint64_t RandomIndex = Distribution(RNG);
    if (RandomIndex < TemporalProfTraces.size())
      TemporalProfTraces[RandomIndex] = std::move(Trace);
  }
  ++TemporalProfTraceStreamSize;
}
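
// This is a variant of reservoir sampling (Algorithm R): once the reservoir
// is full, the incoming trace, arriving when the stream has already seen
// N = TemporalProfTraceStreamSize traces, survives only if a random draw
// from [0, N] lands inside the reservoir, i.e. with probability
// ReservoirSize / (N + 1). That keeps every trace in the stream roughly
// equally likely to be retained.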

void InstrProfWriter::addTemporalProfileTraces(
    SmallVectorImpl<TemporalProfTraceTy> &SrcTraces, uint64_t SrcStreamSize) {
  for (auto &Trace : SrcTraces)
    if (Trace.FunctionNameRefs.size() > MaxTemporalProfTraceLength)
      Trace.FunctionNameRefs.resize(MaxTemporalProfTraceLength);
  llvm::erase_if(SrcTraces, [](auto &T) { return T.FunctionNameRefs.empty(); });
  // Assume that the source has the same reservoir size as the destination to
  // avoid needing to record it in the indexed profile format.
  bool IsDestSampled =
      (TemporalProfTraceStreamSize > TemporalProfTraceReservoirSize);
  bool IsSrcSampled = (SrcStreamSize > TemporalProfTraceReservoirSize);
  if (!IsDestSampled && IsSrcSampled) {
    // If one of the streams is sampled, ensure that it belongs to Dest.
    std::swap(TemporalProfTraces, SrcTraces);
    std::swap(TemporalProfTraceStreamSize, SrcStreamSize);
    std::swap(IsDestSampled, IsSrcSampled);
  }
  if (!IsSrcSampled) {
    // If the source stream is not sampled, we add each source trace normally.
    for (auto &Trace : SrcTraces)
      addTemporalProfileTrace(std::move(Trace));
    return;
  }
  // Otherwise, we find the traces that would have been removed if we added
  // the whole source stream.
  SmallSetVector<uint64_t, 8> IndicesToReplace;
  for (uint64_t I = 0; I < SrcStreamSize; I++) {
    std::uniform_int_distribution<uint64_t> Distribution(
        0, TemporalProfTraceStreamSize);
    uint64_t RandomIndex = Distribution(RNG);
    if (RandomIndex < TemporalProfTraces.size())
      IndicesToReplace.insert(RandomIndex);
    ++TemporalProfTraceStreamSize;
  }
  // Then we insert a random sample of the source traces.
  llvm::shuffle(SrcTraces.begin(), SrcTraces.end(), RNG);
  for (const auto &[Index, Trace] : llvm::zip(IndicesToReplace, SrcTraces))
    TemporalProfTraces[Index] = std::move(Trace);
}

void InstrProfWriter::mergeRecordsFromWriter(InstrProfWriter &&IPW,
                                             function_ref<void(Error)> Warn) {
  for (auto &I : IPW.FunctionData)
    for (auto &Func : I.getValue())
      addRecord(I.getKey(), Func.first, std::move(Func.second), 1, Warn);

  BinaryIds.reserve(BinaryIds.size() + IPW.BinaryIds.size());
  for (auto &I : IPW.BinaryIds)
    addBinaryIds(I);

  addTemporalProfileTraces(IPW.TemporalProfTraces,
                           IPW.TemporalProfTraceStreamSize);

  MemProfData.Frames.reserve(IPW.MemProfData.Frames.size());
  for (auto &[FrameId, Frame] : IPW.MemProfData.Frames) {
    // If we weren't able to add the frame mappings then it doesn't make sense
    // to try to merge the records from this profile.
    if (!addMemProfFrame(FrameId, Frame, Warn))
      return;
  }

  MemProfData.CallStacks.reserve(IPW.MemProfData.CallStacks.size());
  for (auto &[CSId, CallStack] : IPW.MemProfData.CallStacks) {
    if (!addMemProfCallStack(CSId, CallStack, Warn))
      return;
  }

  MemProfData.Records.reserve(IPW.MemProfData.Records.size());
  for (auto &[GUID, Record] : IPW.MemProfData.Records) {
    addMemProfRecord(GUID, Record);
  }
}

bool InstrProfWriter::shouldEncodeData(const ProfilingData &PD) {
  if (!Sparse)
    return true;
  for (const auto &Func : PD) {
    const InstrProfRecord &IPR = Func.second;
    if (llvm::any_of(IPR.Counts, [](uint64_t Count) { return Count > 0; }))
      return true;
    if (llvm::any_of(IPR.BitmapBytes, [](uint8_t Byte) { return Byte > 0; }))
      return true;
  }
  return false;
}

static void setSummary(IndexedInstrProf::Summary *TheSummary,
                       ProfileSummary &PS) {
  using namespace IndexedInstrProf;

  const std::vector<ProfileSummaryEntry> &Res = PS.getDetailedSummary();
  TheSummary->NumSummaryFields = Summary::NumKinds;
  TheSummary->NumCutoffEntries = Res.size();
  TheSummary->set(Summary::MaxFunctionCount, PS.getMaxFunctionCount());
  TheSummary->set(Summary::MaxBlockCount, PS.getMaxCount());
  TheSummary->set(Summary::MaxInternalBlockCount, PS.getMaxInternalCount());
  TheSummary->set(Summary::TotalBlockCount, PS.getTotalCount());
  TheSummary->set(Summary::TotalNumBlocks, PS.getNumCounts());
  TheSummary->set(Summary::TotalNumFunctions, PS.getNumFunctions());
  for (unsigned I = 0; I < Res.size(); I++)
    TheSummary->setEntry(I, Res[I]);
}

// Serialize Schema.
static void writeMemProfSchema(ProfOStream &OS,
                               const memprof::MemProfSchema &Schema) {
  OS.write(static_cast<uint64_t>(Schema.size()));
  for (const auto Id : Schema)
    OS.write(static_cast<uint64_t>(Id));
}
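
// On-disk layout produced above (illustrative): a leading uint64_t count
// followed by one uint64_t per schema member id. For example, a hypothetical
// two-entry schema would serialize as three little-endian 8-byte words:
//
//   [2][Meta::AllocCount][Meta::TotalSize]
//
// The member names here are only for illustration; the real ids come from
// the memprof::Meta enum populated by getFullSchema()/getHotColdSchema().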

// Serialize MemProfRecordData. Return RecordTableOffset.
static uint64_t writeMemProfRecords(
    ProfOStream &OS,
    llvm::MapVector<GlobalValue::GUID, memprof::IndexedMemProfRecord>
        &MemProfRecordData,
    memprof::MemProfSchema *Schema, memprof::IndexedVersion Version,
    llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
        *MemProfCallStackIndexes = nullptr) {
  memprof::RecordWriterTrait RecordWriter(Schema, Version,
                                          MemProfCallStackIndexes);
  OnDiskChainedHashTableGenerator<memprof::RecordWriterTrait>
      RecordTableGenerator;
  for (auto &[GUID, Record] : MemProfRecordData) {
    // Insert the key (func hash) and value (memprof record).
    RecordTableGenerator.insert(GUID, Record, RecordWriter);
  }
  // Release the memory of this MapVector as it is no longer needed.
  MemProfRecordData.clear();

  // The call to Emit invokes RecordWriterTrait::EmitData, which destructs
  // the memprof record copies owned by the RecordTableGenerator. This works
  // because the RecordTableGenerator is not used after this point.
  return RecordTableGenerator.Emit(OS.OS, RecordWriter);
}

// Serialize MemProfFrameData. Return FrameTableOffset.
static uint64_t writeMemProfFrames(
    ProfOStream &OS,
    llvm::MapVector<memprof::FrameId, memprof::Frame> &MemProfFrameData) {
  OnDiskChainedHashTableGenerator<memprof::FrameWriterTrait>
      FrameTableGenerator;
  for (auto &[FrameId, Frame] : MemProfFrameData) {
    // Insert the key (frame id) and value (frame contents).
    FrameTableGenerator.insert(FrameId, Frame);
  }
  // Release the memory of this MapVector as it is no longer needed.
  MemProfFrameData.clear();

  return FrameTableGenerator.Emit(OS.OS);
}

// Serialize MemProfFrameData. Return the mapping from FrameIds to their
// indexes within the frame array.
static llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId>
writeMemProfFrameArray(
    ProfOStream &OS,
    llvm::MapVector<memprof::FrameId, memprof::Frame> &MemProfFrameData,
    llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram) {
  // Mappings from FrameIds to array indexes.
  llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId> MemProfFrameIndexes;

  // Compute the order in which we serialize Frames. The order does not matter
  // in terms of correctness, but we still compute it for deserialization
  // performance. Specifically, if we serialize frequently used Frames one
  // after another, we have better cache utilization. For two Frames that
  // appear equally frequently, we break a tie by serializing the one that
  // tends to appear earlier in call stacks. We implement the tie-breaking
  // mechanism by computing the sum of indexes within call stacks for each
  // Frame. If we still have a tie, we fall back to comparing the two
  // FrameIds, purely for output stability.
  std::vector<std::pair<memprof::FrameId, const memprof::Frame *>> FrameIdOrder;
  FrameIdOrder.reserve(MemProfFrameData.size());
  for (const auto &[Id, Frame] : MemProfFrameData)
    FrameIdOrder.emplace_back(Id, &Frame);
  assert(MemProfFrameData.size() == FrameIdOrder.size());
  llvm::sort(FrameIdOrder,
             [&](const std::pair<memprof::FrameId, const memprof::Frame *> &L,
                 const std::pair<memprof::FrameId, const memprof::Frame *> &R) {
               const auto &SL = FrameHistogram[L.first];
               const auto &SR = FrameHistogram[R.first];
               // Popular FrameIds should come first.
               if (SL.Count != SR.Count)
                 return SL.Count > SR.Count;
               // If they are equally popular, then the one that tends to appear
               // earlier in call stacks should come first.
               if (SL.PositionSum != SR.PositionSum)
                 return SL.PositionSum < SR.PositionSum;
               // Compare their FrameIds for sort stability.
               return L.first < R.first;
             });

  // Serialize all frames while creating mappings from linear IDs to FrameIds.
  uint64_t Index = 0;
  MemProfFrameIndexes.reserve(FrameIdOrder.size());
  for (const auto &[Id, F] : FrameIdOrder) {
    F->serialize(OS.OS);
    MemProfFrameIndexes.insert({Id, Index});
    ++Index;
  }
  assert(MemProfFrameData.size() == Index);
  assert(MemProfFrameData.size() == MemProfFrameIndexes.size());

  // Release the memory of this MapVector as it is no longer needed.
  MemProfFrameData.clear();

  return MemProfFrameIndexes;
}
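
// The returned map is what later turns FrameIds (64-bit hashes) into compact
// LinearFrameIds (array positions). For instance, a call stack {F3, F7} whose
// frames were serialized at positions 0 and 5 would be encoded as {0, 5} by
// the radix-tree builder below; the concrete ids here are made up purely for
// illustration.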

static uint64_t writeMemProfCallStacks(
    ProfOStream &OS,
    llvm::MapVector<memprof::CallStackId, llvm::SmallVector<memprof::FrameId>>
        &MemProfCallStackData) {
  OnDiskChainedHashTableGenerator<memprof::CallStackWriterTrait>
      CallStackTableGenerator;
  for (auto &[CSId, CallStack] : MemProfCallStackData)
    CallStackTableGenerator.insert(CSId, CallStack);
  // Release the memory of this vector as it is no longer needed.
  MemProfCallStackData.clear();

  return CallStackTableGenerator.Emit(OS.OS);
}

static llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
writeMemProfCallStackArray(
    ProfOStream &OS,
    llvm::MapVector<memprof::CallStackId, llvm::SmallVector<memprof::FrameId>>
        &MemProfCallStackData,
    llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId>
        &MemProfFrameIndexes,
    llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram) {
  llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
      MemProfCallStackIndexes;

  memprof::CallStackRadixTreeBuilder Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  for (auto I : Builder.getRadixArray())
    OS.write32(I);
  MemProfCallStackIndexes = Builder.takeCallStackPos();

  // Release the memory of this vector as it is no longer needed.
  MemProfCallStackData.clear();

  return MemProfCallStackIndexes;
}

// Write out MemProf Version0 as follows:
// uint64_t RecordTableOffset = RecordTableGenerator.Emit
// uint64_t FramePayloadOffset = Offset for the frame payload
// uint64_t FrameTableOffset = FrameTableGenerator.Emit
// uint64_t Num schema entries
// uint64_t Schema entry 0
// uint64_t Schema entry 1
// ....
// uint64_t Schema entry N - 1
// OnDiskChainedHashTable MemProfRecordData
// OnDiskChainedHashTable MemProfFrameData
static Error writeMemProfV0(ProfOStream &OS,
                            memprof::IndexedMemProfData &MemProfData) {
  uint64_t HeaderUpdatePos = OS.tell();
  OS.write(0ULL); // Reserve space for the memprof record table offset.
  OS.write(0ULL); // Reserve space for the memprof frame payload offset.
  OS.write(0ULL); // Reserve space for the memprof frame table offset.

  auto Schema = memprof::getFullSchema();
  writeMemProfSchema(OS, Schema);

  uint64_t RecordTableOffset =
      writeMemProfRecords(OS, MemProfData.Records, &Schema, memprof::Version0);

  uint64_t FramePayloadOffset = OS.tell();
  uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfData.Frames);

  uint64_t Header[] = {RecordTableOffset, FramePayloadOffset, FrameTableOffset};
  OS.patch({{HeaderUpdatePos, Header}});

  return Error::success();
}

// Write out MemProf Version1 as follows:
// uint64_t Version (NEW in V1)
// uint64_t RecordTableOffset = RecordTableGenerator.Emit
// uint64_t FramePayloadOffset = Offset for the frame payload
// uint64_t FrameTableOffset = FrameTableGenerator.Emit
// uint64_t Num schema entries
// uint64_t Schema entry 0
// uint64_t Schema entry 1
// ....
// uint64_t Schema entry N - 1
// OnDiskChainedHashTable MemProfRecordData
// OnDiskChainedHashTable MemProfFrameData
static Error writeMemProfV1(ProfOStream &OS,
                            memprof::IndexedMemProfData &MemProfData) {
  OS.write(memprof::Version1);
  uint64_t HeaderUpdatePos = OS.tell();
  OS.write(0ULL); // Reserve space for the memprof record table offset.
  OS.write(0ULL); // Reserve space for the memprof frame payload offset.
  OS.write(0ULL); // Reserve space for the memprof frame table offset.

  auto Schema = memprof::getFullSchema();
  writeMemProfSchema(OS, Schema);

  uint64_t RecordTableOffset =
      writeMemProfRecords(OS, MemProfData.Records, &Schema, memprof::Version1);

  uint64_t FramePayloadOffset = OS.tell();
  uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfData.Frames);

  uint64_t Header[] = {RecordTableOffset, FramePayloadOffset, FrameTableOffset};
  OS.patch({{HeaderUpdatePos, Header}});

  return Error::success();
}

// Write out MemProf Version2 as follows:
// uint64_t Version
// uint64_t RecordTableOffset = RecordTableGenerator.Emit
// uint64_t FramePayloadOffset = Offset for the frame payload
// uint64_t FrameTableOffset = FrameTableGenerator.Emit
// uint64_t CallStackPayloadOffset = Offset for the call stack payload (NEW V2)
// uint64_t CallStackTableOffset = CallStackTableGenerator.Emit (NEW in V2)
// uint64_t Num schema entries
// uint64_t Schema entry 0
// uint64_t Schema entry 1
// ....
// uint64_t Schema entry N - 1
// OnDiskChainedHashTable MemProfRecordData
// OnDiskChainedHashTable MemProfFrameData
// OnDiskChainedHashTable MemProfCallStackData (NEW in V2)
static Error writeMemProfV2(ProfOStream &OS,
                            memprof::IndexedMemProfData &MemProfData,
                            bool MemProfFullSchema) {
  OS.write(memprof::Version2);
  uint64_t HeaderUpdatePos = OS.tell();
  OS.write(0ULL); // Reserve space for the memprof record table offset.
  OS.write(0ULL); // Reserve space for the memprof frame payload offset.
  OS.write(0ULL); // Reserve space for the memprof frame table offset.
  OS.write(0ULL); // Reserve space for the memprof call stack payload offset.
  OS.write(0ULL); // Reserve space for the memprof call stack table offset.

  auto Schema = memprof::getHotColdSchema();
  if (MemProfFullSchema)
    Schema = memprof::getFullSchema();
  writeMemProfSchema(OS, Schema);

  uint64_t RecordTableOffset =
      writeMemProfRecords(OS, MemProfData.Records, &Schema, memprof::Version2);

  uint64_t FramePayloadOffset = OS.tell();
  uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfData.Frames);

  uint64_t CallStackPayloadOffset = OS.tell();
  uint64_t CallStackTableOffset =
      writeMemProfCallStacks(OS, MemProfData.CallStacks);

  uint64_t Header[] = {
      RecordTableOffset,      FramePayloadOffset,   FrameTableOffset,
      CallStackPayloadOffset, CallStackTableOffset,
  };
  OS.patch({{HeaderUpdatePos, Header}});

  return Error::success();
}

// Write out MemProf Version3 as follows:
// uint64_t Version
// uint64_t CallStackPayloadOffset = Offset for the call stack payload
// uint64_t RecordPayloadOffset = Offset for the record payload
// uint64_t RecordTableOffset = RecordTableGenerator.Emit
// uint64_t Num schema entries
// uint64_t Schema entry 0
// uint64_t Schema entry 1
// ....
// uint64_t Schema entry N - 1
// Frames serialized one after another
// Call stacks encoded as a radix tree
// OnDiskChainedHashTable MemProfRecordData
static Error writeMemProfV3(ProfOStream &OS,
                            memprof::IndexedMemProfData &MemProfData,
                            bool MemProfFullSchema) {
  OS.write(memprof::Version3);
  uint64_t HeaderUpdatePos = OS.tell();
  OS.write(0ULL); // Reserve space for the memprof call stack payload offset.
  OS.write(0ULL); // Reserve space for the memprof record payload offset.
  OS.write(0ULL); // Reserve space for the memprof record table offset.

  auto Schema = memprof::getHotColdSchema();
  if (MemProfFullSchema)
    Schema = memprof::getFullSchema();
  writeMemProfSchema(OS, Schema);

  llvm::DenseMap<memprof::FrameId, memprof::FrameStat> FrameHistogram =
      memprof::computeFrameHistogram(MemProfData.CallStacks);
  assert(MemProfData.Frames.size() == FrameHistogram.size());

  llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId> MemProfFrameIndexes =
      writeMemProfFrameArray(OS, MemProfData.Frames, FrameHistogram);

  uint64_t CallStackPayloadOffset = OS.tell();
  llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
      MemProfCallStackIndexes = writeMemProfCallStackArray(
          OS, MemProfData.CallStacks, MemProfFrameIndexes, FrameHistogram);

  uint64_t RecordPayloadOffset = OS.tell();
  uint64_t RecordTableOffset =
      writeMemProfRecords(OS, MemProfData.Records, &Schema, memprof::Version3,
                          &MemProfCallStackIndexes);

  uint64_t Header[] = {
      CallStackPayloadOffset,
      RecordPayloadOffset,
      RecordTableOffset,
  };
  OS.patch({{HeaderUpdatePos, Header}});

  return Error::success();
}

// Write out the MemProf data in a requested version.
static Error writeMemProf(ProfOStream &OS,
                          memprof::IndexedMemProfData &MemProfData,
                          memprof::IndexedVersion MemProfVersionRequested,
                          bool MemProfFullSchema) {
  switch (MemProfVersionRequested) {
  case memprof::Version0:
    return writeMemProfV0(OS, MemProfData);
  case memprof::Version1:
    return writeMemProfV1(OS, MemProfData);
  case memprof::Version2:
    return writeMemProfV2(OS, MemProfData, MemProfFullSchema);
  case memprof::Version3:
    return writeMemProfV3(OS, MemProfData, MemProfFullSchema);
  }

  return make_error<InstrProfError>(
      instrprof_error::unsupported_version,
      formatv("MemProf version {} not supported; "
              "requires version between {} and {}, inclusive",
              MemProfVersionRequested, memprof::MinimumSupportedVersion,
              memprof::MaximumSupportedVersion));
}
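
// For example, a caller holding data in MemProfData could emit the newest
// on-disk form with (illustrative call, not from this file):
//
//   if (Error E = writeMemProf(OS, MemProfData, memprof::Version3,
//                              /*MemProfFullSchema=*/false))
//     return E;
//
// which selects the V3 layout: frames and call stacks as flat arrays plus a
// radix tree, rather than the per-table on-disk hash tables of V0-V2.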

uint64_t InstrProfWriter::writeHeader(const IndexedInstrProf::Header &Header,
                                      const bool WritePrevVersion,
                                      ProfOStream &OS) {
  // Only write out the first four fields.
  for (int I = 0; I < 4; I++)
    OS.write(reinterpret_cast<const uint64_t *>(&Header)[I]);

  // Remember the offset of the remaining fields to allow back patching later.
  auto BackPatchStartOffset = OS.tell();

  // Reserve the space for back patching later.
  OS.write(0); // HashOffset
  OS.write(0); // MemProfOffset
  OS.write(0); // BinaryIdOffset
  OS.write(0); // TemporalProfTracesOffset
  if (!WritePrevVersion)
    OS.write(0); // VTableNamesOffset

  return BackPatchStartOffset;
}
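
// The four fields written eagerly are the fixed preamble of the indexed
// format (magic, version, an unused word, and the hash type); every section
// offset that follows is written as a zero placeholder here and patched in
// writeImpl once the corresponding section has actually been emitted and its
// offset is known.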

Error InstrProfWriter::writeVTableNames(ProfOStream &OS) {
  std::vector<std::string> VTableNameStrs;
  for (StringRef VTableName : VTableNames.keys())
    VTableNameStrs.push_back(VTableName.str());

  std::string CompressedVTableNames;
  if (!VTableNameStrs.empty())
    if (Error E = collectGlobalObjectNameStrings(
            VTableNameStrs, compression::zlib::isAvailable(),
            CompressedVTableNames))
      return E;

  const uint64_t CompressedStringLen = CompressedVTableNames.length();

  // Record the length of the compressed string.
  OS.write(CompressedStringLen);

  // Write the chars in the compressed string.
  for (auto &c : CompressedVTableNames)
    OS.writeByte(static_cast<uint8_t>(c));

  // Pad up to a multiple of 8.
  // InstrProfReader could read bytes according to 'CompressedStringLen'.
  const uint64_t PaddedLength = alignTo(CompressedStringLen, 8);

  for (uint64_t K = CompressedStringLen; K < PaddedLength; K++)
    OS.writeByte(0);

  return Error::success();
}
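
// Worked example of the padding above (illustrative): a 13-byte compressed
// blob yields PaddedLength = alignTo(13, 8) = 16, so three zero bytes follow
// the payload. A reader consumes exactly CompressedStringLen bytes and can
// then skip to the next 8-byte boundary.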

Error InstrProfWriter::writeImpl(ProfOStream &OS) {
  using namespace IndexedInstrProf;
  using namespace support;

  OnDiskChainedHashTableGenerator<InstrProfRecordWriterTrait> Generator;

  InstrProfSummaryBuilder ISB(ProfileSummaryBuilder::DefaultCutoffs);
  InfoObj->SummaryBuilder = &ISB;
  InstrProfSummaryBuilder CSISB(ProfileSummaryBuilder::DefaultCutoffs);
  InfoObj->CSSummaryBuilder = &CSISB;

  // Populate the hash table generator.
  SmallVector<std::pair<StringRef, const ProfilingData *>> OrderedData;
  for (const auto &I : FunctionData)
    if (shouldEncodeData(I.getValue()))
      OrderedData.emplace_back((I.getKey()), &I.getValue());
  llvm::sort(OrderedData, less_first());
  for (const auto &I : OrderedData)
    Generator.insert(I.first, I.second);

  // Write the header.
  IndexedInstrProf::Header Header;
  Header.Version = WritePrevVersion
                       ? IndexedInstrProf::ProfVersion::Version11
                       : IndexedInstrProf::ProfVersion::CurrentVersion;
  // The WritePrevVersion handling will either need to be removed or updated
  // if the version is advanced beyond 12.
  static_assert(IndexedInstrProf::ProfVersion::CurrentVersion ==
                IndexedInstrProf::ProfVersion::Version12);
  if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
    Header.Version |= VARIANT_MASK_IR_PROF;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
    Header.Version |= VARIANT_MASK_CSIR_PROF;
  if (static_cast<bool>(ProfileKind &
                        InstrProfKind::FunctionEntryInstrumentation))
    Header.Version |= VARIANT_MASK_INSTR_ENTRY;
  if (static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage))
    Header.Version |= VARIANT_MASK_BYTE_COVERAGE;
  if (static_cast<bool>(ProfileKind & InstrProfKind::FunctionEntryOnly))
    Header.Version |= VARIANT_MASK_FUNCTION_ENTRY_ONLY;
  if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf))
    Header.Version |= VARIANT_MASK_MEMPROF;
  if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile))
    Header.Version |= VARIANT_MASK_TEMPORAL_PROF;

  const uint64_t BackPatchStartOffset =
      writeHeader(Header, WritePrevVersion, OS);

  // Reserve space to write profile summary data.
  uint32_t NumEntries = ProfileSummaryBuilder::DefaultCutoffs.size();
  uint32_t SummarySize = Summary::getSize(Summary::NumKinds, NumEntries);
  // Remember the summary offset.
  uint64_t SummaryOffset = OS.tell();
  for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++)
    OS.write(0);
  uint64_t CSSummaryOffset = 0;
  uint64_t CSSummarySize = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
    CSSummaryOffset = OS.tell();
    CSSummarySize = SummarySize / sizeof(uint64_t);
    for (unsigned I = 0; I < CSSummarySize; I++)
      OS.write(0);
  }

  // Write the hash table.
  uint64_t HashTableStart = Generator.Emit(OS.OS, *InfoObj);

  // Write the MemProf profile data if we have it.
  uint64_t MemProfSectionStart = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf)) {
    MemProfSectionStart = OS.tell();
    if (auto E = writeMemProf(OS, MemProfData, MemProfVersionRequested,
                              MemProfFullSchema))
      return E;
  }

  // BinaryIdSection has two parts:
  // 1. uint64_t BinaryIdsSectionSize
  // 2. list of binary ids that consist of:
  //    a. uint64_t BinaryIdLength
  //    b. uint8_t  BinaryIdData
  //    c. uint8_t  Padding (if necessary)
  uint64_t BinaryIdSectionStart = OS.tell();
  // Calculate size of binary section.
  uint64_t BinaryIdsSectionSize = 0;

  // Remove duplicate binary ids.
  llvm::sort(BinaryIds);
  BinaryIds.erase(llvm::unique(BinaryIds), BinaryIds.end());

  for (const auto &BI : BinaryIds) {
    // Increment by binary id length data type size.
    BinaryIdsSectionSize += sizeof(uint64_t);
    // Increment by binary id data length, aligned to 8 bytes.
    BinaryIdsSectionSize += alignToPowerOf2(BI.size(), sizeof(uint64_t));
  }
  // Write binary ids section size.
  OS.write(BinaryIdsSectionSize);

  for (const auto &BI : BinaryIds) {
    uint64_t BILen = BI.size();
    // Write binary id length.
    OS.write(BILen);
    // Write binary id data.
    for (unsigned K = 0; K < BILen; K++)
      OS.writeByte(BI[K]);
    // Write padding if necessary.
    uint64_t PaddingSize = alignToPowerOf2(BILen, sizeof(uint64_t)) - BILen;
    for (unsigned K = 0; K < PaddingSize; K++)
      OS.writeByte(0);
  }

  uint64_t VTableNamesSectionStart = OS.tell();

  if (!WritePrevVersion)
    if (Error E = writeVTableNames(OS))
      return E;

  uint64_t TemporalProfTracesSectionStart = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile)) {
    TemporalProfTracesSectionStart = OS.tell();
    OS.write(TemporalProfTraces.size());
    OS.write(TemporalProfTraceStreamSize);
    for (auto &Trace : TemporalProfTraces) {
      OS.write(Trace.Weight);
      OS.write(Trace.FunctionNameRefs.size());
      for (auto &NameRef : Trace.FunctionNameRefs)
        OS.write(NameRef);
    }
  }

  // Allocate space for data to be serialized out.
  std::unique_ptr<IndexedInstrProf::Summary> TheSummary =
      IndexedInstrProf::allocSummary(SummarySize);
  // Compute the Summary and copy the data to the data
  // structure to be serialized out (to disk or buffer).
  std::unique_ptr<ProfileSummary> PS = ISB.getSummary();
  setSummary(TheSummary.get(), *PS);
  InfoObj->SummaryBuilder = nullptr;

  // For Context Sensitive summary.
  std::unique_ptr<IndexedInstrProf::Summary> TheCSSummary = nullptr;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
    TheCSSummary = IndexedInstrProf::allocSummary(SummarySize);
    std::unique_ptr<ProfileSummary> CSPS = CSISB.getSummary();
    setSummary(TheCSSummary.get(), *CSPS);
  }
  InfoObj->CSSummaryBuilder = nullptr;

  SmallVector<uint64_t, 8> HeaderOffsets = {HashTableStart, MemProfSectionStart,
                                            BinaryIdSectionStart,
                                            TemporalProfTracesSectionStart};
  if (!WritePrevVersion)
    HeaderOffsets.push_back(VTableNamesSectionStart);

  PatchItem PatchItems[] = {
      // Patch the Header fields
      {BackPatchStartOffset, HeaderOffsets},
      // Patch the summary data.
      {SummaryOffset,
       ArrayRef<uint64_t>(reinterpret_cast<uint64_t *>(TheSummary.get()),
                          SummarySize / sizeof(uint64_t))},
      {CSSummaryOffset,
       ArrayRef<uint64_t>(reinterpret_cast<uint64_t *>(TheCSSummary.get()),
                          CSSummarySize)}};

  OS.patch(PatchItems);

  for (const auto &I : FunctionData)
    for (const auto &F : I.getValue())
      if (Error E = validateRecord(F.second))
        return E;

  return Error::success();
}

Error InstrProfWriter::write(raw_fd_ostream &OS) {
  // Write the hash table.
  ProfOStream POS(OS);
  return writeImpl(POS);
}

Error InstrProfWriter::write(raw_string_ostream &OS) {
  ProfOStream POS(OS);
  return writeImpl(POS);
}

std::unique_ptr<MemoryBuffer> InstrProfWriter::writeBuffer() {
  std::string Data;
  raw_string_ostream OS(Data);
  // Write the hash table.
  if (Error E = write(OS))
    return nullptr;
  // Return this in an aligned memory buffer.
  return MemoryBuffer::getMemBufferCopy(Data);
}
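
// End-to-end usage sketch (illustrative; Record and WarnFn are hypothetical
// names supplied by the caller):
//
//   InstrProfWriter Writer;
//   Writer.addRecord(std::move(Record), /*Weight=*/1, WarnFn);
//   std::unique_ptr<MemoryBuffer> Buf = Writer.writeBuffer();
//
// writeBuffer() is intended for testing and maps any write failure to a null
// result; callers that need the diagnostic should prefer write(), which
// returns the Error directly.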

static const char *ValueProfKindStr[] = {
#define VALUE_PROF_KIND(Enumerator, Value, Descr) #Enumerator,
#include "llvm/ProfileData/InstrProfData.inc"
};

Error InstrProfWriter::validateRecord(const InstrProfRecord &Func) {
  for (uint32_t VK = 0; VK <= IPVK_Last; VK++) {
    if (VK == IPVK_IndirectCallTarget || VK == IPVK_VTableTarget)
      continue;
    uint32_t NS = Func.getNumValueSites(VK);
    for (uint32_t S = 0; S < NS; S++) {
      DenseSet<uint64_t> SeenValues;
      for (const auto &V : Func.getValueArrayForSite(VK, S))
        if (!SeenValues.insert(V.Value).second)
          return make_error<InstrProfError>(instrprof_error::invalid_prof);
    }
  }

  return Error::success();
}

void InstrProfWriter::writeRecordInText(StringRef Name, uint64_t Hash,
                                        const InstrProfRecord &Func,
                                        InstrProfSymtab &Symtab,
                                        raw_fd_ostream &OS) {
  OS << Name << "\n";
  OS << "# Func Hash:\n" << Hash << "\n";
  OS << "# Num Counters:\n" << Func.Counts.size() << "\n";
  OS << "# Counter Values:\n";
  for (uint64_t Count : Func.Counts)
    OS << Count << "\n";

  if (Func.BitmapBytes.size() > 0) {
    OS << "# Num Bitmap Bytes:\n$" << Func.BitmapBytes.size() << "\n";
    OS << "# Bitmap Byte Values:\n";
    for (uint8_t Byte : Func.BitmapBytes) {
      OS << "0x";
      OS.write_hex(Byte);
      OS << "\n";
    }
    OS << "\n";
  }

  uint32_t NumValueKinds = Func.getNumValueKinds();
  if (!NumValueKinds) {
    OS << "\n";
    return;
  }

  OS << "# Num Value Kinds:\n" << Func.getNumValueKinds() << "\n";
  for (uint32_t VK = 0; VK < IPVK_Last + 1; VK++) {
    uint32_t NS = Func.getNumValueSites(VK);
    if (!NS)
      continue;
    OS << "# ValueKind = " << ValueProfKindStr[VK] << ":\n" << VK << "\n";
    OS << "# NumValueSites:\n" << NS << "\n";
    for (uint32_t S = 0; S < NS; S++) {
      auto VD = Func.getValueArrayForSite(VK, S);
      OS << VD.size() << "\n";
      for (const auto &V : VD) {
        if (VK == IPVK_IndirectCallTarget || VK == IPVK_VTableTarget)
          OS << Symtab.getFuncOrVarNameIfDefined(V.Value) << ":" << V.Count
             << "\n";
        else
          OS << V.Value << ":" << V.Count << "\n";
      }
    }
  }

  OS << "\n";
}
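
// For a function "main" with hash 1234 and two counters {100, 50}, the body
// above emits (example output; no bitmap or value profile data, abbreviated):
//
//   main
//   # Func Hash:
//   1234
//   # Num Counters:
//   2
//   # Counter Values:
//   100
//   50
//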

Error InstrProfWriter::writeText(raw_fd_ostream &OS) {
  // Check CS first since it implies an IR level profile.
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
    OS << "# CSIR level Instrumentation Flag\n:csir\n";
  else if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
    OS << "# IR level Instrumentation Flag\n:ir\n";

  if (static_cast<bool>(ProfileKind &
                        InstrProfKind::FunctionEntryInstrumentation))
    OS << "# Always instrument the function entry block\n:entry_first\n";
  if (static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage))
    OS << "# Instrument block coverage\n:single_byte_coverage\n";
  InstrProfSymtab Symtab;

  using FuncPair = detail::DenseMapPair<uint64_t, InstrProfRecord>;
  using RecordType = std::pair<StringRef, FuncPair>;
  SmallVector<RecordType, 4> OrderedFuncData;

  for (const auto &I : FunctionData) {
    if (shouldEncodeData(I.getValue())) {
      if (Error E = Symtab.addFuncName(I.getKey()))
        return E;
      for (const auto &Func : I.getValue())
        OrderedFuncData.push_back(std::make_pair(I.getKey(), Func));
    }
  }

  for (const auto &VTableName : VTableNames)
    if (Error E = Symtab.addVTableName(VTableName.getKey()))
      return E;

  if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile))
    writeTextTemporalProfTraceData(OS, Symtab);

  llvm::sort(OrderedFuncData, [](const RecordType &A, const RecordType &B) {
    return std::tie(A.first, A.second.first) <
           std::tie(B.first, B.second.first);
  });

  for (const auto &record : OrderedFuncData) {
    const StringRef &Name = record.first;
    const FuncPair &Func = record.second;
    writeRecordInText(Name, Func.first, Func.second, Symtab, OS);
  }

  for (const auto &record : OrderedFuncData) {
    const FuncPair &Func = record.second;
    if (Error E = validateRecord(Func.second))
      return E;
  }

  return Error::success();
}

void InstrProfWriter::writeTextTemporalProfTraceData(raw_fd_ostream &OS,
                                                     InstrProfSymtab &Symtab) {
  OS << ":temporal_prof_traces\n";
  OS << "# Num Temporal Profile Traces:\n" << TemporalProfTraces.size() << "\n";
  OS << "# Temporal Profile Trace Stream Size:\n"
     << TemporalProfTraceStreamSize << "\n";
  for (auto &Trace : TemporalProfTraces) {
    OS << "# Weight:\n" << Trace.Weight << "\n";
    for (auto &NameRef : Trace.FunctionNameRefs)
      OS << Symtab.getFuncOrVarName(NameRef) << ",";
    OS << "\n";
  }
  OS << "\n";
}