LLVM 19.0.0git
InstrProfWriter.cpp
Go to the documentation of this file.
1//===- InstrProfWriter.cpp - Instrumented profiling writer ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains support for writing profiling data for clang's
10// instrumentation based PGO and coverage.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/SetVector.h"
17#include "llvm/ADT/StringRef.h"
23#include "llvm/Support/Endian.h"
25#include "llvm/Support/Error.h"
30#include <cstdint>
31#include <memory>
32#include <string>
33#include <tuple>
34#include <utility>
35#include <vector>
36
37using namespace llvm;
38
39// A struct to define how the data stream should be patched. For Indexed
40// profiling, only uint64_t data type is needed.
// A struct to define how the data stream should be patched. For Indexed
// profiling, only uint64_t data type is needed.
struct PatchItem {
  uint64_t Pos;  // Byte offset in the output stream where patching starts.
  uint64_t *D;   // Pointer to an array of source data.
  int N;         // Number of elements in \c D array.
};
46
47namespace llvm {
48
49// A wrapper class to abstract writer stream with support of bytes
50// back patching.
52public:
54 : IsFDOStream(true), OS(FD), LE(FD, llvm::endianness::little) {}
56 : IsFDOStream(false), OS(STR), LE(STR, llvm::endianness::little) {}
57
58 uint64_t tell() { return OS.tell(); }
59 void write(uint64_t V) { LE.write<uint64_t>(V); }
60 void writeByte(uint8_t V) { LE.write<uint8_t>(V); }
61
62 // \c patch can only be called when all data is written and flushed.
63 // For raw_string_ostream, the patch is done on the target string
64 // directly and it won't be reflected in the stream's internal buffer.
66 using namespace support;
67
68 if (IsFDOStream) {
69 raw_fd_ostream &FDOStream = static_cast<raw_fd_ostream &>(OS);
70 const uint64_t LastPos = FDOStream.tell();
71 for (const auto &K : P) {
72 FDOStream.seek(K.Pos);
73 for (int I = 0; I < K.N; I++)
74 write(K.D[I]);
75 }
76 // Reset the stream to the last position after patching so that users
77 // don't accidentally overwrite data. This makes it consistent with
78 // the string stream below which replaces the data directly.
79 FDOStream.seek(LastPos);
80 } else {
81 raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS);
82 std::string &Data = SOStream.str(); // with flush
83 for (const auto &K : P) {
84 for (int I = 0; I < K.N; I++) {
85 uint64_t Bytes =
86 endian::byte_swap<uint64_t, llvm::endianness::little>(K.D[I]);
87 Data.replace(K.Pos + I * sizeof(uint64_t), sizeof(uint64_t),
88 (const char *)&Bytes, sizeof(uint64_t));
89 }
90 }
91 }
92 }
93
94 // If \c OS is an instance of \c raw_fd_ostream, this field will be
95 // true. Otherwise, \c OS will be an raw_string_ostream.
99};
100
102public:
105
108
111
115
117
120 }
121
122 static std::pair<offset_type, offset_type>
124 using namespace support;
125
127
128 offset_type N = K.size();
129 LE.write<offset_type>(N);
130
131 offset_type M = 0;
132 for (const auto &ProfileData : *V) {
133 const InstrProfRecord &ProfRecord = ProfileData.second;
134 M += sizeof(uint64_t); // The function hash
135 M += sizeof(uint64_t); // The size of the Counts vector
136 M += ProfRecord.Counts.size() * sizeof(uint64_t);
137 M += sizeof(uint64_t); // The size of the Bitmap vector
138 M += ProfRecord.BitmapBytes.size() * sizeof(uint64_t);
139
140 // Value data
141 M += ValueProfData::getSize(ProfileData.second);
142 }
143 LE.write<offset_type>(M);
144
145 return std::make_pair(N, M);
146 }
147
149 Out.write(K.data(), N);
150 }
151
153 using namespace support;
154
156 for (const auto &ProfileData : *V) {
157 const InstrProfRecord &ProfRecord = ProfileData.second;
158 if (NamedInstrProfRecord::hasCSFlagInHash(ProfileData.first))
159 CSSummaryBuilder->addRecord(ProfRecord);
160 else
161 SummaryBuilder->addRecord(ProfRecord);
162
163 LE.write<uint64_t>(ProfileData.first); // Function hash
164 LE.write<uint64_t>(ProfRecord.Counts.size());
165 for (uint64_t I : ProfRecord.Counts)
166 LE.write<uint64_t>(I);
167
168 LE.write<uint64_t>(ProfRecord.BitmapBytes.size());
169 for (uint64_t I : ProfRecord.BitmapBytes)
170 LE.write<uint64_t>(I);
171
172 // Write value data
173 std::unique_ptr<ValueProfData> VDataPtr =
174 ValueProfData::serializeFrom(ProfileData.second);
175 uint32_t S = VDataPtr->getSize();
176 VDataPtr->swapBytesFromHost(ValueProfDataEndianness);
177 Out.write((const char *)VDataPtr.get(), S);
178 }
179 }
180};
181
182} // end namespace llvm
183
185 bool Sparse, uint64_t TemporalProfTraceReservoirSize,
186 uint64_t MaxTemporalProfTraceLength, bool WritePrevVersion,
187 memprof::IndexedVersion MemProfVersionRequested, bool MemProfFullSchema)
188 : Sparse(Sparse), MaxTemporalProfTraceLength(MaxTemporalProfTraceLength),
189 TemporalProfTraceReservoirSize(TemporalProfTraceReservoirSize),
190 InfoObj(new InstrProfRecordWriterTrait()),
191 WritePrevVersion(WritePrevVersion),
192 MemProfVersionRequested(MemProfVersionRequested),
193 MemProfFullSchema(MemProfFullSchema) {}
194
196
197// Internal interface for testing purpose only.
199 InfoObj->ValueProfDataEndianness = Endianness;
200}
201
203 this->Sparse = Sparse;
204}
205
207 function_ref<void(Error)> Warn) {
208 auto Name = I.Name;
209 auto Hash = I.Hash;
210 addRecord(Name, Hash, std::move(I), Weight, Warn);
211}
212
214 OverlapStats &Overlap,
215 OverlapStats &FuncLevelOverlap,
216 const OverlapFuncFilters &FuncFilter) {
217 auto Name = Other.Name;
218 auto Hash = Other.Hash;
219 Other.accumulateCounts(FuncLevelOverlap.Test);
220 if (!FunctionData.contains(Name)) {
221 Overlap.addOneUnique(FuncLevelOverlap.Test);
222 return;
223 }
224 if (FuncLevelOverlap.Test.CountSum < 1.0f) {
225 Overlap.Overlap.NumEntries += 1;
226 return;
227 }
228 auto &ProfileDataMap = FunctionData[Name];
229 bool NewFunc;
231 std::tie(Where, NewFunc) =
232 ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
233 if (NewFunc) {
234 Overlap.addOneMismatch(FuncLevelOverlap.Test);
235 return;
236 }
237 InstrProfRecord &Dest = Where->second;
238
239 uint64_t ValueCutoff = FuncFilter.ValueCutoff;
240 if (!FuncFilter.NameFilter.empty() && Name.contains(FuncFilter.NameFilter))
241 ValueCutoff = 0;
242
243 Dest.overlap(Other, Overlap, FuncLevelOverlap, ValueCutoff);
244}
245
247 InstrProfRecord &&I, uint64_t Weight,
248 function_ref<void(Error)> Warn) {
249 auto &ProfileDataMap = FunctionData[Name];
250
251 bool NewFunc;
253 std::tie(Where, NewFunc) =
254 ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
255 InstrProfRecord &Dest = Where->second;
256
257 auto MapWarn = [&](instrprof_error E) {
258 Warn(make_error<InstrProfError>(E));
259 };
260
261 if (NewFunc) {
262 // We've never seen a function with this name and hash, add it.
263 Dest = std::move(I);
264 if (Weight > 1)
265 Dest.scale(Weight, 1, MapWarn);
266 } else {
267 // We're updating a function we've seen before.
268 Dest.merge(I, Weight, MapWarn);
269 }
270
271 Dest.sortValueData();
272}
273
276 auto [Iter, Inserted] = MemProfRecordData.insert({Id, Record});
277 // If we inserted a new record then we are done.
278 if (Inserted) {
279 return;
280 }
281 memprof::IndexedMemProfRecord &Existing = Iter->second;
282 Existing.merge(Record);
283}
284
286 const memprof::Frame &Frame,
287 function_ref<void(Error)> Warn) {
288 auto [Iter, Inserted] = MemProfFrameData.insert({Id, Frame});
289 // If a mapping already exists for the current frame id and it does not
290 // match the new mapping provided then reset the existing contents and bail
291 // out. We don't support the merging of memprof data whose Frame -> Id
292 // mapping across profiles is inconsistent.
293 if (!Inserted && Iter->second != Frame) {
294 Warn(make_error<InstrProfError>(instrprof_error::malformed,
295 "frame to id mapping mismatch"));
296 return false;
297 }
298 return true;
299}
300
302 const memprof::CallStackId CSId,
304 function_ref<void(Error)> Warn) {
305 auto [Iter, Inserted] = MemProfCallStackData.insert({CSId, CallStack});
306 // If a mapping already exists for the current call stack id and it does not
307 // match the new mapping provided then reset the existing contents and bail
308 // out. We don't support the merging of memprof data whose CallStack -> Id
309 // mapping across profiles is inconsistent.
310 if (!Inserted && Iter->second != CallStack) {
311 Warn(make_error<InstrProfError>(instrprof_error::malformed,
312 "call stack to id mapping mismatch"));
313 return false;
314 }
315 return true;
316}
317
319 llvm::append_range(BinaryIds, BIs);
320}
321
322void InstrProfWriter::addTemporalProfileTrace(TemporalProfTraceTy Trace) {
323 if (Trace.FunctionNameRefs.size() > MaxTemporalProfTraceLength)
324 Trace.FunctionNameRefs.resize(MaxTemporalProfTraceLength);
325 if (Trace.FunctionNameRefs.empty())
326 return;
327
328 if (TemporalProfTraceStreamSize < TemporalProfTraceReservoirSize) {
329 // Simply append the trace if we have not yet hit our reservoir size limit.
330 TemporalProfTraces.push_back(std::move(Trace));
331 } else {
332 // Otherwise, replace a random trace in the stream.
333 std::uniform_int_distribution<uint64_t> Distribution(
334 0, TemporalProfTraceStreamSize);
335 uint64_t RandomIndex = Distribution(RNG);
336 if (RandomIndex < TemporalProfTraces.size())
337 TemporalProfTraces[RandomIndex] = std::move(Trace);
338 }
339 ++TemporalProfTraceStreamSize;
340}
341
343 SmallVectorImpl<TemporalProfTraceTy> &SrcTraces, uint64_t SrcStreamSize) {
344 // Assume that the source has the same reservoir size as the destination to
345 // avoid needing to record it in the indexed profile format.
346 bool IsDestSampled =
347 (TemporalProfTraceStreamSize > TemporalProfTraceReservoirSize);
348 bool IsSrcSampled = (SrcStreamSize > TemporalProfTraceReservoirSize);
349 if (!IsDestSampled && IsSrcSampled) {
350 // If one of the traces are sampled, ensure that it belongs to Dest.
351 std::swap(TemporalProfTraces, SrcTraces);
352 std::swap(TemporalProfTraceStreamSize, SrcStreamSize);
353 std::swap(IsDestSampled, IsSrcSampled);
354 }
355 if (!IsSrcSampled) {
356 // If the source stream is not sampled, we add each source trace normally.
357 for (auto &Trace : SrcTraces)
358 addTemporalProfileTrace(std::move(Trace));
359 return;
360 }
361 // Otherwise, we find the traces that would have been removed if we added
362 // the whole source stream.
363 SmallSetVector<uint64_t, 8> IndicesToReplace;
364 for (uint64_t I = 0; I < SrcStreamSize; I++) {
365 std::uniform_int_distribution<uint64_t> Distribution(
366 0, TemporalProfTraceStreamSize);
367 uint64_t RandomIndex = Distribution(RNG);
368 if (RandomIndex < TemporalProfTraces.size())
369 IndicesToReplace.insert(RandomIndex);
370 ++TemporalProfTraceStreamSize;
371 }
372 // Then we insert a random sample of the source traces.
373 llvm::shuffle(SrcTraces.begin(), SrcTraces.end(), RNG);
374 for (const auto &[Index, Trace] : llvm::zip(IndicesToReplace, SrcTraces))
375 TemporalProfTraces[Index] = std::move(Trace);
376}
377
379 function_ref<void(Error)> Warn) {
380 for (auto &I : IPW.FunctionData)
381 for (auto &Func : I.getValue())
382 addRecord(I.getKey(), Func.first, std::move(Func.second), 1, Warn);
383
384 BinaryIds.reserve(BinaryIds.size() + IPW.BinaryIds.size());
385 for (auto &I : IPW.BinaryIds)
387
388 addTemporalProfileTraces(IPW.TemporalProfTraces,
389 IPW.TemporalProfTraceStreamSize);
390
391 MemProfFrameData.reserve(IPW.MemProfFrameData.size());
392 for (auto &[FrameId, Frame] : IPW.MemProfFrameData) {
393 // If we weren't able to add the frame mappings then it doesn't make sense
394 // to try to merge the records from this profile.
395 if (!addMemProfFrame(FrameId, Frame, Warn))
396 return;
397 }
398
399 MemProfCallStackData.reserve(IPW.MemProfCallStackData.size());
400 for (auto &[CSId, CallStack] : IPW.MemProfCallStackData) {
401 if (!addMemProfCallStack(CSId, CallStack, Warn))
402 return;
403 }
404
405 MemProfRecordData.reserve(IPW.MemProfRecordData.size());
406 for (auto &[GUID, Record] : IPW.MemProfRecordData) {
408 }
409}
410
411bool InstrProfWriter::shouldEncodeData(const ProfilingData &PD) {
412 if (!Sparse)
413 return true;
414 for (const auto &Func : PD) {
415 const InstrProfRecord &IPR = Func.second;
416 if (llvm::any_of(IPR.Counts, [](uint64_t Count) { return Count > 0; }))
417 return true;
418 if (llvm::any_of(IPR.BitmapBytes, [](uint8_t Byte) { return Byte > 0; }))
419 return true;
420 }
421 return false;
422}
423
424static void setSummary(IndexedInstrProf::Summary *TheSummary,
425 ProfileSummary &PS) {
426 using namespace IndexedInstrProf;
427
428 const std::vector<ProfileSummaryEntry> &Res = PS.getDetailedSummary();
429 TheSummary->NumSummaryFields = Summary::NumKinds;
430 TheSummary->NumCutoffEntries = Res.size();
431 TheSummary->set(Summary::MaxFunctionCount, PS.getMaxFunctionCount());
432 TheSummary->set(Summary::MaxBlockCount, PS.getMaxCount());
433 TheSummary->set(Summary::MaxInternalBlockCount, PS.getMaxInternalCount());
434 TheSummary->set(Summary::TotalBlockCount, PS.getTotalCount());
435 TheSummary->set(Summary::TotalNumBlocks, PS.getNumCounts());
436 TheSummary->set(Summary::TotalNumFunctions, PS.getNumFunctions());
437 for (unsigned I = 0; I < Res.size(); I++)
438 TheSummary->setEntry(I, Res[I]);
439}
440
441// Serialize Schema.
443 const memprof::MemProfSchema &Schema) {
444 OS.write(static_cast<uint64_t>(Schema.size()));
445 for (const auto Id : Schema)
446 OS.write(static_cast<uint64_t>(Id));
447}
448
449// Serialize MemProfRecordData. Return RecordTableOffset.
453 &MemProfRecordData,
455 memprof::RecordWriterTrait RecordWriter(Schema, Version);
457 RecordTableGenerator;
458 for (auto &[GUID, Record] : MemProfRecordData) {
459 // Insert the key (func hash) and value (memprof record).
460 RecordTableGenerator.insert(GUID, Record, RecordWriter);
461 }
462 // Release the memory of this MapVector as it is no longer needed.
463 MemProfRecordData.clear();
464
465 // The call to Emit invokes RecordWriterTrait::EmitData which destructs
466 // the memprof record copies owned by the RecordTableGenerator. This works
467 // because the RecordTableGenerator is not used after this point.
468 return RecordTableGenerator.Emit(OS.OS, RecordWriter);
469}
470
471// Serialize MemProfFrameData. Return FrameTableOffset.
476 FrameTableGenerator;
477 for (auto &[FrameId, Frame] : MemProfFrameData) {
478 // Insert the key (frame id) and value (frame contents).
479 FrameTableGenerator.insert(FrameId, Frame);
480 }
481 // Release the memory of this MapVector as it is no longer needed.
482 MemProfFrameData.clear();
483
484 return FrameTableGenerator.Emit(OS.OS);
485}
486
490 &MemProfCallStackData) {
492 CallStackTableGenerator;
493 for (auto &[CSId, CallStack] : MemProfCallStackData)
494 CallStackTableGenerator.insert(CSId, CallStack);
495 // Release the memory of this vector as it is no longer needed.
496 MemProfCallStackData.clear();
497
498 return CallStackTableGenerator.Emit(OS.OS);
499}
500
504 &MemProfRecordData,
506 uint64_t HeaderUpdatePos = OS.tell();
507 OS.write(0ULL); // Reserve space for the memprof record table offset.
508 OS.write(0ULL); // Reserve space for the memprof frame payload offset.
509 OS.write(0ULL); // Reserve space for the memprof frame table offset.
510
511 auto Schema = memprof::getFullSchema();
512 writeMemProfSchema(OS, Schema);
513
514 uint64_t RecordTableOffset =
515 writeMemProfRecords(OS, MemProfRecordData, &Schema, memprof::Version0);
516
517 uint64_t FramePayloadOffset = OS.tell();
518 uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfFrameData);
519
520 uint64_t Header[] = {RecordTableOffset, FramePayloadOffset, FrameTableOffset};
521 OS.patch({{HeaderUpdatePos, Header, std::size(Header)}});
522
523 return Error::success();
524}
525
529 &MemProfRecordData,
532 uint64_t HeaderUpdatePos = OS.tell();
533 OS.write(0ULL); // Reserve space for the memprof record table offset.
534 OS.write(0ULL); // Reserve space for the memprof frame payload offset.
535 OS.write(0ULL); // Reserve space for the memprof frame table offset.
536
537 auto Schema = memprof::getFullSchema();
538 writeMemProfSchema(OS, Schema);
539
540 uint64_t RecordTableOffset =
541 writeMemProfRecords(OS, MemProfRecordData, &Schema, memprof::Version1);
542
543 uint64_t FramePayloadOffset = OS.tell();
544 uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfFrameData);
545
546 uint64_t Header[] = {RecordTableOffset, FramePayloadOffset, FrameTableOffset};
547 OS.patch({{HeaderUpdatePos, Header, std::size(Header)}});
548
549 return Error::success();
550}
551
555 &MemProfRecordData,
558 &MemProfCallStackData,
559 bool MemProfFullSchema) {
561 uint64_t HeaderUpdatePos = OS.tell();
562 OS.write(0ULL); // Reserve space for the memprof record table offset.
563 OS.write(0ULL); // Reserve space for the memprof frame payload offset.
564 OS.write(0ULL); // Reserve space for the memprof frame table offset.
565 OS.write(0ULL); // Reserve space for the memprof call stack payload offset.
566 OS.write(0ULL); // Reserve space for the memprof call stack table offset.
567
568 auto Schema = memprof::getHotColdSchema();
569 if (MemProfFullSchema)
570 Schema = memprof::getFullSchema();
571 writeMemProfSchema(OS, Schema);
572
573 uint64_t RecordTableOffset =
574 writeMemProfRecords(OS, MemProfRecordData, &Schema, memprof::Version2);
575
576 uint64_t FramePayloadOffset = OS.tell();
577 uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfFrameData);
578
579 uint64_t CallStackPayloadOffset = OS.tell();
580 uint64_t CallStackTableOffset =
581 writeMemProfCallStacks(OS, MemProfCallStackData);
582
583 uint64_t Header[] = {
584 RecordTableOffset, FramePayloadOffset, FrameTableOffset,
585 CallStackPayloadOffset, CallStackTableOffset,
586 };
587 OS.patch({{HeaderUpdatePos, Header, std::size(Header)}});
588
589 return Error::success();
590}
591
592// The MemProf profile data includes a simple schema
593// with the format described below followed by the hashtable:
594// uint64_t Version
595// uint64_t RecordTableOffset = RecordTableGenerator.Emit
596// uint64_t FramePayloadOffset = Stream offset before emitting the frame table
597// uint64_t FrameTableOffset = FrameTableGenerator.Emit
598// uint64_t Num schema entries
599// uint64_t Schema entry 0
600// uint64_t Schema entry 1
601// ....
602// uint64_t Schema entry N - 1
603// OnDiskChainedHashTable MemProfRecordData
604// OnDiskChainedHashTable MemProfFrameData
608 &MemProfRecordData,
611 &MemProfCallStackData,
612 memprof::IndexedVersion MemProfVersionRequested, bool MemProfFullSchema) {
613
614 switch (MemProfVersionRequested) {
616 return writeMemProfV0(OS, MemProfRecordData, MemProfFrameData);
618 return writeMemProfV1(OS, MemProfRecordData, MemProfFrameData);
620 return writeMemProfV2(OS, MemProfRecordData, MemProfFrameData,
621 MemProfCallStackData, MemProfFullSchema);
622 }
623
624 return make_error<InstrProfError>(
626 formatv("MemProf version {} not supported; "
627 "requires version between {} and {}, inclusive",
628 MemProfVersionRequested, memprof::MinimumSupportedVersion,
630}
631
632Error InstrProfWriter::writeImpl(ProfOStream &OS) {
633 using namespace IndexedInstrProf;
634 using namespace support;
635
637
639 InfoObj->SummaryBuilder = &ISB;
641 InfoObj->CSSummaryBuilder = &CSISB;
642
643 // Populate the hash table generator.
645 for (const auto &I : FunctionData)
646 if (shouldEncodeData(I.getValue()))
647 OrderedData.emplace_back((I.getKey()), &I.getValue());
648 llvm::sort(OrderedData, less_first());
649 for (const auto &I : OrderedData)
650 Generator.insert(I.first, I.second);
651
652 // Write the header.
654 Header.Magic = IndexedInstrProf::Magic;
655 Header.Version = WritePrevVersion
658 // The WritePrevVersion handling will either need to be removed or updated
659 // if the version is advanced beyond 12.
662 if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
663 Header.Version |= VARIANT_MASK_IR_PROF;
664 if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
665 Header.Version |= VARIANT_MASK_CSIR_PROF;
666 if (static_cast<bool>(ProfileKind &
668 Header.Version |= VARIANT_MASK_INSTR_ENTRY;
669 if (static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage))
670 Header.Version |= VARIANT_MASK_BYTE_COVERAGE;
671 if (static_cast<bool>(ProfileKind & InstrProfKind::FunctionEntryOnly))
672 Header.Version |= VARIANT_MASK_FUNCTION_ENTRY_ONLY;
673 if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf))
674 Header.Version |= VARIANT_MASK_MEMPROF;
675 if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile))
676 Header.Version |= VARIANT_MASK_TEMPORAL_PROF;
677
678 Header.Unused = 0;
679 Header.HashType = static_cast<uint64_t>(IndexedInstrProf::HashType);
680 Header.HashOffset = 0;
681 Header.MemProfOffset = 0;
682 Header.BinaryIdOffset = 0;
683 Header.TemporalProfTracesOffset = 0;
684 Header.VTableNamesOffset = 0;
685
686 // Only write out the first four fields. We need to remember the offset of the
687 // remaining fields to allow back patching later.
688 for (int I = 0; I < 4; I++)
689 OS.write(reinterpret_cast<uint64_t *>(&Header)[I]);
690
691 // Save the location of Header.HashOffset field in \c OS.
692 uint64_t HashTableStartFieldOffset = OS.tell();
693 // Reserve the space for HashOffset field.
694 OS.write(0);
695
696 // Save the location of MemProf profile data. This is stored in two parts as
697 // the schema and as a separate on-disk chained hashtable.
698 uint64_t MemProfSectionOffset = OS.tell();
699 // Reserve space for the MemProf table field to be patched later if this
700 // profile contains memory profile information.
701 OS.write(0);
702
703 // Save the location of binary ids section.
704 uint64_t BinaryIdSectionOffset = OS.tell();
705 // Reserve space for the BinaryIdOffset field to be patched later if this
706 // profile contains binary ids.
707 OS.write(0);
708
709 uint64_t TemporalProfTracesOffset = OS.tell();
710 OS.write(0);
711
712 uint64_t VTableNamesOffset = OS.tell();
713 if (!WritePrevVersion)
714 OS.write(0);
715
716 // Reserve space to write profile summary data.
718 uint32_t SummarySize = Summary::getSize(Summary::NumKinds, NumEntries);
719 // Remember the summary offset.
720 uint64_t SummaryOffset = OS.tell();
721 for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++)
722 OS.write(0);
723 uint64_t CSSummaryOffset = 0;
724 uint64_t CSSummarySize = 0;
725 if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
726 CSSummaryOffset = OS.tell();
727 CSSummarySize = SummarySize / sizeof(uint64_t);
728 for (unsigned I = 0; I < CSSummarySize; I++)
729 OS.write(0);
730 }
731
732 // Write the hash table.
733 uint64_t HashTableStart = Generator.Emit(OS.OS, *InfoObj);
734
735 // Write the MemProf profile data if we have it.
736 uint64_t MemProfSectionStart = 0;
737 if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf)) {
738 MemProfSectionStart = OS.tell();
739 if (auto E = writeMemProf(OS, MemProfRecordData, MemProfFrameData,
740 MemProfCallStackData, MemProfVersionRequested,
741 MemProfFullSchema))
742 return E;
743 }
744
745 // BinaryIdSection has two parts:
746 // 1. uint64_t BinaryIdsSectionSize
747 // 2. list of binary ids that consist of:
748 // a. uint64_t BinaryIdLength
749 // b. uint8_t BinaryIdData
750 // c. uint8_t Padding (if necessary)
751 uint64_t BinaryIdSectionStart = OS.tell();
752 // Calculate size of binary section.
753 uint64_t BinaryIdsSectionSize = 0;
754
755 // Remove duplicate binary ids.
756 llvm::sort(BinaryIds);
757 BinaryIds.erase(std::unique(BinaryIds.begin(), BinaryIds.end()),
758 BinaryIds.end());
759
760 for (auto BI : BinaryIds) {
761 // Increment by binary id length data type size.
762 BinaryIdsSectionSize += sizeof(uint64_t);
763 // Increment by binary id data length, aligned to 8 bytes.
764 BinaryIdsSectionSize += alignToPowerOf2(BI.size(), sizeof(uint64_t));
765 }
766 // Write binary ids section size.
767 OS.write(BinaryIdsSectionSize);
768
769 for (auto BI : BinaryIds) {
770 uint64_t BILen = BI.size();
771 // Write binary id length.
772 OS.write(BILen);
773 // Write binary id data.
774 for (unsigned K = 0; K < BILen; K++)
775 OS.writeByte(BI[K]);
776 // Write padding if necessary.
777 uint64_t PaddingSize = alignToPowerOf2(BILen, sizeof(uint64_t)) - BILen;
778 for (unsigned K = 0; K < PaddingSize; K++)
779 OS.writeByte(0);
780 }
781
782 uint64_t VTableNamesSectionStart = OS.tell();
783
784 if (!WritePrevVersion) {
785 std::vector<std::string> VTableNameStrs;
786 for (StringRef VTableName : VTableNames.keys())
787 VTableNameStrs.push_back(VTableName.str());
788
789 std::string CompressedVTableNames;
790 if (!VTableNameStrs.empty())
792 VTableNameStrs, compression::zlib::isAvailable(),
793 CompressedVTableNames))
794 return E;
795
796 const uint64_t CompressedStringLen = CompressedVTableNames.length();
797
798 // Record the length of compressed string.
799 OS.write(CompressedStringLen);
800
801 // Write the chars in compressed strings.
802 for (auto &c : CompressedVTableNames)
803 OS.writeByte(static_cast<uint8_t>(c));
804
805 // Pad up to a multiple of 8.
806 // InstrProfReader could read bytes according to 'CompressedStringLen'.
807 const uint64_t PaddedLength = alignTo(CompressedStringLen, 8);
808
809 for (uint64_t K = CompressedStringLen; K < PaddedLength; K++)
810 OS.writeByte(0);
811 }
812
813 uint64_t TemporalProfTracesSectionStart = 0;
814 if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile)) {
815 TemporalProfTracesSectionStart = OS.tell();
816 OS.write(TemporalProfTraces.size());
817 OS.write(TemporalProfTraceStreamSize);
818 for (auto &Trace : TemporalProfTraces) {
819 OS.write(Trace.Weight);
820 OS.write(Trace.FunctionNameRefs.size());
821 for (auto &NameRef : Trace.FunctionNameRefs)
822 OS.write(NameRef);
823 }
824 }
825
826 // Allocate space for data to be serialized out.
827 std::unique_ptr<IndexedInstrProf::Summary> TheSummary =
829 // Compute the Summary and copy the data to the data
830 // structure to be serialized out (to disk or buffer).
831 std::unique_ptr<ProfileSummary> PS = ISB.getSummary();
832 setSummary(TheSummary.get(), *PS);
833 InfoObj->SummaryBuilder = nullptr;
834
835 // For Context Sensitive summary.
836 std::unique_ptr<IndexedInstrProf::Summary> TheCSSummary = nullptr;
837 if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
838 TheCSSummary = IndexedInstrProf::allocSummary(SummarySize);
839 std::unique_ptr<ProfileSummary> CSPS = CSISB.getSummary();
840 setSummary(TheCSSummary.get(), *CSPS);
841 }
842 InfoObj->CSSummaryBuilder = nullptr;
843
844 if (!WritePrevVersion) {
845 // Now do the final patch:
846 PatchItem PatchItems[] = {
847 // Patch the Header.HashOffset field.
848 {HashTableStartFieldOffset, &HashTableStart, 1},
849 // Patch the Header.MemProfOffset (=0 for profiles without MemProf
850 // data).
851 {MemProfSectionOffset, &MemProfSectionStart, 1},
852 // Patch the Header.BinaryIdSectionOffset.
853 {BinaryIdSectionOffset, &BinaryIdSectionStart, 1},
854 // Patch the Header.TemporalProfTracesOffset (=0 for profiles without
855 // traces).
856 {TemporalProfTracesOffset, &TemporalProfTracesSectionStart, 1},
857 {VTableNamesOffset, &VTableNamesSectionStart, 1},
858 // Patch the summary data.
859 {SummaryOffset, reinterpret_cast<uint64_t *>(TheSummary.get()),
860 (int)(SummarySize / sizeof(uint64_t))},
861 {CSSummaryOffset, reinterpret_cast<uint64_t *>(TheCSSummary.get()),
862 (int)CSSummarySize}};
863
864 OS.patch(PatchItems);
865 } else {
866 // Now do the final patch:
867 PatchItem PatchItems[] = {
868 // Patch the Header.HashOffset field.
869 {HashTableStartFieldOffset, &HashTableStart, 1},
870 // Patch the Header.MemProfOffset (=0 for profiles without MemProf
871 // data).
872 {MemProfSectionOffset, &MemProfSectionStart, 1},
873 // Patch the Header.BinaryIdSectionOffset.
874 {BinaryIdSectionOffset, &BinaryIdSectionStart, 1},
875 // Patch the Header.TemporalProfTracesOffset (=0 for profiles without
876 // traces).
877 {TemporalProfTracesOffset, &TemporalProfTracesSectionStart, 1},
878 // Patch the summary data.
879 {SummaryOffset, reinterpret_cast<uint64_t *>(TheSummary.get()),
880 (int)(SummarySize / sizeof(uint64_t))},
881 {CSSummaryOffset, reinterpret_cast<uint64_t *>(TheCSSummary.get()),
882 (int)CSSummarySize}};
883
884 OS.patch(PatchItems);
885 }
886
887 for (const auto &I : FunctionData)
888 for (const auto &F : I.getValue())
889 if (Error E = validateRecord(F.second))
890 return E;
891
892 return Error::success();
893}
894
896 // Write the hash table.
897 ProfOStream POS(OS);
898 return writeImpl(POS);
899}
900
902 ProfOStream POS(OS);
903 return writeImpl(POS);
904}
905
906std::unique_ptr<MemoryBuffer> InstrProfWriter::writeBuffer() {
907 std::string Data;
909 // Write the hash table.
910 if (Error E = write(OS))
911 return nullptr;
912 // Return this in an aligned memory buffer.
914}
915
916static const char *ValueProfKindStr[] = {
917#define VALUE_PROF_KIND(Enumerator, Value, Descr) #Enumerator,
919};
920
922 for (uint32_t VK = 0; VK <= IPVK_Last; VK++) {
923 uint32_t NS = Func.getNumValueSites(VK);
924 if (!NS)
925 continue;
926 for (uint32_t S = 0; S < NS; S++) {
927 uint32_t ND = Func.getNumValueDataForSite(VK, S);
928 std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S);
929 DenseSet<uint64_t> SeenValues;
930 for (uint32_t I = 0; I < ND; I++)
931 if ((VK != IPVK_IndirectCallTarget && VK != IPVK_VTableTarget) &&
932 !SeenValues.insert(VD[I].Value).second)
933 return make_error<InstrProfError>(instrprof_error::invalid_prof);
934 }
935 }
936
937 return Error::success();
938}
939
941 const InstrProfRecord &Func,
942 InstrProfSymtab &Symtab,
944 OS << Name << "\n";
945 OS << "# Func Hash:\n" << Hash << "\n";
946 OS << "# Num Counters:\n" << Func.Counts.size() << "\n";
947 OS << "# Counter Values:\n";
948 for (uint64_t Count : Func.Counts)
949 OS << Count << "\n";
950
951 if (Func.BitmapBytes.size() > 0) {
952 OS << "# Num Bitmap Bytes:\n$" << Func.BitmapBytes.size() << "\n";
953 OS << "# Bitmap Byte Values:\n";
954 for (uint8_t Byte : Func.BitmapBytes) {
955 OS << "0x";
956 OS.write_hex(Byte);
957 OS << "\n";
958 }
959 OS << "\n";
960 }
961
962 uint32_t NumValueKinds = Func.getNumValueKinds();
963 if (!NumValueKinds) {
964 OS << "\n";
965 return;
966 }
967
968 OS << "# Num Value Kinds:\n" << Func.getNumValueKinds() << "\n";
969 for (uint32_t VK = 0; VK < IPVK_Last + 1; VK++) {
970 uint32_t NS = Func.getNumValueSites(VK);
971 if (!NS)
972 continue;
973 OS << "# ValueKind = " << ValueProfKindStr[VK] << ":\n" << VK << "\n";
974 OS << "# NumValueSites:\n" << NS << "\n";
975 for (uint32_t S = 0; S < NS; S++) {
976 uint32_t ND = Func.getNumValueDataForSite(VK, S);
977 OS << ND << "\n";
978 std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S);
979 for (uint32_t I = 0; I < ND; I++) {
980 if (VK == IPVK_IndirectCallTarget || VK == IPVK_VTableTarget)
981 OS << Symtab.getFuncOrVarNameIfDefined(VD[I].Value) << ":"
982 << VD[I].Count << "\n";
983 else
984 OS << VD[I].Value << ":" << VD[I].Count << "\n";
985 }
986 }
987 }
988
989 OS << "\n";
990}
991
993 // Check CS first since it implies an IR level profile.
994 if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
995 OS << "# CSIR level Instrumentation Flag\n:csir\n";
996 else if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
997 OS << "# IR level Instrumentation Flag\n:ir\n";
998
999 if (static_cast<bool>(ProfileKind &
1001 OS << "# Always instrument the function entry block\n:entry_first\n";
1002 if (static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage))
1003 OS << "# Instrument block coverage\n:single_byte_coverage\n";
1004 InstrProfSymtab Symtab;
1005
1007 using RecordType = std::pair<StringRef, FuncPair>;
1008 SmallVector<RecordType, 4> OrderedFuncData;
1009
1010 for (const auto &I : FunctionData) {
1011 if (shouldEncodeData(I.getValue())) {
1012 if (Error E = Symtab.addFuncName(I.getKey()))
1013 return E;
1014 for (const auto &Func : I.getValue())
1015 OrderedFuncData.push_back(std::make_pair(I.getKey(), Func));
1016 }
1017 }
1018
1019 for (const auto &VTableName : VTableNames)
1020 if (Error E = Symtab.addVTableName(VTableName.getKey()))
1021 return E;
1022
1023 if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile))
1025
1026 llvm::sort(OrderedFuncData, [](const RecordType &A, const RecordType &B) {
1027 return std::tie(A.first, A.second.first) <
1028 std::tie(B.first, B.second.first);
1029 });
1030
1031 for (const auto &record : OrderedFuncData) {
1032 const StringRef &Name = record.first;
1033 const FuncPair &Func = record.second;
1034 writeRecordInText(Name, Func.first, Func.second, Symtab, OS);
1035 }
1036
1037 for (const auto &record : OrderedFuncData) {
1038 const FuncPair &Func = record.second;
1039 if (Error E = validateRecord(Func.second))
1040 return E;
1041 }
1042
1043 return Error::success();
1044}
1045
1047 InstrProfSymtab &Symtab) {
1048 OS << ":temporal_prof_traces\n";
1049 OS << "# Num Temporal Profile Traces:\n" << TemporalProfTraces.size() << "\n";
1050 OS << "# Temporal Profile Trace Stream Size:\n"
1051 << TemporalProfTraceStreamSize << "\n";
1052 for (auto &Trace : TemporalProfTraces) {
1053 OS << "# Weight:\n" << Trace.Weight << "\n";
1054 for (auto &NameRef : Trace.FunctionNameRefs)
1055 OS << Symtab.getFuncOrVarName(NameRef) << ",";
1056 OS << "\n";
1057 }
1058 OS << "\n";
1059}
basic Basic Alias true
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
std::string Name
static uint64_t writeMemProfRecords(ProfOStream &OS, llvm::MapVector< GlobalValue::GUID, memprof::IndexedMemProfRecord > &MemProfRecordData, memprof::MemProfSchema *Schema, memprof::IndexedVersion Version)
static Error writeMemProfV1(ProfOStream &OS, llvm::MapVector< GlobalValue::GUID, memprof::IndexedMemProfRecord > &MemProfRecordData, llvm::MapVector< memprof::FrameId, memprof::Frame > &MemProfFrameData)
static uint64_t writeMemProfCallStacks(ProfOStream &OS, llvm::MapVector< memprof::CallStackId, llvm::SmallVector< memprof::FrameId > > &MemProfCallStackData)
static Error writeMemProfV0(ProfOStream &OS, llvm::MapVector< GlobalValue::GUID, memprof::IndexedMemProfRecord > &MemProfRecordData, llvm::MapVector< memprof::FrameId, memprof::Frame > &MemProfFrameData)
static uint64_t writeMemProfFrames(ProfOStream &OS, llvm::MapVector< memprof::FrameId, memprof::Frame > &MemProfFrameData)
static Error writeMemProf(ProfOStream &OS, llvm::MapVector< GlobalValue::GUID, memprof::IndexedMemProfRecord > &MemProfRecordData, llvm::MapVector< memprof::FrameId, memprof::Frame > &MemProfFrameData, llvm::MapVector< memprof::CallStackId, llvm::SmallVector< memprof::FrameId > > &MemProfCallStackData, memprof::IndexedVersion MemProfVersionRequested, bool MemProfFullSchema)
static void setSummary(IndexedInstrProf::Summary *TheSummary, ProfileSummary &PS)
static const char * ValueProfKindStr[]
static Error writeMemProfV2(ProfOStream &OS, llvm::MapVector< GlobalValue::GUID, memprof::IndexedMemProfRecord > &MemProfRecordData, llvm::MapVector< memprof::FrameId, memprof::Frame > &MemProfFrameData, llvm::MapVector< memprof::CallStackId, llvm::SmallVector< memprof::FrameId > > &MemProfCallStackData, bool MemProfFullSchema)
static void writeMemProfSchema(ProfOStream &OS, const memprof::MemProfSchema &Schema)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Defines facilities for reading and writing on-disk hash tables.
#define P(N)
if(VerifyEach)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file implements a set that has insertion order iteration characteristics.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition: DenseMap.h:71
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
static ErrorSuccess success()
Create a success value.
Definition: Error.h:334
static std::pair< offset_type, offset_type > EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V)
const InstrProfWriter::ProfilingData *const data_type_ref
InstrProfSummaryBuilder * SummaryBuilder
static hash_value_type ComputeHash(key_type_ref K)
InstrProfSummaryBuilder * CSSummaryBuilder
void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N)
void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V, offset_type)
const InstrProfWriter::ProfilingData *const data_type
void addRecord(const InstrProfRecord &)
A symbol table used for function [IR]PGO name look-up with keys (such as pointers,...
Definition: InstrProf.h:451
StringRef getFuncOrVarName(uint64_t ValMD5Hash)
Return name of functions or global variables from the name's md5 hash value.
Definition: InstrProf.h:692
Error addVTableName(StringRef VTableName)
Adds VTableName as a known symbol, and inserts it to a map that tracks all vtable names.
Definition: InstrProf.h:580
Error addFuncName(StringRef FuncName)
The method name is kept since there are many callers.
Definition: InstrProf.h:576
StringRef getFuncOrVarNameIfDefined(uint64_t ValMD5Hash)
Just like getFuncOrVarName, except that it will return literal string 'External Symbol' if the functi...
Definition: InstrProf.h:685
Error write(raw_fd_ostream &OS)
Write the profile to OS.
void addTemporalProfileTraces(SmallVectorImpl< TemporalProfTraceTy > &SrcTraces, uint64_t SrcStreamSize)
Add SrcTraces using reservoir sampling where SrcStreamSize is the total number of temporal profiling ...
void overlapRecord(NamedInstrProfRecord &&Other, OverlapStats &Overlap, OverlapStats &FuncLevelOverlap, const OverlapFuncFilters &FuncFilter)
Error writeText(raw_fd_ostream &OS)
Write the profile in text format to OS.
void addBinaryIds(ArrayRef< llvm::object::BuildID > BIs)
void addMemProfRecord(const GlobalValue::GUID Id, const memprof::IndexedMemProfRecord &Record)
Add a memprof record for a function identified by its Id.
static void writeRecordInText(StringRef Name, uint64_t Hash, const InstrProfRecord &Counters, InstrProfSymtab &Symtab, raw_fd_ostream &OS)
Write Record in text format to OS.
void setValueProfDataEndianness(llvm::endianness Endianness)
bool addMemProfCallStack(const memprof::CallStackId CSId, const llvm::SmallVector< memprof::FrameId > &CallStack, function_ref< void(Error)> Warn)
Add a call stack identified by the hash of the contents of the call stack in CallStack.
void addRecord(NamedInstrProfRecord &&I, uint64_t Weight, function_ref< void(Error)> Warn)
Add function counts for the given function.
InstrProfWriter(bool Sparse=false, uint64_t TemporalProfTraceReservoirSize=0, uint64_t MaxTemporalProfTraceLength=0, bool WritePrevVersion=false, memprof::IndexedVersion MemProfVersionRequested=memprof::Version0, bool MemProfFullSchema=false)
void mergeRecordsFromWriter(InstrProfWriter &&IPW, function_ref< void(Error)> Warn)
Merge existing function counts from the given writer.
void writeTextTemporalProfTraceData(raw_fd_ostream &OS, InstrProfSymtab &Symtab)
Write temporal profile trace data to the header in text format to OS.
std::unique_ptr< MemoryBuffer > writeBuffer()
Write the profile, returning the raw data. For testing.
bool addMemProfFrame(const memprof::FrameId, const memprof::Frame &F, function_ref< void(Error)> Warn)
Add a memprof frame identified by the hash of the contents of the frame in FrameId.
void setOutputSparse(bool Sparse)
Error validateRecord(const InstrProfRecord &Func)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
void clear()
Definition: MapVector.h:88
static std::unique_ptr< MemoryBuffer > getMemBufferCopy(StringRef InputData, const Twine &BufferName="")
Open the specified memory range as a MemoryBuffer, copying the contents and taking ownership of it.
Generates an on disk hash table.
offset_type Emit(raw_ostream &Out)
Emit the table to Out, which must not be at offset 0.
void writeByte(uint8_t V)
void patch(ArrayRef< PatchItem > P)
ProfOStream(raw_string_ostream &STR)
support::endian::Writer LE
ProfOStream(raw_fd_ostream &FD)
void write(uint64_t V)
static const ArrayRef< uint32_t > DefaultCutoffs
A vector of useful cutoff values for detailed summary.
Definition: ProfileCommon.h:70
uint64_t getTotalCount() const
uint64_t getMaxCount() const
const SummaryEntryVector & getDetailedSummary()
uint32_t getNumCounts() const
uint64_t getMaxInternalCount() const
uint64_t getMaxFunctionCount() const
uint32_t getNumFunctions() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
iterator_range< StringMapKeyIterator< ValueTy > > keys() const
Definition: StringMap.h:228
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
bool empty() const
Definition: Trace.h:96
unsigned size() const
Definition: Trace.h:95
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
An efficient, type-erasing, non-owning reference to a callable.
A raw_ostream that writes to a file descriptor.
Definition: raw_ostream.h:470
uint64_t seek(uint64_t off)
Flushes the stream and repositions the underlying file descriptor position to the offset specified fr...
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
uint64_t tell() const
tell - Return the current offset with the file.
Definition: raw_ostream.h:150
raw_ostream & write_hex(unsigned long long N)
Output N in hexadecimal, without any prefix or padding.
raw_ostream & write(unsigned char C)
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:660
std::string & str()
Returns the string's reference.
Definition: raw_ostream.h:678
std::unique_ptr< Summary > allocSummary(uint32_t TotalSize)
Definition: InstrProf.h:1275
uint64_t ComputeHash(StringRef K)
Definition: InstrProf.h:1157
const uint64_t Magic
Definition: InstrProf.h:1114
const HashT HashType
Definition: InstrProf.h:1155
constexpr uint64_t MaximumSupportedVersion
Definition: MemProf.h:34
MemProfSchema getHotColdSchema()
Definition: MemProf.cpp:21
constexpr uint64_t MinimumSupportedVersion
Definition: MemProf.h:33
MemProfSchema getFullSchema()
Definition: MemProf.cpp:13
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition: STLExtras.h:853
uint64_t alignToPowerOf2(uint64_t Value, uint64_t Align)
Definition: MathExtras.h:393
auto formatv(const char *Fmt, Ts &&...Vals) -> formatv_object< decltype(std::make_tuple(support::detail::build_format_adapter(std::forward< Ts >(Vals))...))>
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2073
void shuffle(Iterator first, Iterator last, RNG &&g)
Definition: STLExtras.h:1541
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1647
@ Other
Any other memory.
instrprof_error
Definition: InstrProf.h:346
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
Error collectGlobalObjectNameStrings(ArrayRef< std::string > NameStrs, bool doCompression, std::string &Result)
Given a vector of strings (names of global objects like functions or, virtual tables) NameStrs,...
Definition: InstrProf.cpp:626
endianness
Definition: bit.h:70
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
uint64_t Pos
uint64_t * D
Helper object to track which of three possible relocation mechanisms are used for a particular value ...
void set(SummaryFieldKind K, uint64_t V)
Definition: InstrProf.h:1261
void setEntry(uint32_t I, const ProfileSummaryEntry &E)
Definition: InstrProf.h:1267
Profiling information for a single function.
Definition: InstrProf.h:808
std::vector< uint64_t > Counts
Definition: InstrProf.h:809
void merge(InstrProfRecord &Other, uint64_t Weight, function_ref< void(instrprof_error)> Warn)
Merge the counts in Other into this one.
Definition: InstrProf.cpp:864
void overlap(InstrProfRecord &Other, OverlapStats &Overlap, OverlapStats &FuncLevelOverlap, uint64_t ValueCutoff)
Compute the overlap b/w this IntrprofRecord and Other.
Definition: InstrProf.cpp:760
void sortValueData()
Sort value profile data (per site) by count.
Definition: InstrProf.h:883
std::vector< uint8_t > BitmapBytes
Definition: InstrProf.h:810
void scale(uint64_t N, uint64_t D, function_ref< void(instrprof_error)> Warn)
Scale up profile counts (including value profile data) by a factor of (N / D).
Definition: InstrProf.cpp:927
static bool hasCSFlagInHash(uint64_t FuncHash)
Definition: InstrProf.h:1016
const std::string NameFilter
Definition: InstrProf.h:773
void addOneMismatch(const CountSumOrPercent &MismatchFunc)
Definition: InstrProf.cpp:1492
CountSumOrPercent Overlap
Definition: InstrProf.h:735
void addOneUnique(const CountSumOrPercent &UniqueFunc)
Definition: InstrProf.cpp:1502
CountSumOrPercent Test
Definition: InstrProf.h:733
An ordered list of functions identified by their NameRef found in INSTR_PROF_DATA.
Definition: InstrProf.h:377
Function object to check whether the first component of a container supported by std::get (like std::...
Definition: STLExtras.h:1450
void merge(const IndexedMemProfRecord &Other)
Definition: MemProf.h:397
Adapter to write values to a stream in a particular byte order.
Definition: EndianStream.h:67
void write(ArrayRef< value_type > Val)
Definition: EndianStream.h:71