//===- RawMemProfReader.cpp - Instrumented memory profiling reader --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for reading MemProf profiling data.
//
//===----------------------------------------------------------------------===//

#include <cstdint>
#include <memory>
#include <type_traits>

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableObjectFile.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/BuildID.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/ProfileData/MemProfReader.h"
#include "llvm/ProfileData/MemProfYAML.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"

#define DEBUG_TYPE "memprof"

namespace llvm {
namespace memprof {
namespace {
template <class T = uint64_t> inline T alignedRead(const char *Ptr) {
  static_assert(std::is_integral_v<T>, "Not an integral type");
  assert(reinterpret_cast<size_t>(Ptr) % sizeof(T) == 0 && "Unaligned Read");
  return *reinterpret_cast<const T *>(Ptr);
}

Error checkBuffer(const MemoryBuffer &Buffer) {
  if (!RawMemProfReader::hasFormat(Buffer))
    return make_error<InstrProfError>(instrprof_error::bad_magic);

  if (Buffer.getBufferSize() == 0)
    return make_error<InstrProfError>(instrprof_error::empty_raw_profile);

  if (Buffer.getBufferSize() < sizeof(Header)) {
    return make_error<InstrProfError>(instrprof_error::truncated);
  }

  // The size of the buffer can be > header total size since we allow repeated
  // serialization of memprof profiles to the same file.
  uint64_t TotalSize = 0;
  const char *Next = Buffer.getBufferStart();
  while (Next < Buffer.getBufferEnd()) {
    const auto *H = reinterpret_cast<const Header *>(Next);

    // Check if the version in the header is among the supported versions.
    bool IsSupported = false;
    for (auto SupportedVersion : MEMPROF_RAW_SUPPORTED_VERSIONS) {
      if (H->Version == SupportedVersion)
        IsSupported = true;
    }
    if (!IsSupported) {
      return make_error<InstrProfError>(instrprof_error::unsupported_version);
    }

    TotalSize += H->TotalSize;
    Next += H->TotalSize;
  }

  if (Buffer.getBufferSize() != TotalSize) {
    return make_error<InstrProfError>(instrprof_error::malformed);
  }
  return Error::success();
}

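// For reference, the readers below assume this on-disk layout. Each dump
// appended to the file starts with a Header. This is an illustrative summary
// of the format defined by the runtime in MemProfData.inc, not a normative
// schema:
//
//   [Header: Magic, Version, TotalSize, SegmentOffset, MIBOffset, StackOffset]
//   [segment entries]  <- SegmentOffset points here
//   [mem info blocks]  <- MIBOffset points here
//   [call stacks]      <- StackOffset points here
//   ... next Header, if the process dumped the profile more than once ...
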
llvm::SmallVector<SegmentEntry> readSegmentEntries(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
  llvm::SmallVector<SegmentEntry> Items;
  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    Items.push_back(*reinterpret_cast<const SegmentEntry *>(
        Ptr + I * sizeof(SegmentEntry)));
  }
  return Items;
}

llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
readMemInfoBlocksV3(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);

  llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>> Items;
  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    const uint64_t Id =
        endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);

    // We cheat a bit here: V3 and V4 do not have the same fields. V3 is
    // missing AccessHistogramSize and AccessHistogram, so copying a full
    // MemInfoBlock reads "dirty" bytes past the end of the V3 record. It
    // should not segfault, since callstack data is placed after this in the
    // binary format.
    MemInfoBlock MIB = *reinterpret_cast<const MemInfoBlock *>(Ptr);
    // Overwrite the dirty data.
    MIB.AccessHistogramSize = 0;
    MIB.AccessHistogram = 0;

    Items.push_back({Id, MIB});
    // Only advance by the size of a V3 MIB.
    Ptr += MEMPROF_V3_MIB_SIZE;
  }
  return Items;
}

llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
readMemInfoBlocksV4(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);

  llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>> Items;
  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    const uint64_t Id =
        endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
    MemInfoBlock MIB = *reinterpret_cast<const MemInfoBlock *>(Ptr);

    // Only increment by the size of the MIB; the histogram entries that
    // follow are consumed via readNext, which advances Ptr implicitly.
    Ptr += sizeof(MemInfoBlock);

    if (MIB.AccessHistogramSize > 0) {
      // Point the histogram at a newly allocated buffer.
      MIB.AccessHistogram =
          (uintptr_t)malloc(MIB.AccessHistogramSize * sizeof(uint64_t));
    }

    for (uint64_t J = 0; J < MIB.AccessHistogramSize; J++) {
      ((uint64_t *)MIB.AccessHistogram)[J] =
          endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
    }
    Items.push_back({Id, MIB});
  }
  return Items;
}

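// For reference, the V4 MIB section decoded above is laid out as follows
// (illustrative; the authoritative layout lives in the runtime serializer):
//
//   [NumMIBs : u64]
//   NumMIBs x [StackId : u64][MemInfoBlock]
//             [AccessHistogram : u64 x AccessHistogramSize]
//
// V3 records are the same minus the histogram fields, which is why
// readMemInfoBlocksV3 advances by MEMPROF_V3_MIB_SIZE instead.
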
CallStackMap readStackInfo(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
  CallStackMap Items;

  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    const uint64_t StackId =
        endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
    const uint64_t NumPCs =
        endian::readNext<uint64_t, llvm::endianness::little>(Ptr);

    SmallVector<uint64_t> CallStack;
    CallStack.reserve(NumPCs);
    for (uint64_t J = 0; J < NumPCs; J++) {
      CallStack.push_back(
          endian::readNext<uint64_t, llvm::endianness::little>(Ptr));
    }

    Items[StackId] = CallStack;
  }
  return Items;
}

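// The stack section consumed by readStackInfo has the corresponding shape
// (illustrative):
//
//   [NumStacks : u64]
//   NumStacks x [StackId : u64][NumPCs : u64][PC : u64 x NumPCs]
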
// Merges the contents of stack information in \p From to \p To. Returns true
// if any stack ids observed previously map to a different set of program
// counter addresses.
bool mergeStackMap(const CallStackMap &From, CallStackMap &To) {
  for (const auto &[Id, Stack] : From) {
    auto [It, Inserted] = To.try_emplace(Id, Stack);
    // Check that the PCs are the same (in order).
    if (!Inserted && Stack != It->second)
      return true;
  }
  return false;
}

Error report(Error E, const StringRef Context) {
  return joinErrors(createStringError(inconvertibleErrorCode(), Context),
                    std::move(E));
}

bool isRuntimePath(const StringRef Path) {
  const StringRef Filename = llvm::sys::path::filename(Path);
  // This list should be updated in case new files with additional interceptors
  // are added to the memprof runtime.
  return Filename == "memprof_malloc_linux.cpp" ||
         Filename == "memprof_interceptors.cpp" ||
         Filename == "memprof_new_delete.cpp";
}

std::string getBuildIdString(const SegmentEntry &Entry) {
  // If the build id is unset, print a helpful string instead of all zeros.
  if (Entry.BuildIdSize == 0)
    return "<None>";

  std::string Str;
  raw_string_ostream OS(Str);
  for (size_t I = 0; I < Entry.BuildIdSize; I++) {
    OS << format_hex_no_prefix(Entry.BuildId[I], 2);
  }
  return OS.str();
}
} // namespace

Expected<std::unique_ptr<RawMemProfReader>>
RawMemProfReader::create(const Twine &Path, const StringRef ProfiledBinary,
                         bool KeepName) {
  auto BufferOr = MemoryBuffer::getFileOrSTDIN(Path);
  if (std::error_code EC = BufferOr.getError())
    return report(errorCodeToError(EC), Path.getSingleStringRef());

  std::unique_ptr<MemoryBuffer> Buffer(BufferOr.get().release());
  return create(std::move(Buffer), ProfiledBinary, KeepName);
}

Expected<std::unique_ptr<RawMemProfReader>>
RawMemProfReader::create(std::unique_ptr<MemoryBuffer> Buffer,
                         const StringRef ProfiledBinary, bool KeepName) {
  if (Error E = checkBuffer(*Buffer))
    return report(std::move(E), Buffer->getBufferIdentifier());

  if (ProfiledBinary.empty()) {
    // Peek at the build ids to print a helpful error message.
    const std::vector<std::string> BuildIds = peekBuildIds(Buffer.get());
    std::string ErrorMessage(
        R"(Path to profiled binary is empty, expected binary with one of the following build ids:
)");
    for (const auto &Id : BuildIds) {
      ErrorMessage += "\n BuildId: ";
      ErrorMessage += Id;
    }
    return report(
        make_error<StringError>(ErrorMessage, inconvertibleErrorCode()),
        /*Context=*/"");
  }

  auto BinaryOr = llvm::object::createBinary(ProfiledBinary);
  if (!BinaryOr) {
    return report(BinaryOr.takeError(), ProfiledBinary);
  }

  // Use new here since the constructor is private.
  std::unique_ptr<RawMemProfReader> Reader(
      new RawMemProfReader(std::move(BinaryOr.get()), KeepName));
  if (Error E = Reader->initialize(std::move(Buffer))) {
    return std::move(E);
  }
  return std::move(Reader);
}
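
// Illustrative usage sketch (the file names are hypothetical): create a
// reader for a raw profile together with the binary it was collected from,
// then walk the symbolized records via the reader's iterator.
//
//   auto ReaderOr = RawMemProfReader::create("memprof.profraw", "a.out");
//   if (!ReaderOr)
//     report_fatal_error(ReaderOr.takeError());
//   for (const auto &[GUID, Record] : **ReaderOr) {
//     // Consume the MemProfRecord for the function identified by GUID.
//   }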

// We need to make sure that all leftover MIB histograms that have not been
// freed by merge are freed here.
RawMemProfReader::~RawMemProfReader() {
  for (auto &[_, MIB] : CallstackProfileData) {
    if (MemprofRawVersion >= 4ULL && MIB.AccessHistogramSize > 0) {
      free((void *)MIB.AccessHistogram);
    }
  }
}

bool RawMemProfReader::hasFormat(const StringRef Path) {
  auto BufferOr = MemoryBuffer::getFileOrSTDIN(Path);
  if (!BufferOr)
    return false;

  std::unique_ptr<MemoryBuffer> Buffer(BufferOr.get().release());
  return hasFormat(*Buffer);
}

bool RawMemProfReader::hasFormat(const MemoryBuffer &Buffer) {
  if (Buffer.getBufferSize() < sizeof(uint64_t))
    return false;
  // Aligned read to sanity check that the buffer was allocated with at least
  // 8b alignment.
  const uint64_t Magic = alignedRead(Buffer.getBufferStart());
  return Magic == MEMPROF_RAW_MAGIC_64;
}

void RawMemProfReader::printYAML(raw_ostream &OS) {
  uint64_t NumAllocFunctions = 0, NumMibInfo = 0;
  for (const auto &KV : MemProfData.Records) {
    const size_t NumAllocSites = KV.second.AllocSites.size();
    if (NumAllocSites > 0) {
      NumAllocFunctions++;
      NumMibInfo += NumAllocSites;
    }
  }

  OS << "MemprofProfile:\n";
  OS << "  Summary:\n";
  OS << "    Version: " << MemprofRawVersion << "\n";
  OS << "    NumSegments: " << SegmentInfo.size() << "\n";
  OS << "    NumMibInfo: " << NumMibInfo << "\n";
  OS << "    NumAllocFunctions: " << NumAllocFunctions << "\n";
  OS << "    NumStackOffsets: " << StackMap.size() << "\n";
  // Print out the segment information.
  OS << "  Segments:\n";
  for (const auto &Entry : SegmentInfo) {
    OS << "  -\n";
    OS << "    BuildId: " << getBuildIdString(Entry) << "\n";
    OS << "    Start: 0x" << llvm::utohexstr(Entry.Start) << "\n";
    OS << "    End: 0x" << llvm::utohexstr(Entry.End) << "\n";
    OS << "    Offset: 0x" << llvm::utohexstr(Entry.Offset) << "\n";
  }
  // Print out the merged contents of the profiles.
  OS << "  Records:\n";
  for (const auto &[GUID, Record] : *this) {
    OS << "  -\n";
    OS << "    FunctionGUID: " << GUID << "\n";
    Record.print(OS);
  }
}

Error RawMemProfReader::initialize(std::unique_ptr<MemoryBuffer> DataBuffer) {
  const StringRef FileName = Binary.getBinary()->getFileName();

  auto *ElfObject = dyn_cast<object::ELFObjectFileBase>(Binary.getBinary());
  if (!ElfObject) {
    return report(make_error<StringError>(Twine("Not an ELF file: "),
                                          inconvertibleErrorCode()),
                  FileName);
  }

  // Check whether the profiled binary was built with position-independent
  // code (PIC). Perform sanity checks for assumptions we rely on to simplify
  // symbolization.
  auto *Elf64LEObject = llvm::cast<llvm::object::ELF64LEObjectFile>(ElfObject);
  const llvm::object::ELF64LEFile &ElfFile = Elf64LEObject->getELFFile();
  auto PHdrsOr = ElfFile.program_headers();
  if (!PHdrsOr)
    return report(
        make_error<StringError>(Twine("Could not read program headers: "),
                                inconvertibleErrorCode()),
        FileName);

  int NumExecutableSegments = 0;
  for (const auto &Phdr : *PHdrsOr) {
    if (Phdr.p_type == ELF::PT_LOAD) {
      if (Phdr.p_flags & ELF::PF_X) {
        // We assume only one text segment in the main binary for simplicity
        // and to reduce the overhead of checking multiple ranges during
        // symbolization.
        if (++NumExecutableSegments > 1) {
          return report(
              make_error<StringError>(
                  "Expect only one executable load segment in the binary",
                  inconvertibleErrorCode()),
              FileName);
        }
        // The segment will always be loaded at a page boundary, so expect it
        // to be aligned already. Assume a 4K page size for the machine from
        // which the profile was collected. This should be fine for now; if we
        // want to support other page sizes, it can be recorded in the raw
        // profile during collection.
        PreferredTextSegmentAddress = Phdr.p_vaddr;
        assert(Phdr.p_vaddr == (Phdr.p_vaddr & ~(0x1000 - 1U)) &&
               "Expect p_vaddr to always be page aligned");
        assert(Phdr.p_offset == 0 && "Expect p_offset = 0 for symbolization.");
      }
    }
  }

  auto Triple = ElfObject->makeTriple();
  if (!Triple.isX86())
    return report(make_error<StringError>(Twine("Unsupported target: ") +
                                              Triple.getArchName(),
                                          inconvertibleErrorCode()),
                  FileName);

  // Process the raw profile.
  if (Error E = readRawProfile(std::move(DataBuffer)))
    return E;

  if (Error E = setupForSymbolization())
    return E;

  auto *Object = cast<object::ObjectFile>(Binary.getBinary());
  std::unique_ptr<DIContext> Context = DWARFContext::create(
      *Object, DWARFContext::ProcessDebugRelocations::Process);

  auto SOFOr = symbolize::SymbolizableObjectFile::create(
      Object, std::move(Context), /*UntagAddresses=*/false);
  if (!SOFOr)
    return report(SOFOr.takeError(), FileName);
  auto Symbolizer = std::move(SOFOr.get());

  // The symbolizer ownership is moved into symbolizeAndFilterStackFrames so
  // that it is freed automatically at the end, when it is no longer used. This
  // reduces peak memory since it won't be live while also mapping the raw
  // profile into records afterwards.
  if (Error E = symbolizeAndFilterStackFrames(std::move(Symbolizer)))
    return E;

  return mapRawProfileToRecords();
}

Error RawMemProfReader::setupForSymbolization() {
  auto *Object = cast<object::ObjectFile>(Binary.getBinary());
  object::BuildIDRef BinaryId = object::getBuildID(Object);
  if (BinaryId.empty())
    return make_error<StringError>(Twine("No build id found in binary ") +
                                       Binary.getBinary()->getFileName(),
                                   inconvertibleErrorCode());

  int NumMatched = 0;
  for (const auto &Entry : SegmentInfo) {
    llvm::ArrayRef<uint8_t> SegmentId(Entry.BuildId, Entry.BuildIdSize);
    if (BinaryId == SegmentId) {
      // We assume only one text segment in the main binary for simplicity
      // and to reduce the overhead of checking multiple ranges during
      // symbolization.
      if (++NumMatched > 1) {
        return make_error<StringError>(
            "We expect only one executable segment in the profiled binary",
            inconvertibleErrorCode());
      }
      ProfiledTextSegmentStart = Entry.Start;
      ProfiledTextSegmentEnd = Entry.End;
    }
  }
  assert(NumMatched != 0 && "No matching executable segments in segment info.");
  assert((PreferredTextSegmentAddress == 0 ||
          (PreferredTextSegmentAddress == ProfiledTextSegmentStart)) &&
         "Expect text segment address to be 0 or equal to profiled text "
         "segment start.");
  return Error::success();
}

Error RawMemProfReader::mapRawProfileToRecords() {
  // Hold a mapping from function to each callsite location we encounter within
  // it that is part of some dynamic allocation context. The location is stored
  // as a pointer to a symbolized list of inline frames.
  using LocationPtr = const llvm::SmallVector<FrameId> *;
  llvm::MapVector<GlobalValue::GUID, llvm::SetVector<LocationPtr>>
      PerFunctionCallSites;

  // Convert the raw profile callstack data into memprof records. While doing
  // so, keep track of related contexts so that we can fill these in later.
  for (const auto &[StackId, MIB] : CallstackProfileData) {
    auto It = StackMap.find(StackId);
    if (It == StackMap.end())
      return make_error<InstrProfError>(
          instrprof_error::hash_mismatch,
          "memprof callstack record does not contain id: " + Twine(StackId));

    // Construct the symbolized callstack.
    llvm::SmallVector<FrameId> Callstack;
    Callstack.reserve(It->getSecond().size());

    llvm::ArrayRef<uint64_t> Addresses = It->getSecond();
    for (size_t I = 0; I < Addresses.size(); I++) {
      const uint64_t Address = Addresses[I];
      assert(SymbolizedFrame.count(Address) > 0 &&
             "Address not found in SymbolizedFrame map");
      const SmallVector<FrameId> &Frames = SymbolizedFrame[Address];

      assert(!idToFrame(Frames.back()).IsInlineFrame &&
             "The last frame should not be inlined");

      // Record the callsites for each function. Skip the first frame of the
      // first address since it is the allocation site itself that is recorded
      // as an alloc site.
      for (size_t J = 0; J < Frames.size(); J++) {
        if (I == 0 && J == 0)
          continue;
        // We attach the entire bottom-up frame here for the callsite even
        // though we only need the frames up to and including the frame for
        // Frames[J].Function. This will enable better deduplication for
        // compression in the future.
        const GlobalValue::GUID Guid = idToFrame(Frames[J]).Function;
        PerFunctionCallSites[Guid].insert(&Frames);
      }

      // Add all the frames to the current allocation callstack.
      Callstack.append(Frames.begin(), Frames.end());
    }

    CallStackId CSId = MemProfData.addCallStack(Callstack);

    // We attach the memprof record to each function bottom-up including the
    // first non-inline frame.
    for (size_t I = 0; /*Break out using the condition below*/; I++) {
      const Frame &F = idToFrame(Callstack[I]);
      IndexedMemProfRecord &Record = MemProfData.Records[F.Function];
      Record.AllocSites.emplace_back(CSId, MIB);

      if (!F.IsInlineFrame)
        break;
    }
  }

  // Fill in the related callsites per function.
  for (const auto &[Id, Locs] : PerFunctionCallSites) {
    // Some functions may have only callsite data and no allocation data. Here
    // we insert a new entry for callsite data if we need to.
    IndexedMemProfRecord &Record = MemProfData.Records[Id];
    for (LocationPtr Loc : Locs)
      Record.CallSiteIds.push_back(MemProfData.addCallStack(*Loc));
  }

  return Error::success();
}
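
// For example (illustrative): if an allocation call sits in function A and A
// was inlined into B, the symbolized leaf address yields frames {A (inline),
// B}, so the loop above records the alloc site on both A's and B's records,
// stopping after the first non-inline frame.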

Error RawMemProfReader::symbolizeAndFilterStackFrames(
    std::unique_ptr<llvm::symbolize::SymbolizableModule> Symbolizer) {
  // The specifier to use when symbolization is requested.
  const DILineInfoSpecifier Specifier(
      DILineInfoSpecifier::FileLineInfoKind::RawValue,
      DILineInfoSpecifier::FunctionNameKind::LinkageName);

  // For entries where all PCs in the callstack are discarded, we erase the
  // entry from the stack map.
  llvm::SmallVector<uint64_t> EntriesToErase;
  // We keep track of all prior discarded entries so that we can avoid invoking
  // the symbolizer for such entries.
  llvm::DenseSet<uint64_t> AllVAddrsToDiscard;
  for (auto &Entry : StackMap) {
    for (const uint64_t VAddr : Entry.getSecond()) {
      // Check if we have already symbolized and cached the result or if we
      // don't want to attempt symbolization since we know this address is bad.
      // In this case the address is also removed from the current callstack.
      if (SymbolizedFrame.count(VAddr) > 0 ||
          AllVAddrsToDiscard.contains(VAddr))
        continue;

      Expected<DIInliningInfo> DIOr = Symbolizer->symbolizeInlinedCode(
          getModuleOffset(VAddr), Specifier, /*UseSymbolTable=*/false);
      if (!DIOr)
        return DIOr.takeError();
      DIInliningInfo DI = DIOr.get();

      // Drop frames which we can't symbolize or if they belong to the runtime.
      if (DI.getFrame(0).FunctionName == DILineInfo::BadString ||
          isRuntimePath(DI.getFrame(0).FileName)) {
        AllVAddrsToDiscard.insert(VAddr);
        continue;
      }

      for (size_t I = 0, NumFrames = DI.getNumberOfFrames(); I < NumFrames;
           I++) {
        const auto &DIFrame = DI.getFrame(I);
        const uint64_t Guid =
            IndexedMemProfRecord::getGUID(DIFrame.FunctionName);
        const Frame F(Guid, DIFrame.Line - DIFrame.StartLine, DIFrame.Column,
                      // Only the last entry is not an inlined location.
                      I != NumFrames - 1);
        // Here we retain a mapping from the GUID to canonical symbol name
        // instead of adding it to the frame object directly to reduce memory
        // overhead. This is because there can be many unique frames,
        // particularly for callsite frames.
        if (KeepSymbolName) {
          StringRef CanonicalName =
              sampleprof::FunctionSamples::getCanonicalFnName(
                  DIFrame.FunctionName);
          GuidToSymbolName.insert({Guid, CanonicalName.str()});
        }

        SymbolizedFrame[VAddr].push_back(MemProfData.addFrame(F));
      }
    }

    auto &CallStack = Entry.getSecond();
    llvm::erase_if(CallStack, [&AllVAddrsToDiscard](const uint64_t A) {
      return AllVAddrsToDiscard.contains(A);
    });
    if (CallStack.empty())
      EntriesToErase.push_back(Entry.getFirst());
  }

  // Drop the entries where the callstack is empty.
  for (const uint64_t Id : EntriesToErase) {
    StackMap.erase(Id);
    if (CallstackProfileData[Id].AccessHistogramSize > 0)
      free((void *)CallstackProfileData[Id].AccessHistogram);
    CallstackProfileData.erase(Id);
  }

  if (StackMap.empty())
    return make_error<InstrProfError>(
        instrprof_error::malformed,
        "no entries in callstack map after symbolization");

  return Error::success();
}

std::vector<std::string>
RawMemProfReader::peekBuildIds(MemoryBuffer *DataBuffer) {
  const char *Next = DataBuffer->getBufferStart();
  // Use a SetVector since a profile file may contain multiple raw profile
  // dumps, each with segment information. We want them unique and in the order
  // they were stored in the profile; the profiled binary should be the first
  // entry. The runtime uses dl_iterate_phdr and the "... first object visited
  // by callback is the main program."
  // https://man7.org/linux/man-pages/man3/dl_iterate_phdr.3.html
  llvm::SetVector<std::string, std::vector<std::string>,
                  llvm::SmallSet<std::string, 10>>
      BuildIds;
  while (Next < DataBuffer->getBufferEnd()) {
    const auto *Header = reinterpret_cast<const memprof::Header *>(Next);

    const llvm::SmallVector<SegmentEntry> Entries =
        readSegmentEntries(Next + Header->SegmentOffset);

    for (const auto &Entry : Entries)
      BuildIds.insert(getBuildIdString(Entry));

    Next += Header->TotalSize;
  }
  return BuildIds.takeVector();
}

// FIXME: Add a schema for serializing, similar to IndexedMemProfReader. This
// will make it easier to deserialize different raw memprof versions.
llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
RawMemProfReader::readMemInfoBlocks(const char *Ptr) {
  if (MemprofRawVersion == 3ULL)
    return readMemInfoBlocksV3(Ptr);
  if (MemprofRawVersion == 4ULL)
    return readMemInfoBlocksV4(Ptr);
  llvm_unreachable(
      "Panic: Unsupported version number when reading MemInfoBlocks");
}

Error RawMemProfReader::readRawProfile(
    std::unique_ptr<MemoryBuffer> DataBuffer) {
  const char *Next = DataBuffer->getBufferStart();

  while (Next < DataBuffer->getBufferEnd()) {
    const auto *Header = reinterpret_cast<const memprof::Header *>(Next);

    // Set the reader version to the raw version of this profile. Whether the
    // version is supported was already checked before the reader was created.
    MemprofRawVersion = Header->Version;

    // Read in the segment information and check whether it is the same across
    // all profiles in this binary file.
    const llvm::SmallVector<SegmentEntry> Entries =
        readSegmentEntries(Next + Header->SegmentOffset);
    if (!SegmentInfo.empty() && SegmentInfo != Entries) {
      // We do not expect segment information to change when deserializing from
      // the same binary profile file. This can happen if dynamic libraries are
      // loaded/unloaded between profile dumps.
      return make_error<InstrProfError>(
          instrprof_error::malformed,
          "memprof raw profile has different segment information");
    }
    SegmentInfo.assign(Entries.begin(), Entries.end());

    // Read in the MemInfoBlocks. Merge them based on stack id; we assume that
    // raw profiles in the same binary file are from the same process, so the
    // stackdepot ids are the same.
    for (const auto &[Id, MIB] : readMemInfoBlocks(Next + Header->MIBOffset)) {
      if (CallstackProfileData.count(Id)) {
        if (MemprofRawVersion >= 4ULL &&
            (CallstackProfileData[Id].AccessHistogramSize > 0 ||
             MIB.AccessHistogramSize > 0)) {
          // The shorter of the two histogram buffers is freed after the merge
          // to avoid leaking it.
          uintptr_t ShorterHistogram;
          if (CallstackProfileData[Id].AccessHistogramSize >
              MIB.AccessHistogramSize)
            ShorterHistogram = MIB.AccessHistogram;
          else
            ShorterHistogram = CallstackProfileData[Id].AccessHistogram;
          CallstackProfileData[Id].Merge(MIB);
          free((void *)ShorterHistogram);
        } else {
          CallstackProfileData[Id].Merge(MIB);
        }
      } else {
        CallstackProfileData[Id] = MIB;
      }
    }

    // Read in the callstack for each id. For multiple raw profiles in the
    // same file, we expect that the callstack is the same for a unique id.
    const CallStackMap CSM = readStackInfo(Next + Header->StackOffset);
    if (StackMap.empty()) {
      StackMap = CSM;
    } else {
      if (mergeStackMap(CSM, StackMap))
        return make_error<InstrProfError>(
            instrprof_error::malformed,
            "memprof raw profile got different call stack for same id");
    }

    Next += Header->TotalSize;
  }

  return Error::success();
}

object::SectionedAddress
RawMemProfReader::getModuleOffset(const uint64_t VirtualAddress) {
  if (VirtualAddress > ProfiledTextSegmentStart &&
      VirtualAddress <= ProfiledTextSegmentEnd) {
    // For PIE binaries, the preferred address is zero and we adjust the
    // virtual address by the start of the profiled segment, assuming that the
    // offset of the segment in the binary is zero. For non-PIE binaries the
    // preferred and profiled segment addresses should be equal and this is a
    // no-op.
    const uint64_t AdjustedAddress =
        VirtualAddress + PreferredTextSegmentAddress - ProfiledTextSegmentStart;
    return object::SectionedAddress{AdjustedAddress};
  }
  // Addresses which do not originate from the profiled text segment in the
  // binary are not adjusted. These will fail symbolization and be filtered
  // out during processing.
  return object::SectionedAddress{VirtualAddress};
}
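
// Worked example with made-up numbers: for a PIE binary the preferred text
// segment address is 0, so with ProfiledTextSegmentStart = 0x555555554000 a
// profiled address 0x555555555234 becomes
// 0x555555555234 + 0x0 - 0x555555554000 = 0x1234, the module-relative offset
// handed to the symbolizer.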

Error RawMemProfReader::readNextRecord(
    GuidMemProfRecordPair &GuidRecord,
    std::function<const Frame(const FrameId)> Callback) {
  // Create a new callback for the RawMemProfRecord iterator so that we can
  // provide the symbol name if the reader was initialized with KeepSymbolName
  // = true. This is useful for debugging and testing.
  auto IdToFrameCallback = [this](const FrameId Id) {
    Frame F = this->idToFrame(Id);
    if (!this->KeepSymbolName)
      return F;
    auto Iter = this->GuidToSymbolName.find(F.Function);
    assert(Iter != this->GuidToSymbolName.end());
    F.SymbolName = std::make_unique<std::string>(Iter->getSecond());
    return F;
  };
  return MemProfReader::readNextRecord(GuidRecord, IdToFrameCallback);
}

Expected<std::unique_ptr<YAMLMemProfReader>>
YAMLMemProfReader::create(const Twine &Path) {
  auto BufferOr = MemoryBuffer::getFileOrSTDIN(Path);
  if (std::error_code EC = BufferOr.getError())
    return report(errorCodeToError(EC), Path.getSingleStringRef());

  std::unique_ptr<MemoryBuffer> Buffer(BufferOr.get().release());
  return create(std::move(Buffer));
}

Expected<std::unique_ptr<YAMLMemProfReader>>
YAMLMemProfReader::create(std::unique_ptr<MemoryBuffer> Buffer) {
  auto Reader = std::make_unique<YAMLMemProfReader>();
  Reader->parse(Buffer->getBuffer());
  return std::move(Reader);
}

bool YAMLMemProfReader::hasFormat(const StringRef Path) {
  auto BufferOr = MemoryBuffer::getFileOrSTDIN(Path);
  if (!BufferOr)
    return false;

  std::unique_ptr<MemoryBuffer> Buffer(BufferOr.get().release());
  return hasFormat(*Buffer);
}

bool YAMLMemProfReader::hasFormat(const MemoryBuffer &Buffer) {
  return Buffer.getBuffer().starts_with("---");
}
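
// A YAML profile is recognized purely by its leading "---" document marker.
// Illustrative input shape (the exact keys come from the YAML traits in
// MemProfYAML.h; treat this sketch as an assumption, not a schema):
//
//   ---
//   HeapProfileRecords:
//     - GUID:       ...
//       AllocSites: ...  # each entry pairs a call stack with MIB counters
//       CallSites:  ...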

void YAMLMemProfReader::parse(StringRef YAMLData) {
  AllMemProfData Doc;
  yaml::Input Yin(YAMLData);

  Yin >> Doc;
  if (Yin.error())
    return;

  // Add a call stack to MemProfData.CallStacks and return its CallStackId.
  auto AddCallStack = [&](ArrayRef<Frame> CallStack) -> CallStackId {
    SmallVector<FrameId> IndexedCallStack;
    IndexedCallStack.reserve(CallStack.size());
    for (const Frame &F : CallStack)
      IndexedCallStack.push_back(MemProfData.addFrame(F));
    return MemProfData.addCallStack(std::move(IndexedCallStack));
  };

  for (const auto &[GUID, Record] : Doc.HeapProfileRecords) {
    IndexedMemProfRecord IndexedRecord;

    // Convert AllocationInfo to IndexedAllocationInfo.
    for (const AllocationInfo &AI : Record.AllocSites) {
      CallStackId CSId = AddCallStack(AI.CallStack);
      IndexedRecord.AllocSites.emplace_back(CSId, AI.Info);
    }

    // Populate CallSiteIds.
    for (const auto &CallSite : Record.CallSites) {
      CallStackId CSId = AddCallStack(CallSite);
      IndexedRecord.CallSiteIds.push_back(CSId);
    }

    MemProfData.Records.try_emplace(GUID, std::move(IndexedRecord));
  }
}
} // namespace memprof
} // namespace llvm