Bug Summary

File: bolt/runtime/instr.cpp
Warning: line 1094, column 27
Array access (from variable 'CallFreqs') results in a null pointer dereference
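
In short: the analyzer finds a path through Graph::computeEdgeFrequencies (line 1122) on which D.NumCalls is 0, so nullptr is stored to the field 'CallFreqs'; that null pointer is then passed to NodeToCallsMap::visitAllCallsIn (line 1224) and indexed at line 1094. A minimal sketch of the flagged pattern (simplified, hypothetical names, not the exact BOLT code):

    uint64_t *CallFreqs = NumCalls ? new uint64_t[NumCalls] : nullptr; // step 7
    // ... later, while visiting one node's calls:
    for (uint32_t I = 0, E = Entry.NumCalls; I != E; ++I)  // steps 29-30
      CallFreqs[I] = Counters[I];                          // step 33: null dereference

The code relies on the invariant that NumCalls == 0 implies Entry.NumCalls == 0 for every node; the analyzer cannot prove that invariant, so it reports the path annotated below.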

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name instr.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -target-cpu x86-64 -target-feature -x87 -target-feature -mmx -target-feature -sse -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins/tools/bolt/bolt_rt-bins -fcoverage-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins/tools/bolt/bolt_rt-bins -resource-dir /usr/lib/llvm-19/lib/clang/19 -I . -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-19/lib/clang/19/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -source-date-epoch 1713179664 -O3 -std=c++17 -fdeprecated-macro -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2024-04-15-133351-72004-1 -x c++ /build/source/bolt/runtime/instr.cpp
1//===- bolt/runtime/instr.cpp ---------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// BOLT runtime instrumentation library for x86 Linux. Currently, BOLT does
10// not support linking modules with dependencies on one another into the final
11// binary (TODO?), which means this library has to be self-contained in a single
12// module.
13//
14// All extern declarations here need to be defined by BOLT itself. Those will be
15// undefined symbols that BOLT needs to resolve by emitting these symbols with
16// MCStreamer. Currently, Passes/Instrumentation.cpp is the pass responsible
17// for defining the symbols here and these two files have a tight coupling: one
18// working statically when you run BOLT and another during program runtime when
19// you run an instrumented binary. The main goal here is to output an fdata file
20// (BOLT profile) with the instrumentation counters inserted by the static pass.
21// Counters for indirect calls are an exception, as we can't know them
22// statically. These counters are created and managed here. To allow this, we
23// need a minimal framework for allocating memory dynamically. We provide this
24// with the BumpPtrAllocator class (not LLVM's, but our own version of it).
25//
26// Since this code is intended to be inserted into any executable, we decided to
27// make it standalone and not depend on any external libraries (i.e. language
28// support libraries, such as glibc or stdc++). To allow this, we provide a few
29// light implementations of common OS interacting functionalities using direct
30// syscall wrappers. Our simple allocator doesn't manage deallocations that
31// fragment the memory space, so it's stack based. This is the minimal framework
32// provided here to allow processing instrumented counters and writing fdata.
33//
34// In the C++ idiom used here, we never use or rely on constructors or
35// destructors for global objects. That's because those need support from the
36// linker in initialization/finalization code, and we want to keep our linker
37// very simple. Similarly, we don't create any global objects that are zero
38// initialized, since those would need to go in .bss, which our simple linker also
39// doesn't support (TODO?).
40//
41//===----------------------------------------------------------------------===//
42
43#include "common.h"
44
45// Enables very verbose logging to stderr, useful when debugging
46//#define ENABLE_DEBUG
47
48#ifdef ENABLE_DEBUG
49#define DEBUG(X) \
50 { X; }
51#else
52#define DEBUG(X) \
53 {}
54#endif
55
56#pragma GCC visibility push(hidden)
57
58extern "C" {
59
60#if defined(__APPLE__)
61extern uint64_t* _bolt_instr_locations_getter();
62extern uint32_t _bolt_num_counters_getter();
63
64extern uint8_t* _bolt_instr_tables_getter();
65extern uint32_t _bolt_instr_num_funcs_getter();
66
67#else
68
69// Main counters inserted by instrumentation, incremented during runtime when
70// points of interest (locations) in the program are reached. Those are direct
71// calls and direct and indirect branches (local ones). There are also counters
72// for basic block execution if they are a spanning tree leaf and need to be
73// counted in order to infer the execution count of other edges of the CFG.
74extern uint64_t __bolt_instr_locations[];
75extern uint32_t __bolt_num_counters;
76// Descriptions are serialized metadata about binary functions written by BOLT,
77// so we have a minimal understanding about the program structure. For a
78// reference on the exact format of this metadata, see *Description structs,
79// Location, InstrumentedNode and EntryNode.
80// Number of indirect call site descriptions
81extern uint32_t __bolt_instr_num_ind_calls;
82// Number of indirect call target descriptions
83extern uint32_t __bolt_instr_num_ind_targets;
84// Number of function descriptions
85extern uint32_t __bolt_instr_num_funcs;
86// Time to sleep across dumps (when we write the fdata profile to disk)
87extern uint32_t __bolt_instr_sleep_time;
88// Do not clear counters across dumps, rewrite file with the updated values
89extern bool __bolt_instr_no_counters_clear;
90// Wait until all forks of the instrumented process finish
91extern bool __bolt_instr_wait_forks;
92// Filename to dump data to
93extern char __bolt_instr_filename[];
94// Instrumented binary file path
95extern char __bolt_instr_binpath[];
96// If true, append current PID to the fdata filename when creating it so
97// different invocations of the same program can be differentiated.
98extern bool __bolt_instr_use_pid;
99// Functions that will be used to instrument indirect calls. BOLT static pass
100// will identify indirect calls and modify them to load the address in these
101// trampolines and call this address instead. BOLT can't use direct calls to
102// our handlers because our addresses here are not known at analysis time. We
103// only support resolving dependencies from this file to the output of BOLT,
104// *not* the other way around.
105// TODO: We need better linking support to make that happen.
106extern void (*__bolt_ind_call_counter_func_pointer)();
107extern void (*__bolt_ind_tailcall_counter_func_pointer)();
108// Function pointers to init/fini trampoline routines in the binary, so we can
109// resume regular execution of these functions that we hooked
110extern void __bolt_start_trampoline();
111extern void __bolt_fini_trampoline();
112
113#endif
114}
115
116namespace {
117
118/// A simple allocator that mmaps a fixed size region and manages this space
119/// in a stack fashion, meaning you always deallocate the last element that
120/// was allocated. In practice, we don't need to deallocate individual elements.
121/// We monotonically increase our usage and then deallocate everything once we
122/// are done processing something.
123class BumpPtrAllocator {
124 /// This is written before each allocation and acts as a canary to detect when
125 /// a bug caused our program to cross allocation boundaries.
126 struct EntryMetadata {
127 uint64_t Magic;
128 uint64_t AllocSize;
129 };
130
131public:
132 void *allocate(size_t Size) {
133 Lock L(M);
134
135 if (StackBase == nullptr) {
136 StackBase = reinterpret_cast<uint8_t *>(
137 __mmap(0, MaxSize, PROT_READ | PROT_WRITE,
138 (Shared ? MAP_SHARED : MAP_PRIVATE) | MAP_ANONYMOUS, -1, 0));
139 assert(StackBase != MAP_FAILED,
140 "BumpPtrAllocator: failed to mmap stack!");
141 StackSize = 0;
142 }
143
144 Size = alignTo(Size + sizeof(EntryMetadata), 16);
145 uint8_t *AllocAddress = StackBase + StackSize + sizeof(EntryMetadata);
146 auto *M = reinterpret_cast<EntryMetadata *>(StackBase + StackSize);
147 M->Magic = Magic;
148 M->AllocSize = Size;
149 StackSize += Size;
150 assert(StackSize < MaxSize, "allocator ran out of memory");
151 return AllocAddress;
152 }
153
154#ifdef DEBUG
155 /// Element-wise deallocation is only used for debugging to catch memory
156 /// bugs by checking magic bytes. Ordinarily, we reset the allocator once
157 /// we are done with it. Reset is done with clear(). There's no need
158 /// to deallocate each element individually.
159 void deallocate(void *Ptr) {
160 Lock L(M);
161 uint8_t MetadataOffset = sizeof(EntryMetadata);
162 auto *M = reinterpret_cast<EntryMetadata *>(
163 reinterpret_cast<uint8_t *>(Ptr) - MetadataOffset);
164 const uint8_t *StackTop = StackBase + StackSize + MetadataOffset;
165 // Validate size
166 if (Ptr != StackTop - M->AllocSize) {
167 // Failed validation, check if it is a pointer returned by operator new []
168 MetadataOffset +=
169 sizeof(uint64_t); // Space for number of elements alloc'ed
170 M = reinterpret_cast<EntryMetadata *>(reinterpret_cast<uint8_t *>(Ptr) -
171 MetadataOffset);
172 // Ok, it failed both checks if this assertion fails. Stop the program, we
173 // have a memory bug.
174 assert(Ptr == StackTop - M->AllocSize,
175 "must deallocate the last element alloc'ed");
176 }
177 assert(M->Magic == Magic, "allocator magic is corrupt");
178 StackSize -= M->AllocSize;
179 }
180#else
181 void deallocate(void *) {}
182#endif
183
184 void clear() {
185 Lock L(M);
186 StackSize = 0;
187 }
188
189 /// Set mmap reservation size (only relevant before first allocation)
190 void setMaxSize(uint64_t Size) { MaxSize = Size; }
191
192 /// Set mmap reservation privacy (only relevant before first allocation)
193 void setShared(bool S) { Shared = S; }
194
195 void destroy() {
196 if (StackBase == nullptr)
197 return;
198 __munmap(StackBase, MaxSize);
199 }
200
201 // Placement operator to construct allocator in possibly shared mmaped memory
202 static void *operator new(size_t, void *Ptr) { return Ptr; };
203
204private:
205 static constexpr uint64_t Magic = 0x1122334455667788ull;
206 uint64_t MaxSize = 0xa00000;
207 uint8_t *StackBase{nullptr};
208 uint64_t StackSize{0};
209 bool Shared{false};
210 Mutex M;
211};
212
213/// Used for allocating indirect call instrumentation counters. Initialized by
214/// __bolt_instr_setup, our initialization routine.
215BumpPtrAllocator *GlobalAlloc;
216
217// Base address which we subtract from recorded PC values when searching for
218// indirect call description entries. Needed because indCall descriptions are
219// mapped read-only and contain static addresses. Initialized in
220// __bolt_instr_setup.
221uint64_t TextBaseAddress = 0;
222
223// Storage for GlobalAlloc which can be shared if not using
224// instrumentation-file-append-pid.
225void *GlobalMetadataStorage;
226
227} // anonymous namespace
228
229// User-defined placement new operators. We only use those (as opposed to
230// overriding the regular operator new) so we can keep our allocator in the
231// stack instead of in a data section (global).
232void *operator new(size_t Sz, BumpPtrAllocator &A) { return A.allocate(Sz); }
233void *operator new(size_t Sz, BumpPtrAllocator &A, char C) {
234 auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
235 memset(Ptr, C, Sz);
236 return Ptr;
237}
238void *operator new[](size_t Sz, BumpPtrAllocator &A) {
239 return A.allocate(Sz);
240}
241void *operator new[](size_t Sz, BumpPtrAllocator &A, char C) {
242 auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
243 memset(Ptr, C, Sz);
244 return Ptr;
245}
246// Only called during exception unwinding (useless). We must manually dealloc.
247// C++ language weirdness
248void operator delete(void *Ptr, BumpPtrAllocator &A) { A.deallocate(Ptr); }
249
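Taken together with BumpPtrAllocator, the operators below support a usage pattern like the following (an illustrative sketch; the names are not from instr.cpp):

    BumpPtrAllocator A;
    uint64_t *Freqs = new (A, 0) uint64_t[64]; // zero-filled via the char overload
    // ... use Freqs ...
    A.deallocate(Freqs); // LIFO discipline; magic-checked in debug builds, no-op otherwise
    A.clear();           // or reset the entire stack at once
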
250namespace {
251
252// Disable instrumentation optimizations that sacrifice profile accuracy
253extern "C" bool __bolt_instr_conservative;
254
255/// Basic key-val atom stored in our hash
256struct SimpleHashTableEntryBase {
257 uint64_t Key;
258 uint64_t Val;
259 void dump(const char *Msg = nullptr) {
260 // TODO: make some sort of formatting function
261 // Currently we have to do it the ugly way because
262 // we want every message to be printed atomically via a single call to
263// __write. If we use reportNumber() and others multiple times, we'll get
264// garbage in a multithreaded environment.
265 char Buf[BufSize];
266 char *Ptr = Buf;
267 Ptr = intToStr(Ptr, __getpid(), 10);
268 *Ptr++ = ':';
269 *Ptr++ = ' ';
270 if (Msg)
271 Ptr = strCopy(Ptr, Msg, strLen(Msg));
272 *Ptr++ = '0';
273 *Ptr++ = 'x';
274 Ptr = intToStr(Ptr, (uint64_t)this, 16);
275 *Ptr++ = ':';
276 *Ptr++ = ' ';
277 Ptr = strCopy(Ptr, "MapEntry(0x", sizeof("MapEntry(0x") - 1);
278 Ptr = intToStr(Ptr, Key, 16);
279 *Ptr++ = ',';
280 *Ptr++ = ' ';
281 *Ptr++ = '0';
282 *Ptr++ = 'x';
283 Ptr = intToStr(Ptr, Val, 16);
284 *Ptr++ = ')';
285 *Ptr++ = '\n';
286 assert(Ptr - Buf < BufSize, "Buffer overflow!");
287 // print everything all at once for atomicity
288 __write(2, Buf, Ptr - Buf);
289 }
290};
291
292/// This hash table implementation starts by allocating a table of size
293/// InitialSize. When conflicts happen in this main table, it resolves
294/// them by chaining a new table of size IncSize. It never reallocs as our
295/// allocator doesn't support it. The key is intended to be function pointers.
296/// There's no clever hash function (it's just x mod size, size being prime).
297/// I never tuned the coefficients in the modular equation (TODO).
298/// This is used for indirect calls (each call site has one of these, so it
299/// should have a small footprint) and for tallying call counts globally for
300/// each target to check if we missed the origin of some calls (this one is a
301/// large instantiation of this template, since it is global for all call sites)
302template <typename T = SimpleHashTableEntryBase, uint32_t InitialSize = 7,
303 uint32_t IncSize = 7>
304class SimpleHashTable {
305public:
306 using MapEntry = T;
307
308 /// Increment by 1 the value of \p Key. If it is not in this table, it will be
309 /// added to the table and its value set to 1.
310 void incrementVal(uint64_t Key, BumpPtrAllocator &Alloc) {
311 if (!__bolt_instr_conservative) {
312 TryLock L(M);
313 if (!L.isLocked())
314 return;
315 auto &E = getOrAllocEntry(Key, Alloc);
316 ++E.Val;
317 return;
318 }
319 Lock L(M);
320 auto &E = getOrAllocEntry(Key, Alloc);
321 ++E.Val;
322 }
323
324 /// Basic member accessing interface. Here we pass the allocator explicitly to
325 /// avoid storing a pointer to it as part of this table (remember there is one
326 /// hash for each indirect call site, so we want to minimize our footprint).
327 MapEntry &get(uint64_t Key, BumpPtrAllocator &Alloc) {
328 if (!__bolt_instr_conservative) {
329 TryLock L(M);
330 if (!L.isLocked())
331 return NoEntry;
332 return getOrAllocEntry(Key, Alloc);
333 }
334 Lock L(M);
335 return getOrAllocEntry(Key, Alloc);
336 }
337
338 /// Traverses all elements in the table
339 template <typename... Args>
340 void forEachElement(void (*Callback)(MapEntry &, Args...), Args... args) {
341 Lock L(M);
342 if (!TableRoot)
343 return;
344 return forEachElement(Callback, InitialSize, TableRoot, args...);
345 }
346
347 void resetCounters();
348
349private:
350 constexpr static uint64_t VacantMarker = 0;
351 constexpr static uint64_t FollowUpTableMarker = 0x8000000000000000ull;
352
353 MapEntry *TableRoot{nullptr};
354 MapEntry NoEntry;
355 Mutex M;
356
357 template <typename... Args>
358 void forEachElement(void (*Callback)(MapEntry &, Args...),
359 uint32_t NumEntries, MapEntry *Entries, Args... args) {
360 for (uint32_t I = 0; I < NumEntries; ++I) {
361 MapEntry &Entry = Entries[I];
362 if (Entry.Key == VacantMarker)
363 continue;
364 if (Entry.Key & FollowUpTableMarker) {
365 MapEntry *Next =
366 reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker);
367 assert(Next != Entries, "Circular reference!");
368 forEachElement(Callback, IncSize, Next, args...);
369 continue;
370 }
371 Callback(Entry, args...);
372 }
373 }
374
375 MapEntry &firstAllocation(uint64_t Key, BumpPtrAllocator &Alloc) {
376 TableRoot = new (Alloc, 0) MapEntry[InitialSize];
377 MapEntry &Entry = TableRoot[Key % InitialSize];
378 Entry.Key = Key;
379 // DEBUG(Entry.dump("Created root entry: "));
380 return Entry;
381 }
382
383 MapEntry &getEntry(MapEntry *Entries, uint64_t Key, uint64_t Selector,
384 BumpPtrAllocator &Alloc, int CurLevel) {
385 // DEBUG(reportNumber("getEntry called, level ", CurLevel, 10));
386 const uint32_t NumEntries = CurLevel == 0 ? InitialSize : IncSize;
387 uint64_t Remainder = Selector / NumEntries;
388 Selector = Selector % NumEntries;
389 MapEntry &Entry = Entries[Selector];
390
391 // A hit
392 if (Entry.Key == Key) {
393 // DEBUG(Entry.dump("Hit: "));
394 return Entry;
395 }
396
397 // Vacant - add new entry
398 if (Entry.Key == VacantMarker) {
399 Entry.Key = Key;
400 // DEBUG(Entry.dump("Adding new entry: "));
401 return Entry;
402 }
403
404 // Defer to the next level
405 if (Entry.Key & FollowUpTableMarker) {
406 return getEntry(
407 reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker),
408 Key, Remainder, Alloc, CurLevel + 1);
409 }
410
411 // Conflict - create the next level
412 // DEBUG(Entry.dump("Creating new level: "));
413
414 MapEntry *NextLevelTbl = new (Alloc, 0) MapEntry[IncSize];
415 // DEBUG(
416 // reportNumber("Newly allocated level: 0x", uint64_t(NextLevelTbl),
417 // 16));
418 uint64_t CurEntrySelector = Entry.Key / InitialSize;
419 for (int I = 0; I < CurLevel; ++I)
420 CurEntrySelector /= IncSize;
421 CurEntrySelector = CurEntrySelector % IncSize;
422 NextLevelTbl[CurEntrySelector] = Entry;
423 Entry.Key = reinterpret_cast<uint64_t>(NextLevelTbl) | FollowUpTableMarker;
424 assert((NextLevelTbl[CurEntrySelector].Key & ~FollowUpTableMarker) !=
425 uint64_t(Entries),
426 "circular reference created!\n");
427 // DEBUG(NextLevelTbl[CurEntrySelector].dump("New level entry: "));
428 // DEBUG(Entry.dump("Updated old entry: "));
429 return getEntry(NextLevelTbl, Key, Remainder, Alloc, CurLevel + 1);
430 }
431
432 MapEntry &getOrAllocEntry(uint64_t Key, BumpPtrAllocator &Alloc) {
433 if (TableRoot) {
434 MapEntry &E = getEntry(TableRoot, Key, Key, Alloc, 0);
435 assert(!(E.Key & FollowUpTableMarker), "Invalid entry!");
436 return E;
437 }
438 return firstAllocation(Key, Alloc);
439 }
440};
441
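To make the chained probing concrete, here is a worked example with the default InitialSize = IncSize = 7 (illustrative key values): inserting Key = 10 places it in root slot 10 % 7 = 3. Inserting Key = 38 hashes to the same root slot (38 % 7 = 3), so a second-level table of IncSize entries is allocated; the resident entry moves to slot (10 / 7) % 7 = 1 of that table, the root slot is rewritten as the new table's address ORed with FollowUpTableMarker, and Key = 38 lands in second-level slot (38 / 7) % 7 = 5.
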
442template <typename T> void resetIndCallCounter(T &Entry) {
443 Entry.Val = 0;
444}
445
446template <typename T, uint32_t X, uint32_t Y>
447void SimpleHashTable<T, X, Y>::resetCounters() {
448 forEachElement(resetIndCallCounter);
449}
450
451/// Represents a hash table mapping a function target address to its counter.
452using IndirectCallHashTable = SimpleHashTable<>;
453
454/// Initialize with number 1 instead of 0 so we don't go into .bss. This is the
455/// global array of all hash tables storing indirect call destinations happening
456/// during runtime, one table per call site.
457IndirectCallHashTable *GlobalIndCallCounters{
458 reinterpret_cast<IndirectCallHashTable *>(1)};
459
460/// Don't allow reentrancy in the fdata writing phase - only one thread writes
461/// it
462Mutex *GlobalWriteProfileMutex{reinterpret_cast<Mutex *>(1)};
463
464/// Store number of calls in addition to target address (Key) and frequency
465/// as perceived by the basic block counter (Val).
466struct CallFlowEntryBase : public SimpleHashTableEntryBase {
467 uint64_t Calls;
468};
469
470using CallFlowHashTableBase = SimpleHashTable<CallFlowEntryBase, 11939, 233>;
471
472/// This is a large table indexing all possible call targets (indirect and
473/// direct ones). The goal is to find mismatches between number of calls (for
474/// those calls we were able to track) and the entry basic block counter of the
475/// callee. In most cases, these two should be equal. If not, there are two
476/// possible scenarios here:
477///
478/// * Entry BB has higher frequency than all known calls to this function.
479/// In this case, we have dynamic library code or any uninstrumented code
480/// calling this function. We will write the profile for these untracked
481/// calls as having source "0 [unknown] 0" in the fdata file.
482///
483/// * Number of known calls is higher than the frequency of entry BB
484/// This only happens when there is no counter for the entry BB / callee
485/// function is not simple (in BOLT terms). We don't do anything special
486/// here and just ignore those (we still report all calls to the non-simple
487/// function, though).
488///
489class CallFlowHashTable : public CallFlowHashTableBase {
490public:
491 CallFlowHashTable(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
492
493 MapEntry &get(uint64_t Key) { return CallFlowHashTableBase::get(Key, Alloc); }
494
495private:
496 // Unlike the hash table for indirect call targets, we do store the
497 // allocator here since there is only one call flow hash and space overhead
498 // is negligible.
499 BumpPtrAllocator &Alloc;
500};
501
502///
503/// Description metadata emitted by BOLT to describe the program - refer to
504/// Passes/Instrumentation.cpp - Instrumentation::emitTablesAsELFNote()
505///
506struct Location {
507 uint32_t FunctionName;
508 uint32_t Offset;
509};
510
511struct CallDescription {
512 Location From;
513 uint32_t FromNode;
514 Location To;
515 uint32_t Counter;
516 uint64_t TargetAddress;
517};
518
519using IndCallDescription = Location;
520
521struct IndCallTargetDescription {
522 Location Loc;
523 uint64_t Address;
524};
525
526struct EdgeDescription {
527 Location From;
528 uint32_t FromNode;
529 Location To;
530 uint32_t ToNode;
531 uint32_t Counter;
532};
533
534struct InstrumentedNode {
535 uint32_t Node;
536 uint32_t Counter;
537};
538
539struct EntryNode {
540 uint64_t Node;
541 uint64_t Address;
542};
543
544struct FunctionDescription {
545 uint32_t NumLeafNodes;
546 const InstrumentedNode *LeafNodes;
547 uint32_t NumEdges;
548 const EdgeDescription *Edges;
549 uint32_t NumCalls;
550 const CallDescription *Calls;
551 uint32_t NumEntryNodes;
552 const EntryNode *EntryNodes;
553
554 /// Constructor will parse the serialized function metadata written by BOLT
555 FunctionDescription(const uint8_t *FuncDesc);
556
557 uint64_t getSize() const {
558 return 16 + NumLeafNodes * sizeof(InstrumentedNode) +
559 NumEdges * sizeof(EdgeDescription) +
560 NumCalls * sizeof(CallDescription) +
561 NumEntryNodes * sizeof(EntryNode);
562 }
563};
564
565/// The context is created when the fdata profile needs to be written to disk
566/// and we need to interpret our runtime counters. It contains pointers to the
567/// mmaped binary (only the BOLT written metadata section). Deserialization
568/// should be straightforward as most data is POD or an array of POD elements.
569/// This metadata is used to reconstruct function CFGs.
570struct ProfileWriterContext {
571 IndCallDescription *IndCallDescriptions;
572 IndCallTargetDescription *IndCallTargets;
573 uint8_t *FuncDescriptions;
574 char *Strings; // String table with function names used in this binary
575 int FileDesc; // File descriptor for the file on disk backing this
576 // information in memory via mmap
577 void *MMapPtr; // The mmap ptr
578 int MMapSize; // The mmap size
579
580 /// Hash table storing all possible call destinations to detect untracked
581 /// calls and correctly report them as [unknown] in output fdata.
582 CallFlowHashTable *CallFlowTable;
583
584 /// Lookup the sorted indirect call target vector to fetch function name and
585 /// offset for an arbitrary function pointer.
586 const IndCallTargetDescription *lookupIndCallTarget(uint64_t Target) const;
587};
588
589/// Performs a string comparison and returns zero if Str1 matches Str2. Compares
590/// at most Size characters.
591int compareStr(const char *Str1, const char *Str2, int Size) {
592 while (*Str1 == *Str2) {
593 if (*Str1 == '\0' || --Size == 0)
594 return 0;
595 ++Str1;
596 ++Str2;
597 }
598 return 1;
599}
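
Note that the semantics differ from strncmp: it returns 0 on a match and 1 on any mismatch, with no ordering information. A few illustrative calls:

    compareStr("abc", "abc", 16);      // 0: match, stops at '\0'
    compareStr("abc", "abd", 16);      // 1: mismatch
    compareStr("abcdef", "abcxyz", 3); // 0: only the first 3 characters compared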
600
601/// Output Location to the fdata file
602char *serializeLoc(const ProfileWriterContext &Ctx, char *OutBuf,
603 const Location Loc, uint32_t BufSize) {
604 // fdata location format: Type Name Offset
605 // Type 1 - regular symbol
606 OutBuf = strCopy(OutBuf, "1 ");
607 const char *Str = Ctx.Strings + Loc.FunctionName;
608 uint32_t Size = 25;
609 while (*Str) {
610 *OutBuf++ = *Str++;
611 if (++Size >= BufSize)
612 break;
613 }
614 assert(!*Str, "buffer overflow, function name too large");
615 *OutBuf++ = ' ';
616 OutBuf = intToStr(OutBuf, Loc.Offset, 16);
617 *OutBuf++ = ' ';
618 return OutBuf;
619}
620
621/// Read and deserialize a function description written by BOLT. \p FuncDesc
622/// points at the beginning of the function metadata structure in the file.
623/// See Instrumentation::emitTablesAsELFNote()
624FunctionDescription::FunctionDescription(const uint8_t *FuncDesc) {
625 NumLeafNodes = *reinterpret_cast<const uint32_t *>(FuncDesc);
626 DEBUG(reportNumber("NumLeafNodes = ", NumLeafNodes, 10));
627 LeafNodes = reinterpret_cast<const InstrumentedNode *>(FuncDesc + 4);
628
629 NumEdges = *reinterpret_cast<const uint32_t *>(
630 FuncDesc + 4 + NumLeafNodes * sizeof(InstrumentedNode));
631 DEBUG(reportNumber("NumEdges = ", NumEdges, 10));
632 Edges = reinterpret_cast<const EdgeDescription *>(
633 FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode));
634
635 NumCalls = *reinterpret_cast<const uint32_t *>(
636 FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode) +
637 NumEdges * sizeof(EdgeDescription));
638 DEBUG(reportNumber("NumCalls = ", NumCalls, 10));
639 Calls = reinterpret_cast<const CallDescription *>(
640 FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
641 NumEdges * sizeof(EdgeDescription));
642 NumEntryNodes = *reinterpret_cast<const uint32_t *>(
643 FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
644 NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
645 DEBUG(reportNumber("NumEntryNodes = ", NumEntryNodes, 10));
646 EntryNodes = reinterpret_cast<const EntryNode *>(
647 FuncDesc + 16 + NumLeafNodes * sizeof(InstrumentedNode) +
648 NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
649}
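
The serialized layout the constructor above walks, reconstructed from its offsets (each count is a uint32_t, matching the fixed 16 bytes in getSize()):

    offset 0:                         uint32_t NumLeafNodes
    offset 4:                         InstrumentedNode LeafNodes[NumLeafNodes]
    offset 4 + leaf bytes:            uint32_t NumEdges
    offset 8 + leaf bytes:            EdgeDescription Edges[NumEdges]
    offset 8 + leaf + edge bytes:     uint32_t NumCalls
    offset 12 + leaf + edge bytes:    CallDescription Calls[NumCalls]
    offset 12 + leaf + edge + calls:  uint32_t NumEntryNodes
    offset 16 + leaf + edge + calls:  EntryNode EntryNodes[NumEntryNodes]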
650
651/// Read and mmap descriptions written by BOLT from the executable's notes
652/// section
653#if defined(HAVE_ELF_H) and !defined(__APPLE__)
654
655void *__attribute__((noinline)) __get_pc() {
656 return __builtin_extract_return_addr(__builtin_return_address(0));
657}
658
659/// Parse a string of the form <hex1>-<hex2> into the pair <StartAddress, EndAddress>
660bool parseAddressRange(const char *Str, uint64_t &StartAddress,
661 uint64_t &EndAddress) {
662 if (!Str)
663 return false;
664 // Parsed string format: <hex1>-<hex2>
665 StartAddress = hexToLong(Str, '-');
666 while (*Str && *Str != '-')
667 ++Str;
668 if (!*Str)
669 return false;
670 ++Str; // swallow '-'
671 EndAddress = hexToLong(Str);
672 return true;
673}
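
For example, given a /proc/self/map_files entry name (hypothetical addresses):

    uint64_t Start, End;
    if (parseAddressRange("7f2c4e200000-7f2c4e4f1000", Start, End)) {
      // Start == 0x7f2c4e200000, End == 0x7f2c4e4f1000
    }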
674
675/// Get the full path to the real binary by taking the current virtual address
676/// and searching /proc/self/map_files for the link whose address range
677/// contains it
678static char *getBinaryPath() {
679 const uint32_t BufSize = 1024;
680 const uint32_t NameMax = 4096;
681 const char DirPath[] = "/proc/self/map_files/";
682 static char TargetPath[NameMax] = {};
683 char Buf[BufSize];
684
685 if (__bolt_instr_binpath[0] != '\0')
686 return __bolt_instr_binpath;
687
688 if (TargetPath[0] != '\0')
689 return TargetPath;
690
691 unsigned long CurAddr = (unsigned long)__get_pc();
692 uint64_t FDdir = __open(DirPath, O_RDONLY,
693 /*mode=*/0666);
694 assert(static_cast<int64_t>(FDdir) >= 0,
695 "failed to open /proc/self/map_files");
696
697 while (long Nread = __getdents64(FDdir, (struct dirent64 *)Buf, BufSize)) {
698 assert(static_cast<int64_t>(Nread) != -1, "failed to get folder entries");
699
700 struct dirent64 *d;
701 for (long Bpos = 0; Bpos < Nread; Bpos += d->d_reclen) {
702 d = (struct dirent64 *)(Buf + Bpos);
703
704 uint64_t StartAddress, EndAddress;
705 if (!parseAddressRange(d->d_name, StartAddress, EndAddress))
706 continue;
707 if (CurAddr < StartAddress || CurAddr > EndAddress)
708 continue;
709 char FindBuf[NameMax];
710 char *C = strCopy(FindBuf, DirPath, NameMax);
711 C = strCopy(C, d->d_name, NameMax - (C - FindBuf));
712 *C = '\0';
713 uint32_t Ret = __readlink(FindBuf, TargetPath, sizeof(TargetPath));
714 assert(Ret != -1 && Ret != BufSize, "readlink error");
715 TargetPath[Ret] = '\0';
716 return TargetPath;
717 }
718 }
719 return nullptr;
720}
721
722ProfileWriterContext readDescriptions() {
723 ProfileWriterContext Result;
724 char *BinPath = getBinaryPath();
725 assert(BinPath && BinPath[0] != '\0', "failed to find binary path");
726
727 uint64_t FD = __open(BinPath, O_RDONLY,
728 /*mode=*/0666);
729 assert(static_cast<int64_t>(FD) >= 0, "failed to open binary path");
730
731 Result.FileDesc = FD;
732
733 // mmap our binary to memory
734 uint64_t Size = __lseek(FD, 0, SEEK_END);
735 uint8_t *BinContents = reinterpret_cast<uint8_t *>(
736 __mmap(0, Size, PROT_READ, MAP_PRIVATE, FD, 0));
737 assert(BinContents != MAP_FAILED, "readDescriptions: Failed to mmap self!");
738 Result.MMapPtr = BinContents;
739 Result.MMapSize = Size;
740 Elf64_Ehdr *Hdr = reinterpret_cast<Elf64_Ehdr *>(BinContents);
741 Elf64_Shdr *Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff);
742 Elf64_Shdr *StringTblHeader = reinterpret_cast<Elf64_Shdr *>(
743 BinContents + Hdr->e_shoff + Hdr->e_shstrndx * Hdr->e_shentsize);
744
745 // Find .bolt.instr.tables with the data we need and set pointers to it
746 for (int I = 0; I < Hdr->e_shnum; ++I) {
747 char *SecName = reinterpret_cast<char *>(
748 BinContents + StringTblHeader->sh_offset + Shdr->sh_name);
749 if (compareStr(SecName, ".bolt.instr.tables", 64) != 0) {
750 Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff +
751 (I + 1) * Hdr->e_shentsize);
752 continue;
753 }
754 // Actual contents of the ELF note start after offset 20 decimal:
755 // Offset 0: Producer name size (4 bytes)
756 // Offset 4: Contents size (4 bytes)
757 // Offset 8: Note type (4 bytes)
758 // Offset 12: Producer name (BOLT\0) (5 bytes + align to 4-byte boundary)
759 // Offset 20: Contents
760 uint32_t IndCallDescSize =
761 *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 20);
762 uint32_t IndCallTargetDescSize = *reinterpret_cast<uint32_t *>(
763 BinContents + Shdr->sh_offset + 24 + IndCallDescSize);
764 uint32_t FuncDescSize =
765 *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 28 +
766 IndCallDescSize + IndCallTargetDescSize);
767 Result.IndCallDescriptions = reinterpret_cast<IndCallDescription *>(
768 BinContents + Shdr->sh_offset + 24);
769 Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
770 BinContents + Shdr->sh_offset + 28 + IndCallDescSize);
771 Result.FuncDescriptions = BinContents + Shdr->sh_offset + 32 +
772 IndCallDescSize + IndCallTargetDescSize;
773 Result.Strings = reinterpret_cast<char *>(
774 BinContents + Shdr->sh_offset + 32 + IndCallDescSize +
775 IndCallTargetDescSize + FuncDescSize);
776 return Result;
777 }
778 const char ErrMsg[] =
779 "BOLT instrumentation runtime error: could not find section "
780 ".bolt.instr.tables\n";
781 reportError(ErrMsg, sizeof(ErrMsg));
782 return Result;
783}
784
785#else
786
787ProfileWriterContext readDescriptions() {
788 ProfileWriterContext Result;
789 uint8_t *Tables = _bolt_instr_tables_getter();
790 uint32_t IndCallDescSize = *reinterpret_cast<uint32_t *>(Tables);
791 uint32_t IndCallTargetDescSize =
792 *reinterpret_cast<uint32_t *>(Tables + 4 + IndCallDescSize);
793 uint32_t FuncDescSize = *reinterpret_cast<uint32_t *>(
794 Tables + 8 + IndCallDescSize + IndCallTargetDescSize);
795 Result.IndCallDescriptions =
796 reinterpret_cast<IndCallDescription *>(Tables + 4);
797 Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
798 Tables + 8 + IndCallDescSize);
799 Result.FuncDescriptions =
800 Tables + 12 + IndCallDescSize + IndCallTargetDescSize;
801 Result.Strings = reinterpret_cast<char *>(
802 Tables + 12 + IndCallDescSize + IndCallTargetDescSize + FuncDescSize);
803 return Result;
804}
805
806#endif
807
808#if !defined(__APPLE__)
809/// Debug by printing overall metadata global numbers to check they are sane
810void printStats(const ProfileWriterContext &Ctx) {
811 char StatMsg[BufSize];
812 char *StatPtr = StatMsg;
813 StatPtr =
814 strCopy(StatPtr,
815 "\nBOLT INSTRUMENTATION RUNTIME STATISTICS\n\nIndCallDescSize: ");
816 StatPtr = intToStr(StatPtr,
817 Ctx.FuncDescriptions -
818 reinterpret_cast<uint8_t *>(Ctx.IndCallDescriptions),
819 10);
820 StatPtr = strCopy(StatPtr, "\nFuncDescSize: ");
821 StatPtr = intToStr(
822 StatPtr,
823 reinterpret_cast<uint8_t *>(Ctx.Strings) - Ctx.FuncDescriptions, 10);
824 StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_ind_calls: ");
825 StatPtr = intToStr(StatPtr, __bolt_instr_num_ind_calls, 10);
826 StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_funcs: ");
827 StatPtr = intToStr(StatPtr, __bolt_instr_num_funcs, 10);
828 StatPtr = strCopy(StatPtr, "\n");
829 __write(2, StatMsg, StatPtr - StatMsg);
830}
831#endif
832
833
834/// This is part of a simple CFG representation in memory, where we store
835/// a dynamically sized array of input and output edges per node, and store
836/// a dynamically sized array of nodes per graph. We also store the spanning
837/// tree edges for that CFG in a separate array of nodes in
838/// \p SpanningTreeNodes, while the regular nodes live in \p CFGNodes.
839struct Edge {
840 uint32_t Node; // Index in the nodes array of this edge's destination
841 uint32_t ID; // Edge index in an array comprising all edges of the graph
842};
843
844/// A regular graph node or a spanning tree node
845struct Node {
846 uint32_t NumInEdges{0}; // Input edge count used to size InEdge
847 uint32_t NumOutEdges{0}; // Output edge count used to size OutEdges
848 Edge *InEdges{nullptr}; // Created and managed by \p Graph
849 Edge *OutEdges{nullptr}; // ditto
850};
851
852/// Main class for CFG representation in memory. Manages object creation and
853/// destruction, populates an array of CFG nodes as well as corresponding
854/// spanning tree nodes.
855struct Graph {
856 uint32_t NumNodes;
857 Node *CFGNodes;
858 Node *SpanningTreeNodes;
859 uint64_t *EdgeFreqs;
860 uint64_t *CallFreqs;
861 BumpPtrAllocator &Alloc;
862 const FunctionDescription &D;
863
864 /// Reads a list of edges from function description \p D and builds
865 /// the graph from it. Allocates several internal dynamic structures that are
866 /// later destroyed by ~Graph() and uses \p Alloc. D.LeafNodes contains all
867 /// spanning tree leaf node descriptions (their counters). They are the seed
868 /// used to compute the rest of the missing edge counts in a bottom-up
869 /// traversal of the spanning tree.
870 Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
871 const uint64_t *Counters, ProfileWriterContext &Ctx);
872 ~Graph();
873 void dump() const;
874
875private:
876 void computeEdgeFrequencies(const uint64_t *Counters,
877 ProfileWriterContext &Ctx);
878 void dumpEdgeFreqs() const;
879};
880
881Graph::Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
882 const uint64_t *Counters, ProfileWriterContext &Ctx)
883 : Alloc(Alloc), D(D) {
884 DEBUG(reportNumber("G = 0x", (uint64_t)this, 16));
885 // First pass to determine number of nodes
886 int32_t MaxNodes = -1;
887 CallFreqs = nullptr;
888 EdgeFreqs = nullptr;
889 for (int I = 0; I < D.NumEdges; ++I) {
890 if (static_cast<int32_t>(D.Edges[I].FromNode) > MaxNodes)
891 MaxNodes = D.Edges[I].FromNode;
892 if (static_cast<int32_t>(D.Edges[I].ToNode) > MaxNodes)
893 MaxNodes = D.Edges[I].ToNode;
894 }
895
896 for (int I = 0; I < D.NumLeafNodes; ++I)
897 if (static_cast<int32_t>(D.LeafNodes[I].Node) > MaxNodes)
898 MaxNodes = D.LeafNodes[I].Node;
899
900 for (int I = 0; I < D.NumCalls; ++I)
901 if (static_cast<int32_t>(D.Calls[I].FromNode) > MaxNodes)
902 MaxNodes = D.Calls[I].FromNode;
903
904 // No nodes? Nothing to do
905 if (MaxNodes < 0) {
906 DEBUG(report("No nodes!\n"));
907 CFGNodes = nullptr;
908 SpanningTreeNodes = nullptr;
909 NumNodes = 0;
910 return;
911 }
912 ++MaxNodes;
913 DEBUG(reportNumber("NumNodes = ", MaxNodes, 10));
914 NumNodes = static_cast<uint32_t>(MaxNodes);
915
916 // Initial allocations
917 CFGNodes = new (Alloc) Node[MaxNodes];
918
919 DEBUG(reportNumber("G->CFGNodes = 0x", (uint64_t)CFGNodes, 16));
920 SpanningTreeNodes = new (Alloc) Node[MaxNodes];
921 DEBUG(reportNumber("G->SpanningTreeNodes = 0x",
922 (uint64_t)SpanningTreeNodes, 16));
923
924 // Figure out how much to allocate to each vector (in/out edge sets)
925 for (int I = 0; I < D.NumEdges; ++I) {
926 CFGNodes[D.Edges[I].FromNode].NumOutEdges++;
927 CFGNodes[D.Edges[I].ToNode].NumInEdges++;
928 if (D.Edges[I].Counter != 0xffffffff)
929 continue;
930
931 SpanningTreeNodes[D.Edges[I].FromNode].NumOutEdges++;
932 SpanningTreeNodes[D.Edges[I].ToNode].NumInEdges++;
933 }
934
935 // Allocate in/out edge sets
936 for (int I = 0; I < MaxNodes; ++I) {
937 if (CFGNodes[I].NumInEdges > 0)
938 CFGNodes[I].InEdges = new (Alloc) Edge[CFGNodes[I].NumInEdges];
939 if (CFGNodes[I].NumOutEdges > 0)
940 CFGNodes[I].OutEdges = new (Alloc) Edge[CFGNodes[I].NumOutEdges];
941 if (SpanningTreeNodes[I].NumInEdges > 0)
942 SpanningTreeNodes[I].InEdges =
943 new (Alloc) Edge[SpanningTreeNodes[I].NumInEdges];
944 if (SpanningTreeNodes[I].NumOutEdges > 0)
945 SpanningTreeNodes[I].OutEdges =
946 new (Alloc) Edge[SpanningTreeNodes[I].NumOutEdges];
947 CFGNodes[I].NumInEdges = 0;
948 CFGNodes[I].NumOutEdges = 0;
949 SpanningTreeNodes[I].NumInEdges = 0;
950 SpanningTreeNodes[I].NumOutEdges = 0;
951 }
952
953 // Fill in/out edge sets
954 for (int I = 0; I < D.NumEdges; ++I) {
955 const uint32_t Src = D.Edges[I].FromNode;
956 const uint32_t Dst = D.Edges[I].ToNode;
957 Edge *E = &CFGNodes[Src].OutEdges[CFGNodes[Src].NumOutEdges++];
958 E->Node = Dst;
959 E->ID = I;
960
961 E = &CFGNodes[Dst].InEdges[CFGNodes[Dst].NumInEdges++];
962 E->Node = Src;
963 E->ID = I;
964
965 if (D.Edges[I].Counter != 0xffffffff)
966 continue;
967
968 E = &SpanningTreeNodes[Src]
969 .OutEdges[SpanningTreeNodes[Src].NumOutEdges++];
970 E->Node = Dst;
971 E->ID = I;
972
973 E = &SpanningTreeNodes[Dst]
974 .InEdges[SpanningTreeNodes[Dst].NumInEdges++];
975 E->Node = Src;
976 E->ID = I;
977 }
978
979 computeEdgeFrequencies(Counters, Ctx);
980}
981
982Graph::~Graph() {
983 if (CallFreqs)
984 Alloc.deallocate(CallFreqs);
985 if (EdgeFreqs)
986 Alloc.deallocate(EdgeFreqs);
987 for (int I = NumNodes - 1; I >= 0; --I) {
988 if (SpanningTreeNodes[I].OutEdges)
989 Alloc.deallocate(SpanningTreeNodes[I].OutEdges);
990 if (SpanningTreeNodes[I].InEdges)
991 Alloc.deallocate(SpanningTreeNodes[I].InEdges);
992 if (CFGNodes[I].OutEdges)
993 Alloc.deallocate(CFGNodes[I].OutEdges);
994 if (CFGNodes[I].InEdges)
995 Alloc.deallocate(CFGNodes[I].InEdges);
996 }
997 if (SpanningTreeNodes)
998 Alloc.deallocate(SpanningTreeNodes);
999 if (CFGNodes)
1000 Alloc.deallocate(CFGNodes);
1001}
1002
1003void Graph::dump() const {
1004 reportNumber("Dumping graph with number of nodes: ", NumNodes, 10);
1005 report(" Full graph:\n");
1006 for (int I = 0; I < NumNodes; ++I) {
1007 const Node *N = &CFGNodes[I];
1008 reportNumber(" Node #", I, 10);
1009 reportNumber(" InEdges total ", N->NumInEdges, 10);
1010 for (int J = 0; J < N->NumInEdges; ++J)
1011 reportNumber(" ", N->InEdges[J].Node, 10);
1012 reportNumber(" OutEdges total ", N->NumOutEdges, 10);
1013 for (int J = 0; J < N->NumOutEdges; ++J)
1014 reportNumber(" ", N->OutEdges[J].Node, 10);
1015 report("\n");
1016 }
1017 report(" Spanning tree:\n");
1018 for (int I = 0; I < NumNodes; ++I) {
1019 const Node *N = &SpanningTreeNodes[I];
1020 reportNumber(" Node #", I, 10);
1021 reportNumber(" InEdges total ", N->NumInEdges, 10);
1022 for (int J = 0; J < N->NumInEdges; ++J)
1023 reportNumber(" ", N->InEdges[J].Node, 10);
1024 reportNumber(" OutEdges total ", N->NumOutEdges, 10);
1025 for (int J = 0; J < N->NumOutEdges; ++J)
1026 reportNumber(" ", N->OutEdges[J].Node, 10);
1027 report("\n");
1028 }
1029}
1030
1031void Graph::dumpEdgeFreqs() const {
1032 reportNumber(
1033 "Dumping edge frequencies for graph with num edges: ", D.NumEdges, 10);
1034 for (int I = 0; I < D.NumEdges; ++I) {
1035 reportNumber("* Src: ", D.Edges[I].FromNode, 10);
1036 reportNumber(" Dst: ", D.Edges[I].ToNode, 10);
1037 reportNumber(" Cnt: ", EdgeFreqs[I], 10);
1038 }
1039}
1040
1041/// Auxiliary map structure for fast lookups of which calls map to each node of
1042/// the function CFG
1043struct NodeToCallsMap {
1044 struct MapEntry {
1045 uint32_t NumCalls;
1046 uint32_t *Calls;
1047 };
1048 MapEntry *Entries;
1049 BumpPtrAllocator &Alloc;
1050 const uint32_t NumNodes;
1051
1052 NodeToCallsMap(BumpPtrAllocator &Alloc, const FunctionDescription &D,
1053 uint32_t NumNodes)
1054 : Alloc(Alloc), NumNodes(NumNodes) {
1055 Entries = new (Alloc, 0) MapEntry[NumNodes];
1056 for (int I = 0; I < D.NumCalls; ++I) {
1057 DEBUG(reportNumber("Registering call in node ", D.Calls[I].FromNode, 10));
1058 ++Entries[D.Calls[I].FromNode].NumCalls;
1059 }
1060 for (int I = 0; I < NumNodes; ++I) {
1061 Entries[I].Calls = Entries[I].NumCalls ? new (Alloc)
1062 uint32_t[Entries[I].NumCalls]
1063 : nullptr;
1064 Entries[I].NumCalls = 0;
1065 }
1066 for (int I = 0; I < D.NumCalls; ++I) {
1067 MapEntry &Entry = Entries[D.Calls[I].FromNode];
1068 Entry.Calls[Entry.NumCalls++] = I;
1069 }
1070 }
1071
1072 /// Set the frequency of all calls in node \p NodeID to Freq. However, if
1073 /// the calls have their own counters and do not depend on the basic block
1074 /// counter, this means they have landing pads and throw exceptions. In this
1075 /// case, set their frequency with their counters and return the maximum
1076 /// value observed in such counters. This will be used as the new frequency
1077 /// at basic block entry. This is used to fix the CFG edge frequencies in the
1078 /// presence of exceptions.
1079 uint64_t visitAllCallsIn(uint32_t NodeID, uint64_t Freq, uint64_t *CallFreqs,
1080 const FunctionDescription &D,
1081 const uint64_t *Counters,
1082 ProfileWriterContext &Ctx) const {
1083 const MapEntry &Entry = Entries[NodeID];
1084 uint64_t MaxValue = 0ull;
1085 for (int I = 0, E = Entry.NumCalls; I != E; ++I) {
[29] Assuming 'I' is not equal to 'E'
[30] Loop condition is true. Entering loop body
1086 const uint32_t CallID = Entry.Calls[I];
1087 DEBUG(reportNumber(" Setting freq for call ID: ", CallID, 10));
1088 const CallDescription &CallDesc = D.Calls[CallID];
1089 if (CallDesc.Counter == 0xffffffff) {
[31] Assuming field 'Counter' is not equal to -1
[32] Taking false branch
1090 CallFreqs[CallID] = Freq;
1091 DEBUG(reportNumber(" with : ", Freq, 10));
1092 } else {
1093 const uint64_t CounterVal = Counters[CallDesc.Counter];
1094 CallFreqs[CallID] = CounterVal;
[33] Array access (from variable 'CallFreqs') results in a null pointer dereference
1095 MaxValue = CounterVal > MaxValue ? CounterVal : MaxValue;
1096 DEBUG(reportNumber(" with (private counter) : ", CounterVal, 10));
1097 }
1098 DEBUG(reportNumber(" Address: 0x", CallDesc.TargetAddress, 16));
1099 if (CallFreqs[CallID] > 0)
1100 Ctx.CallFlowTable->get(CallDesc.TargetAddress).Calls +=
1101 CallFreqs[CallID];
1102 }
1103 return MaxValue;
1104 }
1105
1106 ~NodeToCallsMap() {
1107 for (int I = NumNodes - 1; I >= 0; --I)
1108 if (Entries[I].Calls)
1109 Alloc.deallocate(Entries[I].Calls);
1110 Alloc.deallocate(Entries);
1111 }
1112};
1113
1114/// Fill an array with the frequency of each edge in the function represented
1115/// by G, as well as another array for each call.
1116void Graph::computeEdgeFrequencies(const uint64_t *Counters,
1117 ProfileWriterContext &Ctx) {
1118 if (NumNodes == 0)
[1] Assuming field 'NumNodes' is not equal to 0
1119 return;
1120
1121 EdgeFreqs = D.NumEdges ? new (Alloc, 0) uint64_t [D.NumEdges] : nullptr;
[2] Taking false branch
[3] Assuming field 'NumEdges' is 0
[4] '?' condition is false
1122 CallFreqs = D.NumCalls ? new (Alloc, 0) uint64_t [D.NumCalls] : nullptr;
[5] Assuming field 'NumCalls' is 0
[6] '?' condition is false
[7] Null pointer value stored to field 'CallFreqs'
1123
1124 // Setup a lookup for calls present in each node (BB)
1125 NodeToCallsMap *CallMap = new (Alloc) NodeToCallsMap(Alloc, D, NumNodes);
1126
1127 // Perform a bottom-up, BFS traversal of the spanning tree in G. Edges in the
1128 // spanning tree don't have explicit counters. We must infer their value using
1129 // a linear combination of other counters (sum of counters of the outgoing
1130 // edges minus sum of counters of the incoming edges).
1131 uint32_t *Stack = new (Alloc) uint32_t [NumNodes];
1132 uint32_t StackTop = 0;
1133 enum Status : uint8_t { S_NEW = 0, S_VISITING, S_VISITED };
1134 Status *Visited = new (Alloc, 0) Status[NumNodes];
1135 uint64_t *LeafFrequency = new (Alloc, 0) uint64_t[NumNodes];
1136 uint64_t *EntryAddress = new (Alloc, 0) uint64_t[NumNodes];
1137
1138 // Setup a fast lookup for frequency of leaf nodes, which have special
1139 // basic block frequency instrumentation (they are not edge profiled).
1140 for (int I = 0; I < D.NumLeafNodes; ++I) {
[8] Assuming 'I' is >= field 'NumLeafNodes'
[9] Loop condition is false. Execution continues on line 1149
1141 LeafFrequency[D.LeafNodes[I].Node] = Counters[D.LeafNodes[I].Counter];
1142 DEBUG({
1143 if (Counters[D.LeafNodes[I].Counter] > 0) {
1144 reportNumber("Leaf Node# ", D.LeafNodes[I].Node, 10);
1145 reportNumber(" Counter: ", Counters[D.LeafNodes[I].Counter], 10);
1146 }
1147 });
1148 }
1149 for (int I = 0; I < D.NumEntryNodes; ++I) {
[10] Assuming 'I' is >= field 'NumEntryNodes'
[11] Loop condition is false. Execution continues on line 1157
1150 EntryAddress[D.EntryNodes[I].Node] = D.EntryNodes[I].Address;
1151 DEBUG({
1152 reportNumber("Entry Node# ", D.EntryNodes[I].Node, 10);
1153 reportNumber(" Address: ", D.EntryNodes[I].Address, 16);
1154 });
1155 }
1156 // Add all root nodes to the stack
1157 for (int I = 0; I < NumNodes; ++I)
[11.1] 'I' is < field 'NumNodes'
[12] Loop condition is true. Entering loop body
[15] Assuming 'I' is >= field 'NumNodes'
[16] Loop condition is false. Execution continues on line 1162
1158 if (SpanningTreeNodes[I].NumInEdges == 0)
[13] Assuming field 'NumInEdges' is equal to 0
[14] Taking true branch
1159 Stack[StackTop++] = I;
1160
1161 // Empty stack?
1162 if (StackTop == 0) {
[16.1] 'StackTop' is not equal to 0
[17] Taking false branch
1163 DEBUG(report("Empty stack!\n"));
1164 Alloc.deallocate(EntryAddress);
1165 Alloc.deallocate(LeafFrequency);
1166 Alloc.deallocate(Visited);
1167 Alloc.deallocate(Stack);
1168 CallMap->~NodeToCallsMap();
1169 Alloc.deallocate(CallMap);
1170 if (CallFreqs)
1171 Alloc.deallocate(CallFreqs);
1172 if (EdgeFreqs)
1173 Alloc.deallocate(EdgeFreqs);
1174 EdgeFreqs = nullptr;
1175 CallFreqs = nullptr;
1176 return;
1177 }
1178 // Add all known edge counts, will infer the rest
1179 for (int I = 0; I < D.NumEdges; ++I) {
[18] Loop condition is false. Execution continues on line 1186
1180 const uint32_t C = D.Edges[I].Counter;
1181 if (C == 0xffffffff) // inferred counter - we will compute its value
1182 continue;
1183 EdgeFreqs[I] = Counters[C];
1184 }
1185
1186 while (StackTop > 0) {
[19] Loop condition is true. Entering loop body
1187 const uint32_t Cur = Stack[--StackTop];
1188 DEBUG({
1189 if (Visited[Cur] == S_VISITING)
1190 report("(visiting) ");
1191 else
1192 report("(new) ");
1193 reportNumber("Cur: ", Cur, 10);
1194 });
1195
1196 // This shouldn't happen in a tree
1197 assert(Visited[Cur] != S_VISITED, "should not have visited nodes in stack");
[20] Assuming the condition is true
1198 if (Visited[Cur] == S_NEW) {
[21] Assuming the condition is false
[22] Taking false branch
1199 Visited[Cur] = S_VISITING;
1200 Stack[StackTop++] = Cur;
1201 assert(StackTop <= NumNodes, "stack grew too large");
1202 for (int I = 0, E = SpanningTreeNodes[Cur].NumOutEdges; I < E; ++I) {
1203 const uint32_t Succ = SpanningTreeNodes[Cur].OutEdges[I].Node;
1204 Stack[StackTop++] = Succ;
1205 assert(StackTop <= NumNodes, "stack grew too large");
1206 }
1207 continue;
1208 }
1209 Visited[Cur] = S_VISITED;
1210
1211 // Establish our node frequency based on outgoing edges, which should all be
1212 // resolved by now.
1213 int64_t CurNodeFreq = LeafFrequency[Cur];
1214 // Not a leaf?
1215 if (!CurNodeFreq) {
[23] Assuming 'CurNodeFreq' is not equal to 0
[24] Taking false branch
1216 for (int I = 0, E = CFGNodes[Cur].NumOutEdges; I != E; ++I) {
1217 const uint32_t SuccEdge = CFGNodes[Cur].OutEdges[I].ID;
1218 CurNodeFreq += EdgeFreqs[SuccEdge];
1219 }
1220 }
1221 if (CurNodeFreq < 0)
[24.1] 'CurNodeFreq' is >= 0
[25] Taking false branch
1222 CurNodeFreq = 0;
1223
1224 const uint64_t CallFreq = CallMap->visitAllCallsIn(
1225 Cur, CurNodeFreq > 0 ? CurNodeFreq : 0, CallFreqs, D, Counters, Ctx);
[25.1] 'CurNodeFreq' is > 0
[26] '?' condition is true
[27] Passing null pointer value via 3rd parameter 'CallFreqs'
[28] Calling 'NodeToCallsMap::visitAllCallsIn'
1226
1227 // Exception handling affected our output flow? Fix with calls info
1228 DEBUG({
1229 if (CallFreq > CurNodeFreq)
1230 report("Bumping node frequency with call info\n");
1231 });
1232 CurNodeFreq = CallFreq > CurNodeFreq ? CallFreq : CurNodeFreq;
1233
1234 if (CurNodeFreq > 0) {
1235 if (uint64_t Addr = EntryAddress[Cur]) {
1236 DEBUG(
1237 reportNumber(" Setting flow at entry point address 0x", Addr, 16));
1238 DEBUG(reportNumber(" with: ", CurNodeFreq, 10));
1239 Ctx.CallFlowTable->get(Addr).Val = CurNodeFreq;
1240 }
1241 }
1242
1243 // No parent? Reached a tree root, limit to call frequency updating.
1244 if (SpanningTreeNodes[Cur].NumInEdges == 0)
1245 continue;
1246
1247 assert(SpanningTreeNodes[Cur].NumInEdges == 1, "must have 1 parent");
1248 const uint32_t Parent = SpanningTreeNodes[Cur].InEdges[0].Node;
1249 const uint32_t ParentEdge = SpanningTreeNodes[Cur].InEdges[0].ID;
1250
1251 // Calculate parent edge freq.
1252 int64_t ParentEdgeFreq = CurNodeFreq;
1253 for (int I = 0, E = CFGNodes[Cur].NumInEdges; I != E; ++I) {
1254 const uint32_t PredEdge = CFGNodes[Cur].InEdges[I].ID;
1255 ParentEdgeFreq -= EdgeFreqs[PredEdge];
1256 }
1257
1258 // Sometimes the conservative CFG that BOLT builds will lead to incorrect
1259 // flow computation. For example, in a BB that transitively calls the exit
1260 // syscall, BOLT will add a fall-through successor even though it should not
1261 // have any successors. So this block execution will likely be wrong. We
1262 // tolerate this imperfection since this case should be quite infrequent.
1263 if (ParentEdgeFreq < 0) {
1264 DEBUG(dumpEdgeFreqs());
1265 DEBUG(report("WARNING: incorrect flow"));
1266 ParentEdgeFreq = 0;
1267 }
1268 DEBUG(reportNumber(" Setting freq for ParentEdge: ", ParentEdge, 10));
1269 DEBUG(reportNumber(" with ParentEdgeFreq: ", ParentEdgeFreq, 10));
1270 EdgeFreqs[ParentEdge] = ParentEdgeFreq;
1271 }
1272
1273 Alloc.deallocate(EntryAddress);
1274 Alloc.deallocate(LeafFrequency);
1275 Alloc.deallocate(Visited);
1276 Alloc.deallocate(Stack);
1277 CallMap->~NodeToCallsMap();
1278 Alloc.deallocate(CallMap);
1279 DEBUG(dumpEdgeFreqs());
1280}
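
A worked example of the inference step above (illustrative numbers): if node Cur resolves to CurNodeFreq = 100 and has two instrumented incoming edges with counters 30 and 25, the uninstrumented spanning-tree parent edge is inferred as ParentEdgeFreq = 100 - 30 - 25 = 45. The loop subtracts every in-edge's EdgeFreqs slot, including the parent edge's own, but since the parent is a spanning-tree edge its slot is still zero at that point, so the subtraction is harmless.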
1281
1282/// Write to \p FD all of the edge profiles for function \p FuncDesc. Uses
1283/// \p Alloc to allocate helper dynamic structures used to compute profile for
1284/// edges that we do not explicitly instrument.
1285const uint8_t *writeFunctionProfile(int FD, ProfileWriterContext &Ctx,
1286 const uint8_t *FuncDesc,
1287 BumpPtrAllocator &Alloc) {
1288 const FunctionDescription F(FuncDesc);
1289 const uint8_t *next = FuncDesc + F.getSize();
1290
1291#if !defined(__APPLE__)
1292 uint64_t *bolt_instr_locations = __bolt_instr_locations;
1293#else
1294 uint64_t *bolt_instr_locations = _bolt_instr_locations_getter();
1295#endif
1296
1297 // Skip funcs we know are cold
1298#ifndef ENABLE_DEBUG
1299 uint64_t CountersFreq = 0;
1300 for (int I = 0; I < F.NumLeafNodes; ++I)
1301 CountersFreq += bolt_instr_locations[F.LeafNodes[I].Counter];
1302
1303 if (CountersFreq == 0) {
1304 for (int I = 0; I < F.NumEdges; ++I) {
1305 const uint32_t C = F.Edges[I].Counter;
1306 if (C == 0xffffffff)
1307 continue;
1308 CountersFreq += bolt_instr_locations[C];
1309 }
1310 if (CountersFreq == 0) {
1311 for (int I = 0; I < F.NumCalls; ++I) {
1312 const uint32_t C = F.Calls[I].Counter;
1313 if (C == 0xffffffff)
1314 continue;
1315 CountersFreq += bolt_instr_locations[C];
1316 }
1317 if (CountersFreq == 0)
1318 return next;
1319 }
1320 }
1321#endif
1322
1323 Graph *G = new (Alloc) Graph(Alloc, F, bolt_instr_locations, Ctx);
1324 DEBUG(G->dump());
1325
1326 if (!G->EdgeFreqs && !G->CallFreqs) {
1327 G->~Graph();
1328 Alloc.deallocate(G);
1329 return next;
1330 }
1331
1332 for (int I = 0; I < F.NumEdges; ++I) {
1333 const uint64_t Freq = G->EdgeFreqs[I];
1334 if (Freq == 0)
1335 continue;
1336 const EdgeDescription *Desc = &F.Edges[I];
1337 char LineBuf[BufSize];
1338 char *Ptr = LineBuf;
1339 Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
1340 Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
1341 Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 22);
1342 Ptr = intToStr(Ptr, Freq, 10);
1343 *Ptr++ = '\n';
1344 __write(FD, LineBuf, Ptr - LineBuf);
1345 }
1346
1347 for (int I = 0; I < F.NumCalls; ++I) {
1348 const uint64_t Freq = G->CallFreqs[I];
1349 if (Freq == 0)
1350 continue;
1351 char LineBuf[BufSize];
1352 char *Ptr = LineBuf;
1353 const CallDescription *Desc = &F.Calls[I];
1354 Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
1355 Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
1356 Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1357 Ptr = intToStr(Ptr, Freq, 10);
1358 *Ptr++ = '\n';
1359 __write(FD, LineBuf, Ptr - LineBuf);
1360 }
1361
1362 G->~Graph();
1363 Alloc.deallocate(G);
1364 return next;
1365}
1366
1367#if !defined(__APPLE__)
1368const IndCallTargetDescription *
1369ProfileWriterContext::lookupIndCallTarget(uint64_t Target) const {
1370 uint32_t B = 0;
1371 uint32_t E = __bolt_instr_num_ind_targets;
1372 if (E == 0)
1373 return nullptr;
1374 do {
1375 uint32_t I = (E - B) / 2 + B;
1376 if (IndCallTargets[I].Address == Target)
1377 return &IndCallTargets[I];
1378 if (IndCallTargets[I].Address < Target)
1379 B = I + 1;
1380 else
1381 E = I;
1382 } while (B < E);
1383 return nullptr;
1384}
1385
1386/// Write a single indirect call <src, target> pair to the fdata file
1387void visitIndCallCounter(IndirectCallHashTable::MapEntry &Entry,
1388 int FD, int CallsiteID,
1389 ProfileWriterContext *Ctx) {
1390 if (Entry.Val == 0)
1391 return;
1392 DEBUG(reportNumber("Target func 0x", Entry.Key, 16));
1393 DEBUG(reportNumber("Target freq: ", Entry.Val, 10));
1394 const IndCallDescription *CallsiteDesc =
1395 &Ctx->IndCallDescriptions[CallsiteID];
1396 const IndCallTargetDescription *TargetDesc =
1397 Ctx->lookupIndCallTarget(Entry.Key - TextBaseAddress);
1398 if (!TargetDesc) {
1399 DEBUG(report("Failed to lookup indirect call target\n"));
1400 char LineBuf[BufSize];
1401 char *Ptr = LineBuf;
1402 Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
1403 Ptr = strCopy(Ptr, "0 [unknown] 0 0 ", BufSize - (Ptr - LineBuf) - 40);
1404 Ptr = intToStr(Ptr, Entry.Val, 10);
1405 *Ptr++ = '\n';
1406 __write(FD, LineBuf, Ptr - LineBuf);
1407 return;
1408 }
1409 Ctx->CallFlowTable->get(TargetDesc->Address).Calls += Entry.Val;
1410 char LineBuf[BufSize];
1411 char *Ptr = LineBuf;
1412 Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
1413 Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
1414 Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1415 Ptr = intToStr(Ptr, Entry.Val, 10);
1416 *Ptr++ = '\n';
1417 __write(FD, LineBuf, Ptr - LineBuf);
1418}
1419
1420/// Write to \p FD all of the indirect call profiles.
1421void writeIndirectCallProfile(int FD, ProfileWriterContext &Ctx) {
1422 for (int I = 0; I < __bolt_instr_num_ind_calls; ++I) {
1423 DEBUG(reportNumber("IndCallsite #", I, 10));
1424 GlobalIndCallCounters[I].forEachElement(visitIndCallCounter, FD, I, &Ctx);
1425 }
1426}
1427
1428/// Check a single call flow for a callee versus all known callers. If there
1429/// are fewer callers than the callee expects, write the difference with
1430/// source [unknown] in the profile.
1431void visitCallFlowEntry(CallFlowHashTable::MapEntry &Entry, int FD,
1432 ProfileWriterContext *Ctx) {
1433 DEBUG(reportNumber("Call flow entry address: 0x", Entry.Key, 16));
1434 DEBUG(reportNumber("Calls: ", Entry.Calls, 10));
1435 DEBUG(reportNumber("Reported entry frequency: ", Entry.Val, 10));
1436 DEBUG({
1437 if (Entry.Calls > Entry.Val)
1438 report(" More calls than expected!\n");
1439 });
1440 if (Entry.Val <= Entry.Calls)
1441 return;
1442 DEBUG(reportNumber(
1443 " Balancing calls with traffic: ", Entry.Val - Entry.Calls, 10));
1444 const IndCallTargetDescription *TargetDesc =
1445 Ctx->lookupIndCallTarget(Entry.Key);
1446 if (!TargetDesc) {
1447 // There is probably something wrong with this callee and this should be
1448 // investigated, but I don't want to assert and lose all data collected.
1449 DEBUG(report("WARNING: failed to look up call target!\n"));
1450 return;
1451 }
1452 char LineBuf[BufSize];
1453 char *Ptr = LineBuf;
1454 Ptr = strCopy(Ptr, "0 [unknown] 0 ", BufSize);
1455 Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
1456 Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1457 Ptr = intToStr(Ptr, Entry.Val - Entry.Calls, 10);
1458 *Ptr++ = '\n';
1459 __write(FD, LineBuf, Ptr - LineBuf);
1460}
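The balancing rule is plain arithmetic: when a callee's recorded entry frequency (Entry.Val) exceeds the calls attributed to known call sites (Entry.Calls), the residue is credited to an [unknown] caller, e.g. Entry.Val = 100 and Entry.Calls = 70 emit a line with count 30. A one-function sketch of that rule (the name is illustrative):

    #include <cstdint>

    // Count credited to source [unknown]: zero when known callers already
    // account for every observed entry into the callee, e.g.
    // unknownCallerResidue(100, 70) == 30.
    uint64_t unknownCallerResidue(uint64_t EntryFreq, uint64_t KnownCalls) {
      return EntryFreq > KnownCalls ? EntryFreq - KnownCalls : 0;
    }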
1461
1462/// Open the fdata file for writing and return a valid file descriptor,
1463/// aborting the program upon failure.
1464int openProfile() {
1465 // Build the profile name string by appending our PID
1466 char Buf[BufSize];
1467 char *Ptr = Buf;
1468 uint64_t PID = __getpid();
1469 Ptr = strCopy(Buf, __bolt_instr_filename, BufSize);
1470 if (__bolt_instr_use_pid) {
1471 Ptr = strCopy(Ptr, ".", BufSize - (Ptr - Buf + 1));
1472 Ptr = intToStr(Ptr, PID, 10);
1473 Ptr = strCopy(Ptr, ".fdata", BufSize - (Ptr - Buf + 1));
1474 }
1475 *Ptr++ = '\0';
1476 uint64_t FD = __open(Buf, O_WRONLY | O_TRUNC | O_CREAT,
1477 /*mode=*/0666);
1478 if (static_cast<int64_t>(FD) < 0) {
1479 report("Error while trying to open profile file for writing: ");
1480 report(Buf);
1481 reportNumber("\nFailed with error number: 0x",
1482 0 - static_cast<int64_t>(FD), 16);
1483 __exit(1);
1484 }
1485 return FD;
1486}
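When __bolt_instr_use_pid is set, the PID and a second ".fdata" suffix are appended to whatever base name BOLT embedded, so a base of /tmp/prof.fdata with PID 1234 becomes /tmp/prof.fdata.1234.fdata. A hedged sketch of the same construction using snprintf (the runtime hand-rolls strCopy/intToStr because it cannot call into libc; BaseName stands in for __bolt_instr_filename):

    #include <cstddef>
    #include <cstdio>
    #include <unistd.h>

    // Mirror openProfile()'s name construction; BaseName is an assumed
    // stand-in for the embedded __bolt_instr_filename string.
    void buildProfileName(char *Out, std::size_t N, const char *BaseName,
                          bool UsePID) {
      if (UsePID)
        std::snprintf(Out, N, "%s.%ld.fdata", BaseName, (long)getpid());
      else
        std::snprintf(Out, N, "%s", BaseName);
    }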
1487
1488#endif
1489
1490} // anonymous namespace
1491
1492#if !defined(__APPLE__)
1493
1494/// Reset all counters in case you want to start profiling a new phase of your
1495/// program independently of prior phases.
1496/// The address of this function is printed by BOLT and this can be called by
1497/// any attached debugger during runtime. There is a useful one-liner for gdb:
1498///
1499/// gdb -p $(pgrep -xo PROCESSNAME) -ex 'p ((void(*)())0xdeadbeef)()' \
1500/// -ex 'set confirm off' -ex quit
1501///
1502/// where 0xdeadbeef is this function's address and PROCESSNAME is the name
1503/// of your binary.
1504extern "C" void __bolt_instr_clear_counters() {
1505 memset(reinterpret_cast<char *>(__bolt_instr_locations), 0,
1506 __bolt_num_counters * 8);
1507 for (int I = 0; I < __bolt_instr_num_ind_calls; ++I)
1508 GlobalIndCallCounters[I].resetCounters();
1509}
1510
1511/// This is the entry point for profile writing.
1512/// There are three ways of getting here:
1513///
1514/// * Program execution ended, finalization methods are running and BOLT
1515/// hooked into FINI from your binary dynamic section;
1516/// * You used the sleep timer option and during initialization we forked
1517/// a separate process that will call this function periodically;
1518/// * BOLT prints this function address so you can attach a debugger and
1519/// call this function directly to get your profile written to disk
1520/// on demand.
1521///
1522extern "C" void __attribute((force_align_arg_pointer))
1523__bolt_instr_data_dump(int FD) {
1524 // Already dumping
1525 if (!GlobalWriteProfileMutex->acquire())
1526 return;
1527
1528 int ret = __lseek(FD, 0, SEEK_SET);
1529 assert(ret == 0, "Failed to lseek!");
1530 ret = __ftruncate(FD, 0);
1531 assert(ret == 0, "Failed to ftruncate!");
1532 BumpPtrAllocator HashAlloc;
1533 HashAlloc.setMaxSize(0x6400000);
1534 ProfileWriterContext Ctx = readDescriptions();
1535 Ctx.CallFlowTable = new (HashAlloc, 0) CallFlowHashTable(HashAlloc);
1536
1537 DEBUG(printStats(Ctx));
1538
1539 BumpPtrAllocator Alloc;
1540 Alloc.setMaxSize(0x6400000);
1541 const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1542 for (int I = 0, E = __bolt_instr_num_funcs; I < E; ++I) {
1543 FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
1544 Alloc.clear();
1545 DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1546 }
1547 assert(FuncDesc == (void *)Ctx.Strings,
1548 "FuncDesc ptr must be equal to stringtable");
1549
1550 writeIndirectCallProfile(FD, Ctx);
1551 Ctx.CallFlowTable->forEachElement(visitCallFlowEntry, FD, &Ctx);
1552
1553 __fsync(FD);
1554 __munmap(Ctx.MMapPtr, Ctx.MMapSize);
1555 __close(Ctx.FileDesc);
1556 HashAlloc.destroy();
1557 GlobalWriteProfileMutex->release();
1558 DEBUG(report("Finished writing profile.\n"));
1559}
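GlobalWriteProfileMutex->acquire() acts as a try-lock: a caller that races with an in-progress dump returns immediately instead of blocking, so at most one writer ever rewinds and truncates FD. A minimal sketch of the pattern with std::atomic_flag (the runtime implements its own Mutex because it cannot link a C++ runtime library):

    #include <atomic>

    struct TryMutex {
      std::atomic_flag Busy = ATOMIC_FLAG_INIT;
      // Non-blocking: true only for the caller that flips the flag.
      bool acquire() { return !Busy.test_and_set(std::memory_order_acquire); }
      void release() { Busy.clear(std::memory_order_release); }
    };

    void dumpOnce(TryMutex &M) {
      if (!M.acquire())
        return;        // a dump is already running; do nothing
      // ... seek, truncate, and write the profile here ...
      M.release();
    }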
1560
1561/// Event loop for our child process spawned during setup to dump profile data
1562/// at user-specified intervals
1563void watchProcess() {
1564 timespec ts, rem;
1565 uint64_t Elapsed = 0ull;
1566 int FD = openProfile();
1567 uint64_t ppid;
1568 if (__bolt_instr_wait_forks) {
1569 // Store parent pgid, negated: __kill(-pgid, 0) probes the whole group
1570 ppid = -__getpgid(0);
1571 // And leave parent process group
1572 __setpgid(0, 0);
1573 } else {
1574 // Store parent pid
1575 ppid = __getppid();
1576 if (ppid == 1) {
1577 // Parent already dead
1578 __bolt_instr_data_dump(FD);
1579 goto out;
1580 }
1581 }
1582
1583 ts.tv_sec = 1;
1584 ts.tv_nsec = 0;
1585 while (1) {
1586 __nanosleep(&ts, &rem);
1587 // This means our parent process or all its forks are dead,
1588 // so no need for us to keep dumping.
1589 if (__kill(ppid, 0) < 0) {
1590 if (__bolt_instr_no_counters_clear)
1591 __bolt_instr_data_dump(FD);
1592 break;
1593 }
1594
1595 if (++Elapsed < __bolt_instr_sleep_time)
1596 continue;
1597
1598 Elapsed = 0;
1599 __bolt_instr_data_dump(FD);
1600 if (!__bolt_instr_no_counters_clear)
1601 __bolt_instr_clear_counters();
1602 }
1603
1604out:;
1605 DEBUG(report("My parent process is dead, bye!\n"));
1606 __close(FD);
1607 __exit(0);
1608}
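The liveness probe here is kill(pid, 0): signal number 0 delivers nothing but still performs the existence check, and a negative pid probes an entire process group, which is why the wait-forks case stored -__getpgid(0). A self-contained sketch of the same watchdog loop, using the standard POSIX calls that the runtime's __kill/__nanosleep wrappers correspond to:

    #include <signal.h>
    #include <sys/types.h>
    #include <time.h>

    // Tick once per second; every SleepTime ticks invoke Dump(). Return as
    // soon as the watched pid (or process group, if negative) is gone.
    void watchLoop(pid_t Watched, unsigned SleepTime, void (*Dump)()) {
      timespec Ts = {1, 0};
      unsigned Elapsed = 0;
      for (;;) {
        nanosleep(&Ts, nullptr);
        if (kill(Watched, 0) < 0)   // signal 0: existence probe only
          return;
        if (++Elapsed < SleepTime)
          continue;
        Elapsed = 0;
        Dump();
      }
    }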
1609
1610extern "C" void __bolt_instr_indirect_call();
1611extern "C" void __bolt_instr_indirect_tailcall();
1612
1613/// Initialization code
1614extern "C" void __attribute((force_align_arg_pointer)) __bolt_instr_setup() {
1615 __bolt_ind_call_counter_func_pointer = __bolt_instr_indirect_call;
1616 __bolt_ind_tailcall_counter_func_pointer = __bolt_instr_indirect_tailcall;
1617 TextBaseAddress = getTextBaseAddress();
1618
1619 const uint64_t CountersStart =
1620 reinterpret_cast<uint64_t>(&__bolt_instr_locations[0]);
1621 const uint64_t CountersEnd = alignTo(
1622 reinterpret_cast<uint64_t>(&__bolt_instr_locations[__bolt_num_counters]),
1623 0x1000);
1624 DEBUG(reportNumber("replace mmap start: ", CountersStart, 16));
1625 DEBUG(reportNumber("replace mmap stop: ", CountersEnd, 16));
1626 assert(CountersEnd > CountersStart, "no counters");
1627
1628 const bool Shared = !__bolt_instr_use_pid;
1629 const uint64_t MapPrivateOrShared = Shared ? MAP_SHARED : MAP_PRIVATE;
1630
1631 void *Ret =
1632 __mmap(CountersStart, CountersEnd - CountersStart, PROT_READ | PROT_WRITE,
1633 MAP_ANONYMOUS | MapPrivateOrShared | MAP_FIXED, -1, 0);
1634 assert(Ret != MAP_FAILED, "__bolt_instr_setup: Failed to mmap counters!");
1635
1636 GlobalMetadataStorage = __mmap(0, 4096, PROT_READ | PROT_WRITE,
1637 MapPrivateOrShared | MAP_ANONYMOUS, -1, 0);
1638 assert(GlobalMetadataStorage != MAP_FAILED,
1639 "__bolt_instr_setup: failed to mmap page for metadata!");
1640
1641 GlobalAlloc = new (GlobalMetadataStorage) BumpPtrAllocator;
1642 // Conservatively reserve 100MiB
1643 GlobalAlloc->setMaxSize(0x6400000);
1644 GlobalAlloc->setShared(Shared);
1645 GlobalWriteProfileMutex = new (*GlobalAlloc, 0) Mutex();
1646 if (__bolt_instr_num_ind_calls > 0)
1647 GlobalIndCallCounters =
1648 new (*GlobalAlloc, 0) IndirectCallHashTable[__bolt_instr_num_ind_calls];
1649
1650 if (__bolt_instr_sleep_time != 0) {
1651 // Move the instrumented process into its own process group
1652 if (__bolt_instr_wait_forks)
1653 __setpgid(0, 0);
1654
1655 if (long PID = __fork()) // parent: continue running the program
1656 return;
1657 watchProcess(); // child: dump the profile periodically
1658 }
1659}
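The counter remap above is the trick that makes the watcher child work: __bolt_instr_locations sits in the binary's own data pages, and __mmap with MAP_FIXED replaces exactly those page-aligned addresses with an anonymous mapping; choosing MAP_SHARED (when a single profile file is used) means a child created by __fork keeps seeing the parent's live counter values rather than copy-on-write snapshots. A hedged sketch of the replace-in-place idiom over a page-aligned buffer of our own:

    #include <cassert>
    #include <cstdint>
    #include <sys/mman.h>

    // Page-aligned stand-in for the counter array that BOLT emits.
    alignas(4096) static uint64_t Counters[4096 / sizeof(uint64_t)];

    void remapCountersShared() {
      // MAP_FIXED discards the existing private pages and installs an
      // anonymous MAP_SHARED mapping at the same addresses, so counters
      // stay visible across a later fork().
      void *Ret = mmap(Counters, sizeof(Counters), PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_SHARED | MAP_FIXED, -1, 0);
      assert(Ret == static_cast<void *>(Counters) &&
             "failed to remap counters");
      (void)Ret;  // silence the unused warning when NDEBUG is defined
    }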
1660
1661extern "C" __attribute((force_align_arg_pointer)) void
1662instrumentIndirectCall(uint64_t Target, uint64_t IndCallID) {
1663 GlobalIndCallCounters[IndCallID].incrementVal(Target, *GlobalAlloc);
1664}
1665
1666/// We receive as in-stack arguments the identifier of the indirect call site
1667/// as well as the target address for the call
1668extern "C" __attribute((naked)) void __bolt_instr_indirect_call()
1669{
1670#if defined(__aarch64__)
1671 // clang-format off
1672 __asm__ __volatile__(SAVE_ALL
1673 "ldp x0, x1, [sp, #288]\n"
1674 "bl instrumentIndirectCall\n"
1675 RESTORE_ALL
1676 "ret\n"
1677 :::);
1678 // clang-format on
1679#else
1680 // clang-format off
1681 __asm__ __volatile__(SAVE_ALL
1682 "mov 0xa0(%%rsp), %%rdi\n" // arg 1: call target address
1683 "mov 0x98(%%rsp), %%rsi\n" // arg 2: callsite identifier
1684 "call instrumentIndirectCall\n"
1685 RESTORE_ALL
1686 "ret\n"
1687 :::);
1688 // clang-format on
1689#endif
1690}
1691
1692extern "C" __attribute((naked)) void __bolt_instr_indirect_tailcall()
1693{
1694#if defined(__aarch64__)
1695 // clang-format off
1696 __asm__ __volatile__(SAVE_ALL
1697 "ldp x0, x1, [sp, #288]\n"
1698 "bl instrumentIndirectCall\n"
1699 RESTORE_ALL
1700 "ret\n"
1701 :::);
1702 // clang-format on
1703#else
1704 // clang-format off
1705 __asm__ __volatile__(SAVE_ALL
1706 "mov 0x98(%%rsp), %%rdi\n" // arg 1: call target address
1707 "mov 0x90(%%rsp), %%rsi\n" // arg 2: callsite identifier
1708 "call instrumentIndirectCall\n"
1709 RESTORE_ALL
1710 "ret\n"
1711 :::);
1712 // clang-format on
1713#endif
1714}
1715
1716/// This hooks the ELF entry point; it needs to save all machine state.
1717extern "C" __attribute((naked)) void __bolt_instr_start()
1718{
1719#if defined(__aarch64__)
1720 // clang-format off
1721 __asm__ __volatile__(SAVE_ALL
1722 "bl __bolt_instr_setup\n"
1723 RESTORE_ALL
1724 "adrp x16, __bolt_start_trampoline\n"
1725 "add x16, x16, #:lo12:__bolt_start_trampoline\n"
1726 "br x16\n"
1727 :::);
1728 // clang-format on
1729#else
1730 // clang-format off
1731 __asm__ __volatile__(SAVE_ALL
1732 "call __bolt_instr_setup\n"
1733 RESTORE_ALL
1734 "jmp __bolt_start_trampoline\n"
1735 :::);
1736 // clang-format on
1737#endif
1738}
1739
1740/// This is hooking into ELF's DT_FINI
1741extern "C" void __bolt_instr_fini() {
1742#if defined(__aarch64__)
1743 // clang-format off
1744 __asm__ __volatile__(SAVE_ALL
1745 "adrp x16, __bolt_fini_trampoline\n"
1746 "add x16, x16, #:lo12:__bolt_fini_trampoline\n"
1747 "blr x16\n"
1748 RESTORE_ALL
1749 :::);
1750 // clang-format on
1751#else
1752 __asm__ __volatile__("call __bolt_fini_trampoline\n" :::);
1753#endif
1754 if (__bolt_instr_sleep_time == 0) {
1755 int FD = openProfile();
1756 __bolt_instr_data_dump(FD);
1757 __close(FD);
1758 }
1759 DEBUG(report("Finished.\n"));
1760}
1761
1762#endif
1763
1764#if defined(__APPLE__)
1765
1766extern "C" void __bolt_instr_data_dump() {
1767 ProfileWriterContext Ctx = readDescriptions();
1768
1769 int FD = 2;
1770 BumpPtrAllocator Alloc;
1771 const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1772 uint32_t bolt_instr_num_funcs = _bolt_instr_num_funcs_getter();
1773
1774 for (int I = 0, E = bolt_instr_num_funcs; I < E; ++I) {
1775 FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
1776 Alloc.clear();
1777 DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1778 }
1779 assert(FuncDesc == (void *)Ctx.Strings,
1780 "FuncDesc ptr must be equal to stringtable");
1781}
1782
1783// On OSX/iOS the final symbol name of an extern "C" function/variable contains
1784// one extra leading underscore: _bolt_instr_setup -> __bolt_instr_setup.
1785extern "C"
1786__attribute__((section("__TEXT,__setup")))
1787__attribute__((force_align_arg_pointer))
1788void _bolt_instr_setup() {
1789 __asm__ __volatile__(SAVE_ALL :::);
1790
1791 report("Hello!\n");
1792
1793 __asm__ __volatile__(RESTORE_ALL :::);
1794}
1795
1796extern "C"
1797__attribute__((section("__TEXT,__fini")))
1798__attribute__((force_align_arg_pointer))
1799void _bolt_instr_fini() {
1800 report("Bye!\n");
1801 __bolt_instr_data_dump();
1802}
1803
1804#endif