Bug Summary

File: projects/compiler-rt/lib/xray/xray_fdr_logging.cc
Warning: line 291, column 3
Null pointer passed as an argument to a 'nonnull' parameter
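
The diagnostic fires when a possibly-null pointer reaches a parameter declared nonnull; in the path annotated below, the flagged call is the std::memcpy into TLD.RecordPtr inside writeTSCWrapMetadata. As a minimal, self-contained sketch of the same class of finding — hypothetical names, unrelated to the XRay sources — with one common way to guard the call:

  #include <cstddef>
  #include <cstring>

  struct ThreadState {
    char *RecordPtr = nullptr;  // may legitimately become null after a buffer is released
  };

  // Hypothetical helper: copies a fixed-size record into the thread's buffer.
  // Without the null check, the nullability checker reports "Null pointer
  // passed as an argument to a 'nonnull' parameter" on the memcpy call.
  void writeRecord(ThreadState &TS, const void *Record, std::size_t Size) {
    if (TS.RecordPtr == nullptr)  // guard instead of writing through null
      return;
    std::memcpy(TS.RecordPtr, Record, Size);
    TS.RecordPtr += Size;
  }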

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name xray_fdr_logging.cc -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D XRAY_HAS_EXCEPTIONS=1 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/projects/compiler-rt/lib/xray -I /build/llvm-toolchain-snapshot-7~svn329677/projects/compiler-rt/lib/xray -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn329677/include -I /build/llvm-toolchain-snapshot-7~svn329677/projects/compiler-rt/lib/xray/.. -I /build/llvm-toolchain-snapshot-7~svn329677/projects/compiler-rt/lib/xray/../../include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-non-virtual-dtor -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/projects/compiler-rt/lib/xray -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -fno-builtin -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-04-11-031539-24776-1 -x c++ /build/llvm-toolchain-snapshot-7~svn329677/projects/compiler-rt/lib/xray/xray_fdr_logging.cc

/build/llvm-toolchain-snapshot-7~svn329677/projects/compiler-rt/lib/xray/xray_fdr_logging.cc

1//===-- xray_fdr_logging.cc ------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of XRay, a dynamic runtime instrumentation system.
11//
12// Here we implement the Flight Data Recorder mode for XRay, where we use
13// compact structures to store records in memory as well as when writing out the
14// data to files.
15//
16//===----------------------------------------------------------------------===//
17#include "xray_fdr_logging.h"
18#include <errno.h>
19#include <sys/syscall.h>
20#include <sys/time.h>
21#include <time.h>
22#include <unistd.h>
23
24#include "sanitizer_common/sanitizer_atomic.h"
25#include "sanitizer_common/sanitizer_common.h"
26#include "xray/xray_interface.h"
27#include "xray/xray_records.h"
28#include "xray_buffer_queue.h"
29#include "xray_defs.h"
30#include "xray_fdr_logging_impl.h"
31#include "xray_flags.h"
32#include "xray_tsc.h"
33#include "xray_utils.h"
34
35namespace __xray {
36
37// Global BufferQueue.
38BufferQueue *BQ = nullptr;
39
40__sanitizer::atomic_sint32_t LogFlushStatus = {
41 XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING};
42
43FDRLoggingOptions FDROptions;
44
45__sanitizer::SpinMutex FDROptionsMutex;
46
47// Must finalize before flushing.
48XRayLogFlushStatus fdrLoggingFlush() XRAY_NEVER_INSTRUMENT {
49 if (__sanitizer::atomic_load(&LoggingStatus,
50 __sanitizer::memory_order_acquire) !=
51 XRayLogInitStatus::XRAY_LOG_FINALIZED) {
52 if (__sanitizer::Verbosity())
53 Report("Not flushing log, implementation is not finalized.\n");
54 return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
55 }
56
57 s32 Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
58 if (!__sanitizer::atomic_compare_exchange_strong(
59 &LogFlushStatus, &Result, XRayLogFlushStatus::XRAY_LOG_FLUSHING,
60 __sanitizer::memory_order_release)) {
61
62 if (__sanitizer::Verbosity())
63 Report("Not flushing log, implementation is still finalizing.\n");
64 return static_cast<XRayLogFlushStatus>(Result);
65 }
66
67 if (BQ == nullptr) {
68 if (__sanitizer::Verbosity())
69 Report("Cannot flush when global buffer queue is null.\n");
70 return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
71 }
72
73 // We wait a number of milliseconds to allow threads to see that we've
74 // finalised before attempting to flush the log.
75 __sanitizer::SleepForMillis(flags()->xray_fdr_log_grace_period_ms);
76
77 // We write out the file in the following format:
78 //
79 // 1) We write down the XRay file header with version 1, type FDR_LOG.
80 // 2) Then we use the 'apply' member of the BufferQueue that's live, to
81 // ensure that at this point in time we write down the buffers that have
82 // been released (and marked "used") -- we dump the full buffer for now
83 // (fixed-sized) and let the tools reading the buffers deal with the data
84 // afterwards.
85 //
86 int Fd = -1;
87 {
88 __sanitizer::SpinMutexLock Guard(&FDROptionsMutex);
89 Fd = FDROptions.Fd;
90 }
91 if (Fd == -1)
92 Fd = getLogFD();
93 if (Fd == -1) {
94 auto Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
95 __sanitizer::atomic_store(&LogFlushStatus, Result,
96 __sanitizer::memory_order_release);
97 return Result;
98 }
99
100 // Test for required CPU features and cache the cycle frequency
101 static bool TSCSupported = probeRequiredCPUFeatures();
102 static uint64_t CycleFrequency =
103 TSCSupported ? getTSCFrequency() : __xray::NanosecondsPerSecond;
104
105 XRayFileHeader Header;
106
107 // Version 2 of the log writes the extents of the buffer, instead of relying
108 // on an end-of-buffer record.
109 Header.Version = 2;
110 Header.Type = FileTypes::FDR_LOG;
111 Header.CycleFrequency = CycleFrequency;
112
113 // FIXME: Actually check whether we have 'constant_tsc' and 'nonstop_tsc'
114 // before setting the values in the header.
115 Header.ConstantTSC = 1;
116 Header.NonstopTSC = 1;
117 Header.FdrData = FdrAdditionalHeaderData{BQ->ConfiguredBufferSize()};
118 retryingWriteAll(Fd, reinterpret_cast<char *>(&Header),
119 reinterpret_cast<char *>(&Header) + sizeof(Header));
120
121 BQ->apply([&](const BufferQueue::Buffer &B) {
122 // Starting at version 2 of the FDR logging implementation, we only write
123 // the records identified by the extents of the buffer. We use the Extents
124 // from the Buffer and write that out as the first record in the buffer.
125 // We still use a Metadata record, but fill in the extents instead for the
126 // data.
127 MetadataRecord ExtentsRecord;
128 auto BufferExtents = __sanitizer::atomic_load(
129 &B.Extents->Size, __sanitizer::memory_order_acquire);
130 assert(BufferExtents <= B.Size);
131 ExtentsRecord.Type = uint8_t(RecordType::Metadata);
132 ExtentsRecord.RecordKind =
133 uint8_t(MetadataRecord::RecordKinds::BufferExtents);
134 std::memcpy(ExtentsRecord.Data, &BufferExtents, sizeof(BufferExtents));
135 if (BufferExtents > 0) {
136 retryingWriteAll(Fd, reinterpret_cast<char *>(&ExtentsRecord),
137 reinterpret_cast<char *>(&ExtentsRecord) +
138 sizeof(MetadataRecord));
139 retryingWriteAll(Fd, reinterpret_cast<char *>(B.Data),
140 reinterpret_cast<char *>(B.Data) + BufferExtents);
141 }
142 });
143
144 __sanitizer::atomic_store(&LogFlushStatus,
145 XRayLogFlushStatus::XRAY_LOG_FLUSHED,
146 __sanitizer::memory_order_release);
147 return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
148}
149
150XRayLogInitStatus fdrLoggingFinalize() XRAY_NEVER_INSTRUMENT {
151 s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_INITIALIZED;
152 if (!__sanitizer::atomic_compare_exchange_strong(
153 &LoggingStatus, &CurrentStatus,
154 XRayLogInitStatus::XRAY_LOG_FINALIZING,
155 __sanitizer::memory_order_release)) {
156 if (__sanitizer::Verbosity())
157 Report("Cannot finalize log, implementation not initialized.\n");
158 return static_cast<XRayLogInitStatus>(CurrentStatus);
159 }
160
161 // Do special things to make the log finalize itself, and not allow any more
162 // operations to be performed until re-initialized.
163 BQ->finalize();
164
165 __sanitizer::atomic_store(&LoggingStatus,
166 XRayLogInitStatus::XRAY_LOG_FINALIZED,
167 __sanitizer::memory_order_release);
168 return XRayLogInitStatus::XRAY_LOG_FINALIZED;
169}
170
171XRayLogInitStatus fdrLoggingReset() XRAY_NEVER_INSTRUMENT {
172 s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_FINALIZED;
173 if (__sanitizer::atomic_compare_exchange_strong(
174 &LoggingStatus, &CurrentStatus,
175 XRayLogInitStatus::XRAY_LOG_INITIALIZED,
176 __sanitizer::memory_order_release))
177 return static_cast<XRayLogInitStatus>(CurrentStatus);
178
179 // Release the in-memory buffer queue.
180 delete BQ;
181 BQ = nullptr;
182
183 // Spin until the flushing status is flushed.
184 s32 CurrentFlushingStatus = XRayLogFlushStatus::XRAY_LOG_FLUSHED;
185 while (__sanitizer::atomic_compare_exchange_weak(
186 &LogFlushStatus, &CurrentFlushingStatus,
187 XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING,
188 __sanitizer::memory_order_release)) {
189 if (CurrentFlushingStatus == XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING)
190 break;
191 CurrentFlushingStatus = XRayLogFlushStatus::XRAY_LOG_FLUSHED;
192 }
193
194 // At this point, we know that the status is flushed, and that we can assume
195 return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
196}
197
198struct TSCAndCPU {
199 uint64_t TSC = 0;
200 unsigned char CPU = 0;
201};
202
203static TSCAndCPU getTimestamp() XRAY_NEVER_INSTRUMENT {
204 // We want to get the TSC as early as possible, so that we can check whether
205 // we've seen this CPU before. We also do it before we load anything else, to
206 // allow for forward progress with the scheduling.
207 TSCAndCPU Result;
208
209 // Test once for required CPU features
210 static bool TSCSupported = probeRequiredCPUFeatures();
211
212 if (TSCSupported) {
213 Result.TSC = __xray::readTSC(Result.CPU);
214 } else {
215 // FIXME: This code needs refactoring as it appears in multiple locations
216 timespec TS;
217 int result = clock_gettime(CLOCK_REALTIME, &TS);
218 if (result != 0) {
219 Report("clock_gettime(2) return %d, errno=%d", result, int(errno(*__errno_location ())));
220 TS = {0, 0};
221 }
222 Result.CPU = 0;
223 Result.TSC = TS.tv_sec * __xray::NanosecondsPerSecond + TS.tv_nsec;
224 }
225 return Result;
226}
227
228void fdrLoggingHandleArg0(int32_t FuncId,
229 XRayEntryType Entry) XRAY_NEVER_INSTRUMENT {
230 auto TC = getTimestamp();
231 __xray_fdr_internal::processFunctionHook(FuncId, Entry, TC.TSC, TC.CPU, 0,
232 clock_gettime, BQ);
233}
234
235void fdrLoggingHandleArg1(int32_t FuncId, XRayEntryType Entry,
236 uint64_t Arg) XRAY_NEVER_INSTRUMENT {
237 auto TC = getTimestamp();
238 __xray_fdr_internal::processFunctionHook(FuncId, Entry, TC.TSC, TC.CPU, Arg,
    Step 1: Calling 'processFunctionHook'
239 clock_gettime, BQ);
240}
241
242void fdrLoggingHandleCustomEvent(void *Event,
243 std::size_t EventSize) XRAY_NEVER_INSTRUMENT {
244 using namespace __xray_fdr_internal;
245 auto TC = getTimestamp();
246 auto &TSC = TC.TSC;
247 auto &CPU = TC.CPU;
248 RecursionGuard Guard{Running};
249 if (!Guard)
250 return;
251 if (EventSize > std::numeric_limits<int32_t>::max()) {
252 using Empty = struct {};
253 static Empty Once = [&] {
254 Report("Event size too large = %zu ; > max = %d\n", EventSize,
255 std::numeric_limits<int32_t>::max());
256 return Empty();
257 }();
258 (void)Once;
259 }
260 int32_t ReducedEventSize = static_cast<int32_t>(EventSize);
261 auto &TLD = getThreadLocalData();
262 if (!isLogInitializedAndReady(TLD.BQ, TSC, CPU, clock_gettime))
263 return;
264
265 // Here we need to prepare the log to handle:
266 // - The metadata record we're going to write. (16 bytes)
267 // - The additional data we're going to write. Currently, that's the size of
268 // the event we're going to dump into the log as free-form bytes.
269 if (!prepareBuffer(TSC, CPU, clock_gettime, MetadataRecSize + EventSize)) {
270 TLD.BQ = nullptr;
271 return;
272 }
273
274 // Write the custom event metadata record, which consists of the following
275 // information:
276 // - 8 bytes (64-bits) for the full TSC when the event started.
277 // - 4 bytes (32-bits) for the length of the data.
278 MetadataRecord CustomEvent;
279 CustomEvent.Type = uint8_t(RecordType::Metadata);
280 CustomEvent.RecordKind =
281 uint8_t(MetadataRecord::RecordKinds::CustomEventMarker);
282 constexpr auto TSCSize = sizeof(TC.TSC);
283 std::memcpy(&CustomEvent.Data, &ReducedEventSize, sizeof(int32_t));
284 std::memcpy(&CustomEvent.Data[sizeof(int32_t)], &TSC, TSCSize);
285 std::memcpy(TLD.RecordPtr, &CustomEvent, sizeof(CustomEvent));
286 TLD.RecordPtr += sizeof(CustomEvent);
287 std::memcpy(TLD.RecordPtr, Event, ReducedEventSize);
288 incrementExtents(MetadataRecSize + EventSize);
289 endBufferIfFull();
290}
291
292XRayLogInitStatus fdrLoggingInit(std::size_t BufferSize, std::size_t BufferMax,
293 void *Options,
294 size_t OptionsSize) XRAY_NEVER_INSTRUMENT {
295 if (OptionsSize != sizeof(FDRLoggingOptions)) {
296 if (__sanitizer::Verbosity())
297 Report("Cannot initialize FDR logging; wrong size for options: %d\n",
298 OptionsSize);
299 return static_cast<XRayLogInitStatus>(__sanitizer::atomic_load(
300 &LoggingStatus, __sanitizer::memory_order_acquire));
301 }
302 s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
303 if (!__sanitizer::atomic_compare_exchange_strong(
304 &LoggingStatus, &CurrentStatus,
305 XRayLogInitStatus::XRAY_LOG_INITIALIZING,
306 __sanitizer::memory_order_release)) {
307 if (__sanitizer::Verbosity())
308 Report("Cannot initialize already initialized implementation.\n");
309 return static_cast<XRayLogInitStatus>(CurrentStatus);
310 }
311
312 {
313 __sanitizer::SpinMutexLock Guard(&FDROptionsMutex);
314 memcpy(&FDROptions, Options, OptionsSize);
315 }
316
317 bool Success = false;
318
319 if (BQ != nullptr) {
320 delete BQ;
321 BQ = nullptr;
322 }
323
324 if (BQ == nullptr)
325 BQ = new BufferQueue(BufferSize, BufferMax, Success);
326
327 if (!Success) {
328 Report("BufferQueue init failed.\n");
329 if (BQ != nullptr) {
330 delete BQ;
331 BQ = nullptr;
332 }
333 return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
334 }
335
336 static bool UNUSED Once = [] {
337 pthread_key_create(&__xray_fdr_internal::Key, +[](void *) {
338 auto &TLD = __xray_fdr_internal::getThreadLocalData();
339 if (TLD.BQ == nullptr)
340 return;
341 auto EC = TLD.BQ->releaseBuffer(TLD.Buffer);
342 if (EC != BufferQueue::ErrorCode::Ok)
343 Report("At thread exit, failed to release buffer at %p; error=%s\n",
344 TLD.Buffer.Data, BufferQueue::getErrorString(EC));
345 });
346 return false;
347 }();
348
349 // Arg1 handler should go in first to avoid concurrent code accidentally
350 // falling back to arg0 when it should have run arg1.
351 __xray_set_handler_arg1(fdrLoggingHandleArg1);
352 // Install the actual handleArg0 handler after initialising the buffers.
353 __xray_set_handler(fdrLoggingHandleArg0);
354 __xray_set_customevent_handler(fdrLoggingHandleCustomEvent);
355
356 __sanitizer::atomic_store(&LoggingStatus,
357 XRayLogInitStatus::XRAY_LOG_INITIALIZED,
358 __sanitizer::memory_order_release);
359
360 if (__sanitizer::Verbosity())
361 Report("XRay FDR init successful.\n");
362 return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
363}
364
365bool fdrLogDynamicInitializer() XRAY_NEVER_INSTRUMENT {
366 using namespace __xray;
367 XRayLogImpl Impl{
368 fdrLoggingInit,
369 fdrLoggingFinalize,
370 fdrLoggingHandleArg0,
371 fdrLoggingFlush,
372 };
373 auto RegistrationResult = __xray_log_register_mode("xray-fdr", Impl);
374 if (RegistrationResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK &&
375 __sanitizer::Verbosity())
376 Report("Cannot register XRay FDR mode to 'xray-fdr'; error = %d\n",
377 RegistrationResult);
378 if (flags()->xray_fdr_log ||
379 !__sanitizer::internal_strcmp(flags()->xray_mode, "xray-fdr"))
380 __xray_set_log_impl(Impl);
381 return true;
382}
383
384} // namespace __xray
385
386static auto UNUSED Unused = __xray::fdrLogDynamicInitializer();

/build/llvm-toolchain-snapshot-7~svn329677/projects/compiler-rt/lib/xray/xray_fdr_logging_impl.h

1//===-- xray_fdr_logging_impl.h ---------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of XRay, a dynamic runtime instrumentation system.
11//
12// Here we implement the thread local state management and record i/o for Flight
13// Data Recorder mode for XRay, where we use compact structures to store records
14// in memory as well as when writing out the data to files.
15//
16//===----------------------------------------------------------------------===//
17#ifndef XRAY_XRAY_FDR_LOGGING_IMPL_H
18#define XRAY_XRAY_FDR_LOGGING_IMPL_H
19
20#include <cassert>
21#include <cstddef>
22#include <cstring>
23#include <limits>
24#include <pthread.h>
25#include <sys/syscall.h>
26#include <time.h>
27#include <type_traits>
28#include <unistd.h>
29
30#include "sanitizer_common/sanitizer_common.h"
31#include "xray/xray_log_interface.h"
32#include "xray_buffer_queue.h"
33#include "xray_defs.h"
34#include "xray_fdr_log_records.h"
35#include "xray_flags.h"
36#include "xray_tsc.h"
37
38namespace __xray {
39
40__sanitizer::atomic_sint32_t LoggingStatus = {
41 XRayLogInitStatus::XRAY_LOG_UNINITIALIZED};
42
43/// We expose some of the state transitions when FDR logging mode is operating
44/// such that we can simulate a series of log events that may occur, and test
45/// with determinism without worrying about the real CPU time.
46///
47/// Because the code uses thread_local allocation extensively as part of its
48/// design, callers that wish to test events occurring on different threads
49/// will actually have to run them on different threads.
50///
51/// This also means that it is possible to break invariants maintained by
52/// cooperation with xray_fdr_logging class, so be careful and think twice.
53namespace __xray_fdr_internal {
54
55/// Writes the new buffer record and wallclock time that begin a buffer for the
56/// current thread.
57static void writeNewBufferPreamble(tid_t Tid, timespec TS);
58
59/// Writes a Function Record to the buffer associated with the current thread.
60static void writeFunctionRecord(int FuncId, uint32_t TSCDelta,
61 XRayEntryType EntryType);
62
63/// Sets up a new buffer in thread_local storage and writes a preamble. The
64/// wall_clock_reader function is used to populate the WallTimeRecord entry.
65static void setupNewBuffer(int (*wall_clock_reader)(clockid_t,
66 struct timespec *));
67
68/// TSC Wrap records are written when a TSC delta encoding scheme overflows.
69static void writeTSCWrapMetadata(uint64_t TSC);
70
71// Group together thread-local-data in a struct, then hide it behind a function
72// call so that it can be initialized on first use instead of as a global. We
73// force the alignment to 64-bytes for x86 cache line alignment, as this
74// structure is used in the hot path of implementation.
75struct alignas(64) ThreadLocalData {
76 BufferQueue::Buffer Buffer;
77 char *RecordPtr = nullptr;
78 // The number of FunctionEntry records immediately preceding RecordPtr.
79 uint8_t NumConsecutiveFnEnters = 0;
80
81 // The number of adjacent, consecutive pairs of FunctionEntry, Tail Exit
82 // records preceding RecordPtr.
83 uint8_t NumTailCalls = 0;
84
85 // We use a thread_local variable to keep track of which CPUs we've already
86 // run, and the TSC times for these CPUs. This allows us to stop repeating the
87 // CPU field in the function records.
88 //
89 // We assume that we'll support only 65536 CPUs for x86_64.
90 uint16_t CurrentCPU = std::numeric_limits<uint16_t>::max();
91 uint64_t LastTSC = 0;
92 uint64_t LastFunctionEntryTSC = 0;
93
94 // Make sure a thread that's ever called handleArg0 has a thread-local
95 // live reference to the buffer queue for this particular instance of
96 // FDRLogging, and that we're going to clean it up when the thread exits.
97 BufferQueue *BQ = nullptr;
98};
99
100static_assert(std::is_trivially_destructible<ThreadLocalData>::value,
101 "ThreadLocalData must be trivially destructible");
102
103static constexpr auto MetadataRecSize = sizeof(MetadataRecord);
104static constexpr auto FunctionRecSize = sizeof(FunctionRecord);
105
106// Use a global pthread key to identify thread-local data for logging.
107static pthread_key_t Key;
108
109// This function will initialize the thread-local data structure used by the FDR
110// logging implementation and return a reference to it. The implementation
111// details require a bit of care to maintain.
112//
113// First, some requirements on the implementation in general:
114//
115// - XRay handlers should not call any memory allocation routines that may
116// delegate to an instrumented implementation. This means functions like
117// malloc() and free() should not be called while instrumenting.
118//
119// - We would like to use some thread-local data initialized on first-use of
120// the XRay instrumentation. These allow us to implement unsynchronized
121// routines that access resources associated with the thread.
122//
123// The implementation here uses a few mechanisms that allow us to provide both
124// the requirements listed above. We do this by:
125//
126// 1. Using a thread-local aligned storage buffer for representing the
127// ThreadLocalData struct. This data will be uninitialized memory by
128// design.
129//
130// 2. Not requiring a thread exit handler/implementation, keeping the
131// thread-local as purely a collection of references/data that do not
132// require cleanup.
133//
134// We're doing this to avoid using a `thread_local` object that has a
135// non-trivial destructor, because the C++ runtime might call std::malloc(...)
136// to register calls to destructors. Deadlocks may arise when, for example, an
137// externally provided malloc implementation is XRay instrumented, and
138// initializing the thread-locals involves calling into malloc. A malloc
139// implementation that does global synchronization might be holding a lock for a
140// critical section, calling a function that might be XRay instrumented (and
141// thus in turn calling into malloc by virtue of registration of the
142// thread_local's destructor).
143static ThreadLocalData &getThreadLocalData() {
144 static_assert(alignof(ThreadLocalData) >= 64,
145 "ThreadLocalData must be cache line aligned.");
146 thread_local ThreadLocalData TLD;
147 thread_local bool UNUSED ThreadOnce = [] {
148 pthread_setspecific(Key, &TLD);
149 return false;
150 }();
151 return TLD;
152}
153
154//-----------------------------------------------------------------------------|
155// The rest of the file is implementation. |
156//-----------------------------------------------------------------------------|
157// Functions are implemented in the header for inlining since we don't want |
158// to grow the stack when we've hijacked the binary for logging. |
159//-----------------------------------------------------------------------------|
160
161namespace {
162
163class RecursionGuard {
164 volatile bool &Running;
165 const bool Valid;
166
167public:
168 explicit RecursionGuard(volatile bool &R) : Running(R), Valid(!R) {
169 if (Valid)
170 Running = true;
171 }
172
173 RecursionGuard(const RecursionGuard &) = delete;
174 RecursionGuard(RecursionGuard &&) = delete;
175 RecursionGuard &operator=(const RecursionGuard &) = delete;
176 RecursionGuard &operator=(RecursionGuard &&) = delete;
177
178 explicit operator bool() const { return Valid; }
179
180 ~RecursionGuard() noexcept {
181 if (Valid)
182 Running = false;
183 }
184};
185
186} // namespace
187
188static void writeNewBufferPreamble(tid_t Tid,
189 timespec TS) XRAY_NEVER_INSTRUMENT {
190 static constexpr int InitRecordsCount = 2;
191 auto &TLD = getThreadLocalData();
192 MetadataRecord Metadata[InitRecordsCount];
193 {
194 // Write out a MetadataRecord to signify that this is the start of a new
195 // buffer, associated with a particular thread, with a new CPU. For the
196 // data, we have 15 bytes to squeeze as much information as we can. At this
197 // point we only write down the following bytes:
198 // - Thread ID (tid_t, cast to 4 bytes type due to Darwin being 8 bytes)
199 auto &NewBuffer = Metadata[0];
200 NewBuffer.Type = uint8_t(RecordType::Metadata);
201 NewBuffer.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewBuffer);
202 int32_t tid = static_cast<int32_t>(Tid);
203 std::memcpy(&NewBuffer.Data, &tid, sizeof(tid));
204 }
205
206 // Also write the WalltimeMarker record.
207 {
208 static_assert(sizeof(time_t) <= 8, "time_t needs to be at most 8 bytes");
209 auto &WalltimeMarker = Metadata[1];
210 WalltimeMarker.Type = uint8_t(RecordType::Metadata);
211 WalltimeMarker.RecordKind =
212 uint8_t(MetadataRecord::RecordKinds::WalltimeMarker);
213
214 // We only really need microsecond precision here, and enforce across
215 // platforms that we need 64-bit seconds and 32-bit microseconds encoded in
216 // the Metadata record.
217 int32_t Micros = TS.tv_nsec / 1000;
218 int64_t Seconds = TS.tv_sec;
219 std::memcpy(WalltimeMarker.Data, &Seconds, sizeof(Seconds));
220 std::memcpy(WalltimeMarker.Data + sizeof(Seconds), &Micros, sizeof(Micros));
221 }
222
223 TLD.NumConsecutiveFnEnters = 0;
224 TLD.NumTailCalls = 0;
225 if (TLD.BQ == nullptr || TLD.BQ->finalizing())
226 return;
227 std::memcpy(TLD.RecordPtr, Metadata, sizeof(Metadata));
228 TLD.RecordPtr += sizeof(Metadata);
229 // Since we write out the extents as the first metadata record of the
230 // buffer, we need to write out the extents including the extents record.
231 __sanitizer::atomic_store(&TLD.Buffer.Extents->Size, sizeof(Metadata),
232 __sanitizer::memory_order_release);
233}
234
235inline void setupNewBuffer(int (*wall_clock_reader)(
236 clockid_t, struct timespec *)) XRAY_NEVER_INSTRUMENT {
237 auto &TLD = getThreadLocalData();
238 auto &B = TLD.Buffer;
239 TLD.RecordPtr = static_cast<char *>(B.Data);
240 tid_t Tid = __sanitizer::GetTid();
241 timespec TS{0, 0};
242 // This is typically clock_gettime, but callers have injection ability.
243 wall_clock_reader(CLOCK_MONOTONIC, &TS);
244 writeNewBufferPreamble(Tid, TS);
245 TLD.NumConsecutiveFnEnters = 0;
246 TLD.NumTailCalls = 0;
247}
248
249static void incrementExtents(size_t Add) {
250 auto &TLD = getThreadLocalData();
251 __sanitizer::atomic_fetch_add(&TLD.Buffer.Extents->Size, Add,
252 __sanitizer::memory_order_acq_rel);
253}
254
255static void decrementExtents(size_t Subtract) {
256 auto &TLD = getThreadLocalData();
257 __sanitizer::atomic_fetch_sub(&TLD.Buffer.Extents->Size, Subtract,
258 __sanitizer::memory_order_acq_rel);
259}
260
261inline void writeNewCPUIdMetadata(uint16_t CPU,
262 uint64_t TSC) XRAY_NEVER_INSTRUMENT {
263 auto &TLD = getThreadLocalData();
264 MetadataRecord NewCPUId;
265 NewCPUId.Type = uint8_t(RecordType::Metadata);
266 NewCPUId.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewCPUId);
267
268 // The data for the New CPU will contain the following bytes:
269 // - CPU ID (uint16_t, 2 bytes)
270 // - Full TSC (uint64_t, 8 bytes)
271 // Total = 10 bytes.
272 std::memcpy(&NewCPUId.Data, &CPU, sizeof(CPU));
273 std::memcpy(&NewCPUId.Data[sizeof(CPU)], &TSC, sizeof(TSC));
274 std::memcpy(TLD.RecordPtr, &NewCPUId, sizeof(MetadataRecord));
275 TLD.RecordPtr += sizeof(MetadataRecord);
276 TLD.NumConsecutiveFnEnters = 0;
277 TLD.NumTailCalls = 0;
278 incrementExtents(sizeof(MetadataRecord));
279}
280
281inline void writeTSCWrapMetadata(uint64_t TSC) XRAY_NEVER_INSTRUMENT {
282 auto &TLD = getThreadLocalData();
283 MetadataRecord TSCWrap;
284 TSCWrap.Type = uint8_t(RecordType::Metadata);
285 TSCWrap.RecordKind = uint8_t(MetadataRecord::RecordKinds::TSCWrap);
286
287 // The data for the TSCWrap record contains the following bytes:
288 // - Full TSC (uint64_t, 8 bytes)
289 // Total = 8 bytes.
290 std::memcpy(&TSCWrap.Data, &TSC, sizeof(TSC));
291 std::memcpy(TLD.RecordPtr, &TSCWrap, sizeof(MetadataRecord));
    Step 25: Null pointer passed as an argument to a 'nonnull' parameter
292 TLD.RecordPtr += sizeof(MetadataRecord);
293 TLD.NumConsecutiveFnEnters = 0;
294 TLD.NumTailCalls = 0;
295 incrementExtents(sizeof(MetadataRecord));
296}
297
298// Call Argument metadata records store the arguments to a function in the
299// order of their appearance; holes are not supported by the buffer format.
300static inline void writeCallArgumentMetadata(uint64_t A) XRAY_NEVER_INSTRUMENT {
301 auto &TLD = getThreadLocalData();
302 MetadataRecord CallArg;
303 CallArg.Type = uint8_t(RecordType::Metadata);
304 CallArg.RecordKind = uint8_t(MetadataRecord::RecordKinds::CallArgument);
305
306 std::memcpy(CallArg.Data, &A, sizeof(A));
307 std::memcpy(TLD.RecordPtr, &CallArg, sizeof(MetadataRecord));
308 TLD.RecordPtr += sizeof(MetadataRecord);
309 incrementExtents(sizeof(MetadataRecord));
310}
311
312static inline void
313writeFunctionRecord(int FuncId, uint32_t TSCDelta,
314 XRayEntryType EntryType) XRAY_NEVER_INSTRUMENT {
315 FunctionRecord FuncRecord;
316 FuncRecord.Type = uint8_t(RecordType::Function);
317 // Only take 28 bits of the function id.
318 FuncRecord.FuncId = FuncId & ~(0x0F << 28);
319 FuncRecord.TSCDelta = TSCDelta;
320
321 auto &TLD = getThreadLocalData();
322 switch (EntryType) {
323 case XRayEntryType::ENTRY:
324 ++TLD.NumConsecutiveFnEnters;
325 FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter);
326 break;
327 case XRayEntryType::LOG_ARGS_ENTRY:
328 // We should not rewind functions with logged args.
329 TLD.NumConsecutiveFnEnters = 0;
330 TLD.NumTailCalls = 0;
331 FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter);
332 break;
333 case XRayEntryType::EXIT:
334 // If we've decided to log the function exit, we will never erase the log
335 // before it.
336 TLD.NumConsecutiveFnEnters = 0;
337 TLD.NumTailCalls = 0;
338 FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionExit);
339 break;
340 case XRayEntryType::TAIL:
341 // If we just entered the function we're tail exiting from or erased every
342 // invocation since then, this function entry tail pair is a candidate to
343 // be erased when the child function exits.
344 if (TLD.NumConsecutiveFnEnters > 0) {
345 ++TLD.NumTailCalls;
346 TLD.NumConsecutiveFnEnters = 0;
347 } else {
348 // We will never be able to erase this tail call since we have logged
349 // something in between the function entry and tail exit.
350 TLD.NumTailCalls = 0;
351 TLD.NumConsecutiveFnEnters = 0;
352 }
353 FuncRecord.RecordKind =
354 uint8_t(FunctionRecord::RecordKinds::FunctionTailExit);
355 break;
356 case XRayEntryType::CUSTOM_EVENT: {
357 // This is a bug in patching, so we'll report it once and move on.
358 static bool Once = [&] {
359 Report("Internal error: patched an XRay custom event call as a function; "
360 "func id = %d\n",
361 FuncId);
362 return true;
363 }();
364 (void)Once;
365 return;
366 }
367 }
368
369 std::memcpy(TLD.RecordPtr, &FuncRecord, sizeof(FunctionRecord));
370 TLD.RecordPtr += sizeof(FunctionRecord);
371 incrementExtents(sizeof(FunctionRecord));
372}
373
374static uint64_t thresholdTicks() {
375 static uint64_t TicksPerSec = probeRequiredCPUFeatures()
376 ? getTSCFrequency()
377 : __xray::NanosecondsPerSecond;
378 static const uint64_t ThresholdTicks =
379 TicksPerSec * flags()->xray_fdr_log_func_duration_threshold_us / 1000000;
380 return ThresholdTicks;
381}
382
383// Re-point the thread local pointer into this thread's Buffer before the recent
384// "Function Entry" record and any "Tail Call Exit" records after that.
385static void rewindRecentCall(uint64_t TSC, uint64_t &LastTSC,
386 uint64_t &LastFunctionEntryTSC, int32_t FuncId) {
387 auto &TLD = getThreadLocalData();
388 TLD.RecordPtr -= FunctionRecSize;
389 decrementExtents(FunctionRecSize);
390 FunctionRecord FuncRecord;
391 std::memcpy(&FuncRecord, TLD.RecordPtr, FunctionRecSize);
392 assert(FuncRecord.RecordKind ==
393 uint8_t(FunctionRecord::RecordKinds::FunctionEnter) &&
394 "Expected to find function entry recording when rewinding.");
395 assert(FuncRecord.FuncId == (FuncId & ~(0x0F << 28)) &&
396 "Expected matching function id when rewinding Exit");
397 --TLD.NumConsecutiveFnEnters;
398 LastTSC -= FuncRecord.TSCDelta;
399
400 // We unwound one call. Update the state and return without writing a log.
401 if (TLD.NumConsecutiveFnEnters != 0) {
402 LastFunctionEntryTSC -= FuncRecord.TSCDelta;
403 return;
404 }
405
406 // Otherwise we've rewound the stack of all function entries, we might be
407 // able to rewind further by erasing tail call functions that are being
408 // exited from via this exit.
409 LastFunctionEntryTSC = 0;
410 auto RewindingTSC = LastTSC;
411 auto RewindingRecordPtr = TLD.RecordPtr - FunctionRecSize;
412 while (TLD.NumTailCalls > 0) {
413 // Rewind the TSC back over the TAIL EXIT record.
414 FunctionRecord ExpectedTailExit;
415 std::memcpy(&ExpectedTailExit, RewindingRecordPtr, FunctionRecSize);
416
417 assert(ExpectedTailExit.RecordKind ==
418 uint8_t(FunctionRecord::RecordKinds::FunctionTailExit) &&
419 "Expected to find tail exit when rewinding.");
420 RewindingRecordPtr -= FunctionRecSize;
421 RewindingTSC -= ExpectedTailExit.TSCDelta;
422 FunctionRecord ExpectedFunctionEntry;
423 std::memcpy(&ExpectedFunctionEntry, RewindingRecordPtr, FunctionRecSize);
424 assert(ExpectedFunctionEntry.RecordKind ==
425 uint8_t(FunctionRecord::RecordKinds::FunctionEnter) &&
426 "Expected to find function entry when rewinding tail call.");
427 assert(ExpectedFunctionEntry.FuncId == ExpectedTailExit.FuncId &&
428 "Expected funcids to match when rewinding tail call.");
429
430 // This tail call exceeded the threshold duration. It will not be erased.
431 if ((TSC - RewindingTSC) >= thresholdTicks()) {
432 TLD.NumTailCalls = 0;
433 return;
434 }
435
436 // We can erase a tail exit pair that we're exiting through since
437 // its duration is under threshold.
438 --TLD.NumTailCalls;
439 RewindingRecordPtr -= FunctionRecSize;
440 RewindingTSC -= ExpectedFunctionEntry.TSCDelta;
441 TLD.RecordPtr -= 2 * FunctionRecSize;
442 LastTSC = RewindingTSC;
443 decrementExtents(2 * FunctionRecSize);
444 }
445}
446
447inline bool releaseThreadLocalBuffer(BufferQueue &BQArg) {
448 auto &TLD = getThreadLocalData();
449 auto EC = BQArg.releaseBuffer(TLD.Buffer);
450 if (EC != BufferQueue::ErrorCode::Ok) {
451 Report("Failed to release buffer at %p; error=%s\n", TLD.Buffer.Data,
452 BufferQueue::getErrorString(EC));
453 return false;
454 }
455 return true;
456}
457
458inline bool prepareBuffer(uint64_t TSC, unsigned char CPU,
459 int (*wall_clock_reader)(clockid_t,
460 struct timespec *),
461 size_t MaxSize) XRAY_NEVER_INSTRUMENT {
462 auto &TLD = getThreadLocalData();
463 char *BufferStart = static_cast<char *>(TLD.Buffer.Data);
464 if ((TLD.RecordPtr + MaxSize) > (BufferStart + TLD.Buffer.Size)) {
465 if (!releaseThreadLocalBuffer(*TLD.BQ))
466 return false;
467 auto EC = TLD.BQ->getBuffer(TLD.Buffer);
468 if (EC != BufferQueue::ErrorCode::Ok) {
469 Report("Failed to acquire a buffer; error=%s\n",
470 BufferQueue::getErrorString(EC));
471 return false;
472 }
473 setupNewBuffer(wall_clock_reader);
474
475 // Always write the CPU metadata as the first record in the buffer.
476 writeNewCPUIdMetadata(CPU, TSC);
477 }
478 return true;
479}
480
481inline bool
482isLogInitializedAndReady(BufferQueue *LBQ, uint64_t TSC, unsigned char CPU,
483 int (*wall_clock_reader)(clockid_t, struct timespec *))
484 XRAY_NEVER_INSTRUMENT {
485 // Bail out right away if logging is not initialized yet.
486 // We should take the opportunity to release the buffer though.
487 auto Status = __sanitizer::atomic_load(&LoggingStatus,
488 __sanitizer::memory_order_acquire);
489 auto &TLD = getThreadLocalData();
490 if (Status != XRayLogInitStatus::XRAY_LOG_INITIALIZED) {
    Step 6: Assuming 'Status' is equal to XRAY_LOG_INITIALIZED
    Step 7: Taking false branch
491 if (TLD.RecordPtr != nullptr &&
492 (Status == XRayLogInitStatus::XRAY_LOG_FINALIZING ||
493 Status == XRayLogInitStatus::XRAY_LOG_FINALIZED)) {
494 if (!releaseThreadLocalBuffer(*LBQ))
495 return false;
496 TLD.RecordPtr = nullptr;
497 return false;
498 }
499 return false;
500 }
501
502 if (__sanitizer::atomic_load(&LoggingStatus,
    Step 9: Taking true branch
503 __sanitizer::memory_order_acquire) !=
504 XRayLogInitStatus::XRAY_LOG_INITIALIZED ||
505 LBQ->finalizing()) {
    Step 8: Assuming the condition is true
506 if (!releaseThreadLocalBuffer(*LBQ))
    Step 10: Taking false branch
507 return false;
508 TLD.RecordPtr = nullptr;
    Step 11: Null pointer value stored to 'TLD.RecordPtr'
509 }
510
511 if (TLD.Buffer.Data == nullptr) {
    Step 12: Assuming the condition is false
    Step 13: Taking false branch
512 auto EC = LBQ->getBuffer(TLD.Buffer);
513 if (EC != BufferQueue::ErrorCode::Ok) {
514 auto LS = __sanitizer::atomic_load(&LoggingStatus,
515 __sanitizer::memory_order_acquire);
516 if (LS != XRayLogInitStatus::XRAY_LOG_FINALIZING &&
517 LS != XRayLogInitStatus::XRAY_LOG_FINALIZED)
518 Report("Failed to acquire a buffer; error=%s\n",
519 BufferQueue::getErrorString(EC));
520 return false;
521 }
522
523 setupNewBuffer(wall_clock_reader);
524
525 // Always write the CPU metadata as the first record in the buffer.
526 writeNewCPUIdMetadata(CPU, TSC);
527 }
528
529 if (TLD.CurrentCPU == std::numeric_limits<uint16_t>::max()) {
    Step 14: Assuming the condition is false
    Step 15: Taking false branch
530 // This means this is the first CPU this thread has ever run on. We set
531 // the current CPU and record this as the first TSC we've seen.
532 TLD.CurrentCPU = CPU;
533 writeNewCPUIdMetadata(CPU, TSC);
534 }
535
536 return true;
537} // namespace __xray_fdr_internal
538
539// Compute the TSC difference between the time of measurement and the previous
540// event. There are a few interesting situations we need to account for:
541//
542// - The thread has migrated to a different CPU. If this is the case, then
543// we write down the following records:
544//
545// 1. A 'NewCPUId' Metadata record.
546// 2. A FunctionRecord with a 0 for the TSCDelta field.
547//
548// - The TSC delta is greater than the 32 bits we can store in a
549// FunctionRecord. In this case we write down the following records:
550//
551// 1. A 'TSCWrap' Metadata record.
552// 2. A FunctionRecord with a 0 for the TSCDelta field.
553//
554// - The TSC delta is representable within the 32 bits we can store in a
555// FunctionRecord. In this case we write down just a FunctionRecord with
556// the correct TSC delta.
557inline uint32_t writeCurrentCPUTSC(ThreadLocalData &TLD, uint64_t TSC,
558 uint8_t CPU) {
559 if (CPU != TLD.CurrentCPU) {
    Step 20: Assuming the condition is false
    Step 21: Taking false branch
560 // We've moved to a new CPU.
561 writeNewCPUIdMetadata(CPU, TSC);
562 return 0;
563 }
564 // If the delta is greater than the range for a uint32_t, then we write out
565 // the TSC wrap metadata entry with the full TSC, and the TSC for the
566 // function record be 0.
567 uint64_t Delta = TSC - TLD.LastTSC;
568 if (Delta <= std::numeric_limits<uint32_t>::max())
    Step 22: Assuming the condition is false
    Step 23: Taking false branch
569 return Delta;
570
571 writeTSCWrapMetadata(TSC);
    Step 24: Calling 'writeTSCWrapMetadata'
572 return 0;
573}
574
575inline void endBufferIfFull() XRAY_NEVER_INSTRUMENT {
576 auto &TLD = getThreadLocalData();
577 auto BufferStart = static_cast<char *>(TLD.Buffer.Data);
578 if ((TLD.RecordPtr + MetadataRecSize) - BufferStart <=
579 ptrdiff_t{MetadataRecSize}) {
580 if (!releaseThreadLocalBuffer(*TLD.BQ))
581 return;
582 TLD.RecordPtr = nullptr;
583 }
584}
585
586thread_local volatile bool Running = false;
587
588/// Here's where the meat of the processing happens. The writer captures
589/// function entry, exit and tail exit points with a time and will create
590/// TSCWrap, NewCPUId and Function records as necessary. The writer might
591/// walk backward through its buffer and erase trivial functions to avoid
592/// polluting the log and may use the buffer queue to obtain or release a
593/// buffer.
594inline void processFunctionHook(int32_t FuncId, XRayEntryType Entry,
595 uint64_t TSC, unsigned char CPU, uint64_t Arg1,
596 int (*wall_clock_reader)(clockid_t,
597 struct timespec *),
598 BufferQueue *BQ) XRAY_NEVER_INSTRUMENT {
599 // Prevent signal handler recursion, so in case we're already in a log writing
600 // mode and the signal handler comes in (and is also instrumented) then we
601 // don't want to be clobbering potentially partial writes already happening in
602 // the thread. We use a simple thread_local latch to only allow one on-going
603 // handleArg0 to happen at any given time.
604 RecursionGuard Guard{Running};
605 if (!Guard) {
    Step 2: Taking false branch
606 assert(Running == true && "RecursionGuard is buggy!");
607 return;
608 }
609
610 auto &TLD = getThreadLocalData();
611
612 // In case the reference has been cleaned up before, we make sure we
613 // initialize it to the provided BufferQueue.
614 if (TLD.BQ == nullptr)
    Step 3: Assuming the condition is false
    Step 4: Taking false branch
615 TLD.BQ = BQ;
616
617 if (!isLogInitializedAndReady(TLD.BQ, TSC, CPU, wall_clock_reader))
    Step 5: Calling 'isLogInitializedAndReady'
    Step 16: Returning from 'isLogInitializedAndReady'
    Step 17: Taking false branch
618 return;
619
620 // Before we go setting up writing new function entries, we need to be really
621 // careful about the pointer math we're doing. This means we need to ensure
622 // that the record we are about to write is going to fit into the buffer,
623 // without overflowing the buffer.
624 //
625 // To do this properly, we use the following assumptions:
626 //
627 // - The least number of bytes we will ever write is 8
628 // (sizeof(FunctionRecord)) only if the delta between the previous entry
629 // and this entry is within 32 bits.
630 // - The most number of bytes we will ever write is 8 + 16 + 16 = 40.
631 // This is computed by:
632 //
633 // MaxSize = sizeof(FunctionRecord) + 2 * sizeof(MetadataRecord)
634 //
635 // These arise in the following cases:
636 //
637 // 1. When the delta between the TSC we get and the previous TSC for the
638 // same CPU is outside of the uint32_t range, we end up having to
639 // write a MetadataRecord to indicate a "tsc wrap" before the actual
640 // FunctionRecord.
641 // 2. When we learn that we've moved CPUs, we need to write a
642 // MetadataRecord to indicate a "cpu change", and thus write out the
643 // current TSC for that CPU before writing out the actual
644 // FunctionRecord.
645 // 3. When we learn about a new CPU ID, we need to write down a "new cpu
646 // id" MetadataRecord before writing out the actual FunctionRecord.
647 // 4. The second MetadataRecord is the optional function call argument.
648 //
649 // So the math we need to do is to determine whether writing 40 bytes past the
650 // current pointer exceeds the buffer's maximum size. If we don't have enough
651 // space to write 40 bytes in the buffer, we need get a new Buffer, set it up
652 // properly before doing any further writing.
653 size_t MaxSize = FunctionRecSize + 2 * MetadataRecSize;
654 if (!prepareBuffer(TSC, CPU, wall_clock_reader, MaxSize)) {
    Step 18: Taking false branch
655 TLD.BQ = nullptr;
656 return;
657 }
658
659 // By this point, we are now ready to write up to 40 bytes (explained above).
660 assert((TLD.RecordPtr + MaxSize) - static_cast<char *>(TLD.Buffer.Data) >=
661 static_cast<ptrdiff_t>(MetadataRecSize) &&
662 "Misconfigured BufferQueue provided; Buffer size not large enough.");
663
664 auto RecordTSCDelta = writeCurrentCPUTSC(TLD, TSC, CPU);
    Step 19: Calling 'writeCurrentCPUTSC'
665 TLD.LastTSC = TSC;
666 TLD.CurrentCPU = CPU;
667 switch (Entry) {
668 case XRayEntryType::ENTRY:
669 case XRayEntryType::LOG_ARGS_ENTRY:
670 // Update the thread local state for the next invocation.
671 TLD.LastFunctionEntryTSC = TSC;
672 break;
673 case XRayEntryType::TAIL:
674 case XRayEntryType::EXIT:
675 // Break out and write the exit record if we can't erase any functions.
676 if (TLD.NumConsecutiveFnEnters == 0 ||
677 (TSC - TLD.LastFunctionEntryTSC) >= thresholdTicks())
678 break;
679 rewindRecentCall(TSC, TLD.LastTSC, TLD.LastFunctionEntryTSC, FuncId);
680 return; // without writing log.
681 case XRayEntryType::CUSTOM_EVENT: {
682 // This is a bug in patching, so we'll report it once and move on.
683 static bool Once = [&] {
684 Report("Internal error: patched an XRay custom event call as a function; "
685 "func id = %d",
686 FuncId);
687 return true;
688 }();
689 (void)Once;
690 return;
691 }
692 }
693
694 writeFunctionRecord(FuncId, RecordTSCDelta, Entry);
695 if (Entry == XRayEntryType::LOG_ARGS_ENTRY)
696 writeCallArgumentMetadata(Arg1);
697
698 // If we've exhausted the buffer by this time, we then release the buffer to
699 // make sure that other threads may start using this buffer.
700 endBufferIfFull();
701}
702
703} // namespace __xray_fdr_internal
704} // namespace __xray
705
706#endif // XRAY_XRAY_FDR_LOGGING_IMPL_H
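
Reading the path above: step 11 shows isLogInitializedAndReady clearing TLD.RecordPtr when the buffer queue is finalizing, yet the function can still fall through and return true; processFunctionHook then reaches writeCurrentCPUTSC (step 19) and writeTSCWrapMetadata (step 24), whose memcpy into TLD.RecordPtr at line 291 is the flagged call. The condensed model below is only an illustration of how that path could be cut — hypothetical names, not the upstream code or its actual fix — by reporting "not ready" once the record pointer has been cleared:

  #include <cstdint>
  #include <cstring>

  namespace model {

  struct TLS {
    char Buffer[64];
    char *RecordPtr = Buffer;
    bool Finalizing = false;
  };

  // Models isLogInitializedAndReady: once the pointer is cleared, report
  // "not ready" so no caller writes through it.
  bool isReady(TLS &TLD) {
    if (TLD.Finalizing) {
      TLD.RecordPtr = nullptr;  // corresponds to step 11 in the path above
      return false;             // cutting the path here avoids the null memcpy
    }
    return true;
  }

  // Models writeTSCWrapMetadata: the memcpy destination must not be null.
  void writeTSCWrap(TLS &TLD, std::uint64_t TSC) {
    std::memcpy(TLD.RecordPtr, &TSC, sizeof(TSC));
    TLD.RecordPtr += sizeof(TSC);
  }

  // Models processFunctionHook: only write after the readiness check passes.
  void processHook(TLS &TLD, std::uint64_t TSC) {
    if (!isReady(TLD))
      return;
    writeTSCWrap(TLD, TSC);
  }

  } // namespace model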