Bug Summary

File: projects/compiler-rt/lib/xray/xray_fdr_logging.cc
Warning: line 232, column 3
Null pointer passed as an argument to a 'nonnull' parameter
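
Reading the path annotations in the listing, the report reduces to a small shape: during finalization the handler releases its buffer and clears the thread-local RecordPtr, but Buffer.Buffer itself stays non-null, so the branch that would acquire a fresh buffer and reset RecordPtr is skipped; the next metadata write in writeNewCPUIdMetadata then hands the null RecordPtr to memcpy, whose destination parameter is declared 'nonnull'. A minimal sketch of that shape, with hypothetical simplified helpers (names borrowed from the listing; this is an illustration, not the actual XRay sources):

  // Illustrative reduction of the reported path; heavily simplified.
  #include <cstddef>
  #include <cstring>

  thread_local char *RecordPtr = nullptr;   // thread-local write cursor
  thread_local void *BufferPtr = nullptr;   // stands in for Buffer.Buffer

  void writeMetadataRecord(const void *Rec, std::size_t Size) {
    // memcpy's destination is a 'nonnull' parameter; when RecordPtr is still
    // nullptr here, the analyzer reports the call (step 10, line 232).
    std::memcpy(RecordPtr, Rec, Size);
    RecordPtr += Size;
  }

  void handleArg0Sketch() {
    // Finalization path: the buffer is released back to the queue and the
    // cursor is cleared (step 5, line 366), but BufferPtr stays non-null.
    RecordPtr = nullptr;

    // Because BufferPtr is non-null, this branch is skipped (steps 6-7,
    // line 369), so setupNewBuffer() never re-points RecordPtr at a buffer.
    if (BufferPtr == nullptr) {
      /* acquire a buffer and reset RecordPtr */
    }

    // First event on this CPU (step 8): a NewCPUId metadata record is
    // written (step 9, line 385), copying through the null RecordPtr.
    char NewCPUId[16] = {};
    writeMetadataRecord(NewCPUId, sizeof(NewCPUId));
  }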

Annotated Source Code

1//===-- xray_fdr_logging.cc ------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of XRay, a dynamic runtime instrumentation system.
11//
12// Here we implement the Flight Data Recorder mode for XRay, where we use
13// compact structures to store records in memory as well as when writing out the
14// data to files.
15//
16//===----------------------------------------------------------------------===//
17#include "xray_fdr_logging.h"
18#include <algorithm>
19#include <bitset>
20#include <cassert>
21#include <cstring>
22#include <memory>
23#include <sys/syscall.h>
24#include <sys/time.h>
25#include <time.h>
26#include <unistd.h>
27#include <unordered_map>
28
29#include "sanitizer_common/sanitizer_common.h"
30#include "xray/xray_interface.h"
31#include "xray/xray_records.h"
32#include "xray_buffer_queue.h"
33#include "xray_defs.h"
34#include "xray_flags.h"
35#include "xray_tsc.h"
36#include "xray_utils.h"
37
38namespace __xray {
39
40// Global BufferQueue.
41std::shared_ptr<BufferQueue> BQ;
42
43std::atomic<XRayLogInitStatus> LoggingStatus{
44 XRayLogInitStatus::XRAY_LOG_UNINITIALIZED};
45
46std::atomic<XRayLogFlushStatus> LogFlushStatus{
47 XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING};
48
49std::unique_ptr<FDRLoggingOptions> FDROptions;
50
51XRayLogInitStatus fdrLoggingInit(std::size_t BufferSize, std::size_t BufferMax,
52 void *Options,
53 size_t OptionsSize) XRAY_NEVER_INSTRUMENT {
54 assert(OptionsSize == sizeof(FDRLoggingOptions));
55 XRayLogInitStatus CurrentStatus = XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
56 if (!LoggingStatus.compare_exchange_strong(
57 CurrentStatus, XRayLogInitStatus::XRAY_LOG_INITIALIZING,
58 std::memory_order_release, std::memory_order_relaxed))
59 return CurrentStatus;
60
61 FDROptions.reset(new FDRLoggingOptions());
62 *FDROptions = *reinterpret_cast<FDRLoggingOptions *>(Options);
63 if (FDROptions->ReportErrors)
64 SetPrintfAndReportCallback(printToStdErr);
65
66 bool Success = false;
67 BQ = std::make_shared<BufferQueue>(BufferSize, BufferMax, Success);
68 if (!Success) {
69 Report("BufferQueue init failed.\n");
70 return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
71 }
72
73 // Install the actual handleArg0 handler after initialising the buffers.
74 __xray_set_handler(fdrLoggingHandleArg0);
75
76 LoggingStatus.store(XRayLogInitStatus::XRAY_LOG_INITIALIZED,
77 std::memory_order_release);
78 return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
79}
80
81// Must finalize before flushing.
82XRayLogFlushStatus fdrLoggingFlush() XRAY_NEVER_INSTRUMENT {
83 if (LoggingStatus.load(std::memory_order_acquire) !=
84 XRayLogInitStatus::XRAY_LOG_FINALIZED)
85 return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
86
87 XRayLogFlushStatus Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
88 if (!LogFlushStatus.compare_exchange_strong(
89 Result, XRayLogFlushStatus::XRAY_LOG_FLUSHING,
90 std::memory_order_release, std::memory_order_relaxed))
91 return Result;
92
93 // Make a copy of the BufferQueue pointer to prevent other threads that may be
94 // resetting it from blowing away the queue prematurely while we're dealing
95 // with it.
96 auto LocalBQ = BQ;
97
98 // We write out the file in the following format:
99 //
100 // 1) We write down the XRay file header with version 1, type FDR_LOG.
101 // 2) Then we use the 'apply' member of the BufferQueue that's live, to
102 // ensure that at this point in time we write down the buffers that have
103 // been released (and marked "used") -- we dump the full buffer for now
104 // (fixed-sized) and let the tools reading the buffers deal with the data
105 // afterwards.
106 //
107 int Fd = FDROptions->Fd;
108 if (Fd == -1)
109 Fd = getLogFD();
110 if (Fd == -1) {
111 auto Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
112 LogFlushStatus.store(Result, std::memory_order_release);
113 return Result;
114 }
115
116 XRayFileHeader Header;
117 Header.Version = 1;
118 Header.Type = FileTypes::FDR_LOG;
119 Header.CycleFrequency = getTSCFrequency();
120 // FIXME: Actually check whether we have 'constant_tsc' and 'nonstop_tsc'
121 // before setting the values in the header.
122 Header.ConstantTSC = 1;
123 Header.NonstopTSC = 1;
124 clock_gettime(CLOCK_REALTIME, &Header.TS);
125 retryingWriteAll(Fd, reinterpret_cast<char *>(&Header),
126 reinterpret_cast<char *>(&Header) + sizeof(Header));
127 LocalBQ->apply([&](const BufferQueue::Buffer &B) {
128 retryingWriteAll(Fd, reinterpret_cast<char *>(B.Buffer),
129 reinterpret_cast<char *>(B.Buffer) + B.Size);
130 });
131 LogFlushStatus.store(XRayLogFlushStatus::XRAY_LOG_FLUSHED,
132 std::memory_order_release);
133 return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
134}
135
136XRayLogInitStatus fdrLoggingFinalize() XRAY_NEVER_INSTRUMENT {
137 XRayLogInitStatus CurrentStatus = XRayLogInitStatus::XRAY_LOG_INITIALIZED;
138 if (!LoggingStatus.compare_exchange_strong(
139 CurrentStatus, XRayLogInitStatus::XRAY_LOG_FINALIZING,
140 std::memory_order_release, std::memory_order_relaxed))
141 return CurrentStatus;
142
143 // Do special things to make the log finalize itself, and not allow any more
144 // operations to be performed until re-initialized.
145 BQ->finalize();
146
147 LoggingStatus.store(XRayLogInitStatus::XRAY_LOG_FINALIZED,
148 std::memory_order_release);
149 return XRayLogInitStatus::XRAY_LOG_FINALIZED;
150}
151
152XRayLogInitStatus fdrLoggingReset() XRAY_NEVER_INSTRUMENT {
153 XRayLogInitStatus CurrentStatus = XRayLogInitStatus::XRAY_LOG_FINALIZED;
154 if (!LoggingStatus.compare_exchange_strong(
155 CurrentStatus, XRayLogInitStatus::XRAY_LOG_UNINITIALIZED,
156 std::memory_order_release, std::memory_order_relaxed))
157 return CurrentStatus;
158
159 // Release the in-memory buffer queue.
160 BQ.reset();
161
162 // Spin until the flushing status is flushed.
163 XRayLogFlushStatus CurrentFlushingStatus =
164 XRayLogFlushStatus::XRAY_LOG_FLUSHED;
165 while (!LogFlushStatus.compare_exchange_weak(
166 CurrentFlushingStatus, XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING,
167 std::memory_order_release, std::memory_order_relaxed)) {
168 if (CurrentFlushingStatus == XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING)
169 break;
170 CurrentFlushingStatus = XRayLogFlushStatus::XRAY_LOG_FLUSHED;
171 }
172
173 // At this point, we know that the status is flushed, and that we can assume
174 return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
175}
176
177namespace {
178thread_local BufferQueue::Buffer Buffer;
179thread_local char *RecordPtr = nullptr;
180
181void setupNewBuffer(const BufferQueue::Buffer &Buffer) XRAY_NEVER_INSTRUMENT {
182 RecordPtr = static_cast<char *>(Buffer.Buffer);
183
184 static constexpr int InitRecordsCount = 2;
185 std::aligned_storage<sizeof(MetadataRecord)>::type Records[InitRecordsCount];
186 {
187 // Write out a MetadataRecord to signify that this is the start of a new
188 // buffer, associated with a particular thread, with a new CPU. For the
189 // data, we have 15 bytes to squeeze as much information as we can. At this
190 // point we only write down the following bytes:
191 // - Thread ID (pid_t, 4 bytes)
192 auto &NewBuffer = *reinterpret_cast<MetadataRecord *>(&Records[0]);
193 NewBuffer.Type = uint8_t(RecordType::Metadata);
194 NewBuffer.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewBuffer);
195 pid_t Tid = syscall(SYS_gettid);
196 std::memcpy(&NewBuffer.Data, &Tid, sizeof(pid_t));
197 }
198
199 // Also write the WalltimeMarker record.
200 {
201 static_assert(sizeof(time_t) <= 8, "time_t needs to be at most 8 bytes");
202 auto &WalltimeMarker = *reinterpret_cast<MetadataRecord *>(&Records[1]);
203 WalltimeMarker.Type = uint8_t(RecordType::Metadata);
204 WalltimeMarker.RecordKind =
205 uint8_t(MetadataRecord::RecordKinds::WalltimeMarker);
206 timespec TS{0, 0};
207 clock_gettime(CLOCK_MONOTONIC, &TS);
208
209 // We only really need microsecond precision here, and enforce across
210 // platforms that we need 64-bit seconds and 32-bit microseconds encoded in
211 // the Metadata record.
212 int32_t Micros = TS.tv_nsec / 1000;
213 int64_t Seconds = TS.tv_sec;
214 std::memcpy(WalltimeMarker.Data, &Seconds, sizeof(Seconds));
215 std::memcpy(WalltimeMarker.Data + sizeof(Seconds), &Micros, sizeof(Micros));
216 }
217 std::memcpy(RecordPtr, Records, sizeof(MetadataRecord) * InitRecordsCount);
218 RecordPtr += sizeof(MetadataRecord) * InitRecordsCount;
219}
220
221void writeNewCPUIdMetadata(uint16_t CPU, uint64_t TSC) XRAY_NEVER_INSTRUMENT {
222 MetadataRecord NewCPUId;
223 NewCPUId.Type = uint8_t(RecordType::Metadata);
224 NewCPUId.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewCPUId);
225
226 // The data for the New CPU will contain the following bytes:
227 // - CPU ID (uint16_t, 2 bytes)
228 // - Full TSC (uint64_t, 8 bytes)
229 // Total = 12 bytes.
230 std::memcpy(&NewCPUId.Data, &CPU, sizeof(CPU));
231 std::memcpy(&NewCPUId.Data[sizeof(CPU)], &TSC, sizeof(TSC));
232 std::memcpy(RecordPtr, &NewCPUId, sizeof(MetadataRecord));
[10] Null pointer passed as an argument to a 'nonnull' parameter
233 RecordPtr += sizeof(MetadataRecord);
234}
235
236void writeEOBMetadata() XRAY_NEVER_INSTRUMENT {
237 MetadataRecord EOBMeta;
238 EOBMeta.Type = uint8_t(RecordType::Metadata);
239 EOBMeta.RecordKind = uint8_t(MetadataRecord::RecordKinds::EndOfBuffer);
240 // For now we don't write any bytes into the Data field.
241 std::memcpy(RecordPtr, &EOBMeta, sizeof(MetadataRecord));
242 RecordPtr += sizeof(MetadataRecord);
243}
244
245void writeTSCWrapMetadata(uint64_t TSC) XRAY_NEVER_INSTRUMENT {
246 MetadataRecord TSCWrap;
247 TSCWrap.Type = uint8_t(RecordType::Metadata);
248 TSCWrap.RecordKind = uint8_t(MetadataRecord::RecordKinds::TSCWrap);
249
250 // The data for the TSCWrap record contains the following bytes:
251 // - Full TSC (uint64_t, 8 bytes)
252 // Total = 8 bytes.
253 std::memcpy(&TSCWrap.Data, &TSC, sizeof(TSC));
254 std::memcpy(RecordPtr, &TSCWrap, sizeof(MetadataRecord));
255 RecordPtr += sizeof(MetadataRecord);
256}
257
258constexpr auto MetadataRecSize = sizeof(MetadataRecord);
259constexpr auto FunctionRecSize = sizeof(FunctionRecord);
260
261class ThreadExitBufferCleanup {
262 std::weak_ptr<BufferQueue> Buffers;
263 BufferQueue::Buffer &Buffer;
264
265public:
266 explicit ThreadExitBufferCleanup(std::weak_ptr<BufferQueue> BQ,
267 BufferQueue::Buffer &Buffer)
268 XRAY_NEVER_INSTRUMENT : Buffers(BQ),
269 Buffer(Buffer) {}
270
271 ~ThreadExitBufferCleanup() noexcept XRAY_NEVER_INSTRUMENT {
272 if (RecordPtr == nullptr)
273 return;
274
275 // We make sure that upon exit, a thread will write out the EOB
276 // MetadataRecord in the thread-local log, and also release the buffer to
277 // the queue.
278 assert((RecordPtr + MetadataRecSize) - static_cast<char *>(Buffer.Buffer) >=
279 static_cast<ptrdiff_t>(MetadataRecSize));
280 if (auto BQ = Buffers.lock()) {
281 writeEOBMetadata();
282 if (auto EC = BQ->releaseBuffer(Buffer))
283 Report("Failed to release buffer at %p; error=%s\n", Buffer.Buffer,
284 EC.message().c_str());
285 return;
286 }
287 }
288};
289
290class RecursionGuard {
291 bool &Running;
292 const bool Valid;
293
294public:
295 explicit RecursionGuard(bool &R) : Running(R), Valid(!R) {
296 if (Valid)
297 Running = true;
298 }
299
300 RecursionGuard(const RecursionGuard &) = delete;
301 RecursionGuard(RecursionGuard &&) = delete;
302 RecursionGuard &operator=(const RecursionGuard &) = delete;
303 RecursionGuard &operator=(RecursionGuard &&) = delete;
304
305 explicit operator bool() const { return Valid; }
306
307 ~RecursionGuard() noexcept {
308 if (Valid)
309 Running = false;
310 }
311};
312
313inline bool loggingInitialized() {
314 return LoggingStatus.load(std::memory_order_acquire) ==
315 XRayLogInitStatus::XRAY_LOG_INITIALIZED;
316}
317
318} // namespace
319
320void fdrLoggingHandleArg0(int32_t FuncId,
321 XRayEntryType Entry) XRAY_NEVER_INSTRUMENT {
322 // We want to get the TSC as early as possible, so that we can check whether
323 // we've seen this CPU before. We also do it before we load anything else, to
324 // allow for forward progress with the scheduling.
325 unsigned char CPU;
326 uint64_t TSC = __xray::readTSC(CPU);
327
328 // Bail out right away if logging is not initialized yet.
329 if (LoggingStatus.load(std::memory_order_acquire) !=
[1] Assuming the condition is false
[2] Taking false branch
330 XRayLogInitStatus::XRAY_LOG_INITIALIZED)
331 return;
332
333 // We use a thread_local variable to keep track of which CPUs we've already
334 // run, and the TSC times for these CPUs. This allows us to stop repeating the
335 // CPU field in the function records.
336 //
337 // We assume that we'll support only 65536 CPUs for x86_64.
338 thread_local uint16_t CurrentCPU = std::numeric_limits<uint16_t>::max();
339 thread_local uint64_t LastTSC = 0;
340
341 // Make sure a thread that's ever called handleArg0 has a thread-local
342 // live reference to the buffer queue for this particular instance of
343 // FDRLogging, and that we're going to clean it up when the thread exits.
344 thread_local auto LocalBQ = BQ;
345 thread_local ThreadExitBufferCleanup Cleanup(LocalBQ, Buffer);
346
347 // Prevent signal handler recursion, so in case we're already in a log writing
348 // mode and the signal handler comes in (and is also instrumented) then we
349 // don't want to be clobbering potentially partial writes already happening in
350 // the thread. We use a simple thread_local latch to only allow one on-going
351 // handleArg0 to happen at any given time.
352 thread_local bool Running = false;
353 RecursionGuard Guard{Running};
354 if (!Guard) {
[3] Taking false branch
355 assert(Running == true && "RecursionGuard is buggy!");
356 return;
357 }
358
359 if (!loggingInitialized() || LocalBQ->finalizing()) {
360 writeEOBMetadata();
361 if (auto EC = BQ->releaseBuffer(Buffer)) {
[4] Taking false branch
362 Report("Failed to release buffer at %p; error=%s\n", Buffer.Buffer,
363 EC.message().c_str());
364 return;
365 }
366 RecordPtr = nullptr;
[5] Null pointer value stored to 'RecordPtr'
367 }
368
369 if (Buffer.Buffer == nullptr) {
[6] Assuming the condition is false
[7] Taking false branch
370 if (auto EC = LocalBQ->getBuffer(Buffer)) {
371 auto LS = LoggingStatus.load(std::memory_order_acquire);
372 if (LS != XRayLogInitStatus::XRAY_LOG_FINALIZING &&
373 LS != XRayLogInitStatus::XRAY_LOG_FINALIZED)
374 Report("Failed to acquire a buffer; error=%s\n", EC.message().c_str());
375 return;
376 }
377
378 setupNewBuffer(Buffer);
379 }
380
381 if (CurrentCPU == std::numeric_limits<uint16_t>::max()) {
[8] Taking true branch
382 // This means this is the first CPU this thread has ever run on. We set the
383 // current CPU and record this as the first TSC we've seen.
384 CurrentCPU = CPU;
385 writeNewCPUIdMetadata(CPU, TSC);
[9] Calling 'writeNewCPUIdMetadata'
386 }
387
388 // Before we go setting up writing new function entries, we need to be really
389 // careful about the pointer math we're doing. This means we need to ensure
390 // that the record we are about to write is going to fit into the buffer,
391 // without overflowing the buffer.
392 //
393 // To do this properly, we use the following assumptions:
394 //
395 // - The least number of bytes we will ever write is 8
396 // (sizeof(FunctionRecord)) only if the delta between the previous entry
397 // and this entry is within 32 bits.
398 // - The most number of bytes we will ever write is 8 + 16 = 24. This is
399 // computed by:
400 //
401 // sizeof(FunctionRecord) + sizeof(MetadataRecord)
402 //
403 // These arise in the following cases:
404 //
405 // 1. When the delta between the TSC we get and the previous TSC for the
406 // same CPU is outside of the uint32_t range, we end up having to
407 // write a MetadataRecord to indicate a "tsc wrap" before the actual
408 // FunctionRecord.
409 // 2. When we learn that we've moved CPUs, we need to write a
410 // MetadataRecord to indicate a "cpu change", and thus write out the
411 // current TSC for that CPU before writing out the actual
412 // FunctionRecord.
413 // 3. When we learn about a new CPU ID, we need to write down a "new cpu
414 // id" MetadataRecord before writing out the actual FunctionRecord.
415 //
416 // - An End-of-Buffer (EOB) MetadataRecord is 16 bytes.
417 //
418 // So the math we need to do is to determine whether writing 24 bytes past the
419 // current pointer leaves us with enough bytes to write the EOB
420 // MetadataRecord. If we don't have enough space after writing as much as 24
421 // bytes in the end of the buffer, we need to write out the EOB, get a new
422 // Buffer, set it up properly before doing any further writing.
423 //
424 char *BufferStart = static_cast<char *>(Buffer.Buffer);
425 if ((RecordPtr + (MetadataRecSize + FunctionRecSize)) - BufferStart <
426 static_cast<ptrdiff_t>(MetadataRecSize)) {
427 writeEOBMetadata();
428 if (auto EC = LocalBQ->releaseBuffer(Buffer)) {
429 Report("Failed to release buffer at %p; error=%s\n", Buffer.Buffer,
430 EC.message().c_str());
431 return;
432 }
433 if (auto EC = LocalBQ->getBuffer(Buffer)) {
434 Report("Failed to acquire a buffer; error=%s\n", EC.message().c_str());
435 return;
436 }
437 setupNewBuffer(Buffer);
438 }
439
440 // By this point, we are now ready to write at most 24 bytes (one metadata
441 // record and one function record).
442 BufferStart = static_cast<char *>(Buffer.Buffer);
443 assert((RecordPtr + (MetadataRecSize + FunctionRecSize)) - BufferStart >=
444 static_cast<ptrdiff_t>(MetadataRecSize) &&
445 "Misconfigured BufferQueue provided; Buffer size not large enough.");
446
447 std::aligned_storage<sizeof(FunctionRecord), alignof(FunctionRecord)>::type
448 AlignedFuncRecordBuffer;
449 auto &FuncRecord =
450 *reinterpret_cast<FunctionRecord *>(&AlignedFuncRecordBuffer);
451 FuncRecord.Type = uint8_t(RecordType::Function);
452
453 // Only get the lower 28 bits of the function id.
454 FuncRecord.FuncId = FuncId & ~(0x0F << 28);
455
456 // Here we compute the TSC Delta. There are a few interesting situations we
457 // need to account for:
458 //
459 // - The thread has migrated to a different CPU. If this is the case, then
460 // we write down the following records:
461 //
462 // 1. A 'NewCPUId' Metadata record.
463 // 2. A FunctionRecord with a 0 for the TSCDelta field.
464 //
465 // - The TSC delta is greater than the 32 bits we can store in a
466 // FunctionRecord. In this case we write down the following records:
467 //
468 // 1. A 'TSCWrap' Metadata record.
469 // 2. A FunctionRecord with a 0 for the TSCDelta field.
470 //
471 // - The TSC delta is representable within the 32 bits we can store in a
472 // FunctionRecord. In this case we write down just a FunctionRecord with
473 // the correct TSC delta.
474 //
475 FuncRecord.TSCDelta = 0;
476 if (CPU != CurrentCPU) {
477 // We've moved to a new CPU.
478 writeNewCPUIdMetadata(CPU, TSC);
479 } else {
480 // If the delta is greater than the range for a uint32_t, then we write out
481 // the TSC wrap metadata entry with the full TSC, and the TSC for the
482 // function record be 0.
483 auto Delta = LastTSC - TSC;
484 if (Delta > (1ULL << 32) - 1)
485 writeTSCWrapMetadata(TSC);
486 else
487 FuncRecord.TSCDelta = Delta;
488 }
489
490 // We then update our "LastTSC" and "CurrentCPU" thread-local variables to aid
491 // us in future computations of this TSC delta value.
492 LastTSC = TSC;
493 CurrentCPU = CPU;
494
495 switch (Entry) {
496 case XRayEntryType::ENTRY:
497 case XRayEntryType::LOG_ARGS_ENTRY:
498 FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter);
499 break;
500 case XRayEntryType::EXIT:
501 FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionExit);
502 break;
503 case XRayEntryType::TAIL:
504 FuncRecord.RecordKind =
505 uint8_t(FunctionRecord::RecordKinds::FunctionTailExit);
506 break;
507 }
508
509 std::memcpy(RecordPtr, &AlignedFuncRecordBuffer, sizeof(FunctionRecord));
510 RecordPtr += sizeof(FunctionRecord);
511
512 // If we've exhausted the buffer by this time, we then release the buffer to
513 // make sure that other threads may start using this buffer.
514 if ((RecordPtr + MetadataRecSize) - BufferStart == MetadataRecSize) {
515 writeEOBMetadata();
516 if (auto EC = LocalBQ->releaseBuffer(Buffer)) {
517 Report("Failed releasing buffer at %p; error=%s\n", Buffer.Buffer,
518 EC.message().c_str());
519 return;
520 }
521 RecordPtr = nullptr;
522 }
523}
524
525} // namespace __xray
526
527static auto UNUSED Unused = [] {
528 using namespace __xray;
529 if (flags()->xray_fdr_log) {
530 XRayLogImpl Impl{
531 fdrLoggingInit, fdrLoggingFinalize, fdrLoggingHandleArg0,
532 fdrLoggingFlush,
533 };
534 __xray_set_log_impl(Impl);
535 }
536 return true;
537}();