Bug Summary

File: compiler-rt/lib/tsan/../sanitizer_common/sanitizer_vector.h
Warning: line 47, column 5
Returning null reference
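
The path events below (numbered 22-28 in the annotated source) boil down to a default-constructed __sanitizer::Vector whose internal begin_ pointer is still null when operator[] dereferences it. A minimal stand-alone sketch of that pattern follows; TinyVector and its members are illustrative stand-ins, not code from the report.

#include <cstddef>

// Stand-in for __sanitizer::Vector<T>: the default constructor leaves the
// buffer pointer null, and operator[] returns a reference into that buffer.
template <typename T>
class TinyVector {
 public:
  TinyVector() : begin_(nullptr), end_(nullptr) {}  // event 23: null stored to begin_
  T &operator[](size_t i) { return begin_[i]; }     // event 28: null reference if begin_ is still null
  size_t Size() const { return static_cast<size_t>(end_ - begin_); }

 private:
  T *begin_;
  T *end_;
};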

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name tsan_rtl_report.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse4.2 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I projects/compiler-rt/lib/tsan -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/tsan -I include -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/tsan/.. -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/build-llvm -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -fno-builtin -fno-rtti -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-11-10-160236-22541-1 -x c++ /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp

/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp

1//===-- tsan_rtl_report.cpp -----------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11//===----------------------------------------------------------------------===//
12
13#include "sanitizer_common/sanitizer_libc.h"
14#include "sanitizer_common/sanitizer_placement_new.h"
15#include "sanitizer_common/sanitizer_stackdepot.h"
16#include "sanitizer_common/sanitizer_common.h"
17#include "sanitizer_common/sanitizer_stacktrace.h"
18#include "tsan_platform.h"
19#include "tsan_rtl.h"
20#include "tsan_suppressions.h"
21#include "tsan_symbolize.h"
22#include "tsan_report.h"
23#include "tsan_sync.h"
24#include "tsan_mman.h"
25#include "tsan_flags.h"
26#include "tsan_fd.h"
27
28namespace __tsan {
29
30using namespace __sanitizer;
31
32static ReportStack *SymbolizeStack(StackTrace trace);
33
34// Can be overriden by an application/test to intercept reports.
35#ifdef TSAN_EXTERNAL_HOOKS
36bool OnReport(const ReportDesc *rep, bool suppressed);
37#else
38SANITIZER_WEAK_CXX_DEFAULT_IMPL
39bool OnReport(const ReportDesc *rep, bool suppressed) {
40 (void)rep;
41 return suppressed;
42}
43#endif
44
45SANITIZER_WEAK_DEFAULT_IMPL
46void __tsan_on_report(const ReportDesc *rep) {
47 (void)rep;
48}
49
50static void StackStripMain(SymbolizedStack *frames) {
51 SymbolizedStack *last_frame = nullptr;
52 SymbolizedStack *last_frame2 = nullptr;
53 for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
54 last_frame2 = last_frame;
55 last_frame = cur;
56 }
57
58 if (last_frame2 == 0)
59 return;
60#if !SANITIZER_GO
61 const char *last = last_frame->info.function;
62 const char *last2 = last_frame2->info.function;
63 // Strip frame above 'main'
64 if (last2 && 0 == internal_strcmp(last2, "main")) {
65 last_frame->ClearAll();
66 last_frame2->next = nullptr;
67 // Strip our internal thread start routine.
68 } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
69 last_frame->ClearAll();
70 last_frame2->next = nullptr;
71 // Strip global ctors init, .preinit_array and main caller.
72 } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
73 0 == internal_strcmp(last, "__libc_csu_init") ||
74 0 == internal_strcmp(last, "__libc_start_main"))) {
75 last_frame->ClearAll();
76 last_frame2->next = nullptr;
77 // If both are 0, then we probably just failed to symbolize.
78 } else if (last || last2) {
79 // Ensure that we recovered stack completely. Trimmed stack
80 // can actually happen if we do not instrument some code,
81 // so it's only a debug print. However we must try hard to not miss it
82 // due to our fault.
83 DPrintf("Bottom stack frame is missed\n");
84 }
85#else
86 // The last frame always point into runtime (gosched0, goexit0, runtime.main).
87 last_frame->ClearAll();
88 last_frame2->next = nullptr;
89#endif
90}
91
92ReportStack *SymbolizeStackId(u32 stack_id) {
93 if (stack_id == 0)
94 return 0;
95 StackTrace stack = StackDepotGet(stack_id);
96 if (stack.trace == nullptr)
97 return nullptr;
98 return SymbolizeStack(stack);
99}
100
101static ReportStack *SymbolizeStack(StackTrace trace) {
102 if (trace.size == 0)
103 return 0;
104 SymbolizedStack *top = nullptr;
105 for (uptr si = 0; si < trace.size; si++) {
106 const uptr pc = trace.trace[si];
107 uptr pc1 = pc;
108 // We obtain the return address, but we're interested in the previous
109 // instruction.
110 if ((pc & kExternalPCBit) == 0)
111 pc1 = StackTrace::GetPreviousInstructionPc(pc);
112 SymbolizedStack *ent = SymbolizeCode(pc1);
113 CHECK_NE(ent, 0);
114 SymbolizedStack *last = ent;
115 while (last->next) {
116 last->info.address = pc; // restore original pc for report
117 last = last->next;
118 }
119 last->info.address = pc; // restore original pc for report
120 last->next = top;
121 top = ent;
122 }
123 StackStripMain(top);
124
125 auto *stack = New<ReportStack>();
126 stack->frames = top;
127 return stack;
128}
129
130bool ShouldReport(ThreadState *thr, ReportType typ) {
131 // We set thr->suppress_reports in the fork context.
132 // Taking any locking in the fork context can lead to deadlocks.
133 // If any locks are already taken, it's too late to do this check.
134 CheckedMutex::CheckNoLocks();
135 // For the same reason check we didn't lock thread_registry yet.
136 if (SANITIZER_DEBUG)
137 ThreadRegistryLock l(&ctx->thread_registry);
138 if (!flags()->report_bugs || thr->suppress_reports)
139 return false;
140 switch (typ) {
141 case ReportTypeSignalUnsafe:
142 return flags()->report_signal_unsafe;
143 case ReportTypeThreadLeak:
144#if !SANITIZER_GO
145 // It's impossible to join phantom threads
146 // in the child after fork.
147 if (ctx->after_multithreaded_fork)
148 return false;
149#endif
150 return flags()->report_thread_leaks;
151 case ReportTypeMutexDestroyLocked:
152 return flags()->report_destroy_locked;
153 default:
154 return true;
155 }
156}
157
158ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
159 ctx->thread_registry.CheckLocked();
160 rep_ = New<ReportDesc>();
161 rep_->typ = typ;
162 rep_->tag = tag;
163 ctx->report_mtx.Lock();
164}
165
166ScopedReportBase::~ScopedReportBase() {
167 ctx->report_mtx.Unlock();
168 DestroyAndFree(rep_);
169}
170
171void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
172 ReportStack **rs = rep_->stacks.PushBack();
173 *rs = SymbolizeStack(stack);
174 (*rs)->suppressable = suppressable;
175}
176
177void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
178 StackTrace stack, const MutexSet *mset) {
179 auto *mop = New<ReportMop>();
180 rep_->mops.PushBack(mop);
181 mop->tid = s.tid();
182 mop->addr = addr + s.addr0();
183 mop->size = s.size();
184 mop->write = s.IsWrite();
185 mop->atomic = s.IsAtomic();
186 mop->stack = SymbolizeStack(stack);
187 mop->external_tag = external_tag;
188 if (mop->stack)
189 mop->stack->suppressable = true;
190 for (uptr i = 0; i < mset->Size(); i++) {
191 MutexSet::Desc d = mset->Get(i);
192 u64 mid = this->AddMutex(d.id);
193 ReportMopMutex mtx = {mid, d.write};
194 mop->mset.PushBack(mtx);
195 }
196}
197
198void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
199 rep_->unique_tids.PushBack(unique_tid);
200}
201
202void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
203 for (uptr i = 0; i < rep_->threads.Size(); i++) {
204 if ((u32)rep_->threads[i]->id == tctx->tid)
205 return;
206 }
207 auto *rt = New<ReportThread>();
208 rep_->threads.PushBack(rt);
209 rt->id = tctx->tid;
210 rt->os_id = tctx->os_id;
211 rt->running = (tctx->status == ThreadStatusRunning);
212 rt->name = internal_strdup(tctx->name);
213 rt->parent_tid = tctx->parent_tid;
214 rt->thread_type = tctx->thread_type;
215 rt->stack = 0;
216 rt->stack = SymbolizeStackId(tctx->creation_stack_id);
217 if (rt->stack)
218 rt->stack->suppressable = suppressable;
219}
220
221#if !SANITIZER_GO
222static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
223 int unique_id = *(int *)arg;
224 return tctx->unique_id == (u32)unique_id;
225}
226
227static ThreadContext *FindThreadByUidLocked(Tid unique_id) {
228 ctx->thread_registry.CheckLocked();
229 return static_cast<ThreadContext *>(
230 ctx->thread_registry.FindThreadContextLocked(
231 FindThreadByUidLockedCallback, &unique_id));
232}
233
234static ThreadContext *FindThreadByTidLocked(Tid tid) {
235 ctx->thread_registry.CheckLocked();
236 return static_cast<ThreadContext *>(
237 ctx->thread_registry.GetThreadLocked(tid));
238}
239
240static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
241 uptr addr = (uptr)arg;
242 ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
243 if (tctx->status != ThreadStatusRunning)
244 return false;
245 ThreadState *thr = tctx->thr;
246 CHECK(thr);
247 return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
248 (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
249}
250
251ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
252 ctx->thread_registry.CheckLocked();
253 ThreadContext *tctx =
254 static_cast<ThreadContext *>(ctx->thread_registry.FindThreadContextLocked(
255 IsInStackOrTls, (void *)addr));
256 if (!tctx)
257 return 0;
258 ThreadState *thr = tctx->thr;
259 CHECK(thr);
260 *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
261 return tctx;
262}
263#endif
264
265void ScopedReportBase::AddThread(Tid unique_tid, bool suppressable) {
266#if !SANITIZER_GO
267 if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
268 AddThread(tctx, suppressable);
269#endif
270}
271
272void ScopedReportBase::AddMutex(const SyncVar *s) {
273 for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
274 if (rep_->mutexes[i]->id == s->uid)
275 return;
276 }
277 auto *rm = New<ReportMutex>();
278 rep_->mutexes.PushBack(rm);
279 rm->id = s->uid;
280 rm->addr = s->addr;
281 rm->destroyed = false;
282 rm->stack = SymbolizeStackId(s->creation_stack_id);
283}
284
285u64 ScopedReportBase::AddMutex(u64 id) {
286 u64 uid = 0;
287 u64 mid = id;
288 uptr addr = SyncVar::SplitId(id, &uid);
289 SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
290 // Check that the mutex is still alive.
291 // Another mutex can be created at the same address,
292 // so check uid as well.
293 if (s && s->CheckId(uid)) {
294 Lock l(&s->mtx);
295 mid = s->uid;
296 AddMutex(s);
297 } else {
298 AddDeadMutex(id);
299 }
300 return mid;
301}
302
303void ScopedReportBase::AddDeadMutex(u64 id) {
304 for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
305 if (rep_->mutexes[i]->id == id)
306 return;
307 }
308 auto *rm = New<ReportMutex>();
309 rep_->mutexes.PushBack(rm);
310 rm->id = id;
311 rm->addr = 0;
312 rm->destroyed = true;
313 rm->stack = 0;
314}
315
316void ScopedReportBase::AddLocation(uptr addr, uptr size) {
317 if (addr == 0)
318 return;
319#if !SANITIZER_GO
320 int fd = -1;
321 Tid creat_tid = kInvalidTid;
322 StackID creat_stack = 0;
323 if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
324 auto *loc = New<ReportLocation>();
325 loc->type = ReportLocationFD;
326 loc->fd = fd;
327 loc->tid = creat_tid;
328 loc->stack = SymbolizeStackId(creat_stack);
329 rep_->locs.PushBack(loc);
330 ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
331 if (tctx)
332 AddThread(tctx);
333 return;
334 }
335 MBlock *b = 0;
336 uptr block_begin = 0;
337 Allocator *a = allocator();
338 if (a->PointerIsMine((void*)addr)) {
339 block_begin = (uptr)a->GetBlockBegin((void *)addr);
340 if (block_begin)
341 b = ctx->metamap.GetBlock(block_begin);
342 }
343 if (!b)
344 b = JavaHeapBlock(addr, &block_begin);
345 if (b != 0) {
346 ThreadContext *tctx = FindThreadByTidLocked(b->tid);
347 auto *loc = New<ReportLocation>();
348 loc->type = ReportLocationHeap;
349 loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
350 loc->heap_chunk_size = b->siz;
351 loc->external_tag = b->tag;
352 loc->tid = tctx ? tctx->tid : b->tid;
353 loc->stack = SymbolizeStackId(b->stk);
354 rep_->locs.PushBack(loc);
355 if (tctx)
356 AddThread(tctx);
357 return;
358 }
359 bool is_stack = false;
360 if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
361 auto *loc = New<ReportLocation>();
362 loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
363 loc->tid = tctx->tid;
364 rep_->locs.PushBack(loc);
365 AddThread(tctx);
366 }
367#endif
368 if (ReportLocation *loc = SymbolizeData(addr)) {
369 loc->suppressable = true;
370 rep_->locs.PushBack(loc);
371 return;
372 }
373}
374
375#if !SANITIZER_GO
376void ScopedReportBase::AddSleep(StackID stack_id) {
377 rep_->sleep = SymbolizeStackId(stack_id);
378}
379#endif
380
381void ScopedReportBase::SetCount(int count) { rep_->count = count; }
382
383const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }
384
385ScopedReport::ScopedReport(ReportType typ, uptr tag)
386 : ScopedReportBase(typ, tag) {}
387
388ScopedReport::~ScopedReport() {}
389
390void RestoreStack(Tid tid, const u64 epoch, VarSizeStackTrace *stk,
391 MutexSet *mset, uptr *tag) {
392 // This function restores stack trace and mutex set for the thread/epoch.
393 // It does so by getting stack trace and mutex set at the beginning of
394 // trace part, and then replaying the trace till the given epoch.
395 Trace* trace = ThreadTrace(tid);
396 ReadLock l(&trace->mtx);
397 const int partidx = (epoch / kTracePartSize) % TraceParts();
398 TraceHeader* hdr = &trace->headers[partidx];
399 if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
16. Assuming 'epoch' is >= field 'epoch0'
17. Assuming the condition is false
18. Taking false branch
400 return;
401 CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
19. Assuming 'v1' is equal to 'v2'
20. Taking false branch
21. Loop condition is false. Exiting loop
402 const u64 epoch0 = RoundDown(epoch, TraceSize());
403 const u64 eend = epoch % TraceSize();
404 const u64 ebegin = RoundDown(eend, kTracePartSize);
405 DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
406 tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
407 Vector<uptr> stack;
22. Calling default constructor for 'Vector<unsigned long>'
25. Returning from default constructor for 'Vector<unsigned long>'
408 stack.Resize(hdr->stack0.size + 64);
409 for (uptr i = 0; i < hdr->stack0.size; i++) {
26. Loop condition is true. Entering loop body
410 stack[i] = hdr->stack0.trace[i];
27. Calling 'Vector::operator[]'
411 DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
412 }
413 if (mset)
414 *mset = hdr->mset0;
415 uptr pos = hdr->stack0.size;
416 Event *events = (Event*)GetThreadTrace(tid);
417 for (uptr i = ebegin; i <= eend; i++) {
418 Event ev = events[i];
419 EventType typ = (EventType)(ev >> kEventPCBits);
420 uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
421 DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
422 if (typ == EventTypeMop) {
423 stack[pos] = pc;
424 } else if (typ == EventTypeFuncEnter) {
425 if (stack.Size() < pos + 2)
426 stack.Resize(pos + 2);
427 stack[pos++] = pc;
428 } else if (typ == EventTypeFuncExit) {
429 if (pos > 0)
430 pos--;
431 }
432 if (mset) {
433 if (typ == EventTypeLock) {
434 mset->Add(pc, true, epoch0 + i);
435 } else if (typ == EventTypeUnlock) {
436 mset->Del(pc, true);
437 } else if (typ == EventTypeRLock) {
438 mset->Add(pc, false, epoch0 + i);
439 } else if (typ == EventTypeRUnlock) {
440 mset->Del(pc, false);
441 }
442 }
443 for (uptr j = 0; j <= pos; j++)
444 DPrintf2(" #%zu: %zx\n", j, stack[j]);
445 }
446 if (pos == 0 && stack[0] == 0)
447 return;
448 pos++;
449 stk->Init(&stack[0], pos);
450 ExtractTagFromStack(stk, tag);
451}
452
453namespace v3 {
454
455// Replays the trace up to last_pos position in the last part
456// or up to the provided epoch/sid (whichever is earlier)
457// and calls the provided function f for each event.
458template <typename Func>
459void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
460 Epoch epoch, Func f) {
461 TracePart *part = trace->parts.Front();
462 Sid ev_sid = kFreeSid;
463 Epoch ev_epoch = kEpochOver;
464 for (;;) {
465 DCHECK_EQ(part->trace, trace);
466 // Note: an event can't start in the last element.
467 // Since an event can take up to 2 elements,
468 // we ensure we have at least 2 before adding an event.
469 Event *end = &part->events[TracePart::kSize - 1];
470 if (part == last)
471 end = last_pos;
472 for (Event *evp = &part->events[0]; evp < end; evp++) {
473 Event *evp0 = evp;
474 if (!evp->is_access && !evp->is_func) {
475 switch (evp->type) {
476 case EventType::kTime: {
477 auto *ev = reinterpret_cast<EventTime *>(evp);
478 ev_sid = static_cast<Sid>(ev->sid);
479 ev_epoch = static_cast<Epoch>(ev->epoch);
480 if (ev_sid == sid && ev_epoch > epoch)
481 return;
482 break;
483 }
484 case EventType::kAccessExt:
485 FALLTHROUGH;
486 case EventType::kAccessRange:
487 FALLTHROUGH;
488 case EventType::kLock:
489 FALLTHROUGH;
490 case EventType::kRLock:
491 // These take 2 Event elements.
492 evp++;
493 break;
494 case EventType::kUnlock:
495 // This takes 1 Event element.
496 break;
497 }
498 }
499 CHECK_NE(ev_sid, kFreeSid);
500 CHECK_NE(ev_epoch, kEpochOver);
501 f(ev_sid, ev_epoch, evp0);
502 }
503 if (part == last)
504 return;
505 part = trace->parts.Next(part);
506 CHECK(part);
507 }
508 CHECK(0);
509}
510
511static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
512 Vector<uptr> *stack, MutexSet *mset, uptr pc,
513 bool *found) {
514 DPrintf2(" MATCHED\n");
515 *pmset = *mset;
516 stack->PushBack(pc);
517 pstk->Init(&(*stack)[0], stack->Size());
518 stack->PopBack();
519 *found = true;
520}
521
522// Checks if addr1|size1 is fully contained in addr2|size2.
523// We check for fully contained instread of just overlapping
524// because a memory access is always traced once, but can be
525// split into multiple accesses in the shadow.
526static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
527 uptr size2) {
528 return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
529}
530
531// Replays the trace of thread tid up to the target event identified
532// by sid/epoch/addr/size/typ and restores and returns stack, mutex set
533// and tag for that event. If there are multiple such events, it returns
534// the last one. Returns false if the event is not present in the trace.
535bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
536 uptr size, AccessType typ, VarSizeStackTrace *pstk,
537 MutexSet *pmset, uptr *ptag) {
538 // This function restores stack trace and mutex set for the thread/epoch.
539 // It does so by getting stack trace and mutex set at the beginning of
540 // trace part, and then replaying the trace till the given epoch.
541 DPrintf2("RestoreStack: tid=%u sid=%u@%u addr=0x%zx/%zu typ=%x\n", tid,
542 static_cast<int>(sid), static_cast<int>(epoch), addr, size,
543 static_cast<int>(typ));
544 ctx->slot_mtx.CheckLocked(); // needed to prevent trace part recycling
545 ctx->thread_registry.CheckLocked();
546 ThreadContext *tctx =
547 static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
548 Trace *trace = &tctx->trace;
549 // Snapshot first/last parts and the current position in the last part.
550 TracePart *first_part;
551 TracePart *last_part;
552 Event *last_pos;
553 {
554 Lock lock(&trace->mtx);
555 first_part = trace->parts.Front();
556 if (!first_part)
557 return false;
558 last_part = trace->parts.Back();
559 last_pos = trace->final_pos;
560 if (tctx->thr)
561 last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
562 }
563 DynamicMutexSet mset;
564 Vector<uptr> stack;
565 uptr prev_pc = 0;
566 bool found = false;
567 bool is_read = typ & kAccessRead;
568 bool is_atomic = typ & kAccessAtomic;
569 bool is_free = typ & kAccessFree;
570 TraceReplay(
571 trace, last_part, last_pos, sid, epoch,
572 [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
573 bool match = ev_sid == sid && ev_epoch == epoch;
574 if (evp->is_access) {
575 if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
576 evp->_ == 0) // NopEvent
577 return;
578 auto *ev = reinterpret_cast<EventAccess *>(evp);
579 uptr ev_addr = RestoreAddr(ev->addr);
580 uptr ev_size = 1 << ev->size_log;
581 uptr ev_pc =
582 prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
583 prev_pc = ev_pc;
584 DPrintf2(" Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
585 ev_addr, ev_size, ev->is_read, ev->is_atomic);
586 if (match && type == EventType::kAccessExt &&
587 IsWithinAccess(addr, size, ev_addr, ev_size) &&
588 is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
589 RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
590 return;
591 }
592 if (evp->is_func) {
593 auto *ev = reinterpret_cast<EventFunc *>(evp);
594 if (ev->pc) {
595 DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
596 stack.PushBack(ev->pc);
597 } else {
598 DPrintf2(" FuncExit\n");
599 CHECK(stack.Size());
600 stack.PopBack();
601 }
602 return;
603 }
604 switch (evp->type) {
605 case EventType::kAccessExt: {
606 auto *ev = reinterpret_cast<EventAccessExt *>(evp);
607 uptr ev_addr = RestoreAddr(ev->addr);
608 uptr ev_size = 1 << ev->size_log;
609 prev_pc = ev->pc;
610 DPrintf2(" AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
611 ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
612 if (match && type == EventType::kAccessExt &&
613 IsWithinAccess(addr, size, ev_addr, ev_size) &&
614 is_read == ev->is_read && is_atomic == ev->is_atomic &&
615 !is_free)
616 RestoreStackMatch(pstk, pmset, &stack, mset, ev->pc, &found);
617 break;
618 }
619 case EventType::kAccessRange: {
620 auto *ev = reinterpret_cast<EventAccessRange *>(evp);
621 uptr ev_addr = RestoreAddr(ev->addr);
622 uptr ev_size =
623 (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
624 uptr ev_pc = RestoreAddr(ev->pc);
625 prev_pc = ev_pc;
626 DPrintf2(" Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
627 ev_addr, ev_size, ev->is_read, ev->is_free);
628 if (match && type == EventType::kAccessExt &&
629 IsWithinAccess(addr, size, ev_addr, ev_size) &&
630 is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
631 RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
632 break;
633 }
634 case EventType::kLock:
635 FALLTHROUGH;
636 case EventType::kRLock: {
637 auto *ev = reinterpret_cast<EventLock *>(evp);
638 bool is_write = ev->type == EventType::kLock;
639 uptr ev_addr = RestoreAddr(ev->addr);
640 uptr ev_pc = RestoreAddr(ev->pc);
641 StackID stack_id =
642 (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
643 DPrintf2(" Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
644 ev_addr, stack_id, is_write);
645 mset->AddAddr(ev_addr, stack_id, is_write);
646 // Events with ev_pc == 0 are written to the beginning of trace
647 // part as initial mutex set (are not real).
648 if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
649 RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
650 break;
651 }
652 case EventType::kUnlock: {
653 auto *ev = reinterpret_cast<EventUnlock *>(evp);
654 uptr ev_addr = RestoreAddr(ev->addr);
655 DPrintf2(" Unlock: addr=0x%zx\n", ev_addr);
656 mset->DelAddr(ev_addr);
657 break;
658 }
659 case EventType::kTime:
660 // TraceReplay already extracted sid/epoch from it,
661 // nothing else to do here.
662 break;
663 }
664 });
665 ExtractTagFromStack(pstk, ptag);
666 return found;
667}
668
669} // namespace v3
670
671bool RacyStacks::operator==(const RacyStacks &other) const {
672 if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
673 return true;
674 if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
675 return true;
676 return false;
677}
678
679static bool FindRacyStacks(const RacyStacks &hash) {
680 for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
681 if (hash == ctx->racy_stacks[i]) {
682 VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
683 return true;
684 }
685 }
686 return false;
687}
688
689static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
690 if (!flags()->suppress_equal_stacks)
691 return false;
692 RacyStacks hash;
693 hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
694 hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
695 {
696 ReadLock lock(&ctx->racy_mtx);
697 if (FindRacyStacks(hash))
698 return true;
699 }
700 Lock lock(&ctx->racy_mtx);
701 if (FindRacyStacks(hash))
702 return true;
703 ctx->racy_stacks.PushBack(hash);
704 return false;
705}
706
707static bool FindRacyAddress(const RacyAddress &ra0) {
708 for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
709 RacyAddress ra2 = ctx->racy_addresses[i];
710 uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
711 uptr minend = min(ra0.addr_max, ra2.addr_max);
712 if (maxbeg < minend) {
713 VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
714 return true;
715 }
716 }
717 return false;
718}
719
720static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
721 if (!flags()->suppress_equal_addresses)
722 return false;
723 RacyAddress ra0 = {addr_min, addr_max};
724 {
725 ReadLock lock(&ctx->racy_mtx);
726 if (FindRacyAddress(ra0))
727 return true;
728 }
729 Lock lock(&ctx->racy_mtx);
730 if (FindRacyAddress(ra0))
731 return true;
732 ctx->racy_addresses.PushBack(ra0);
733 return false;
734}
735
736bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
737 // These should have been checked in ShouldReport.
738 // It's too late to check them here, we have already taken locks.
739 CHECK(flags()->report_bugs);
740 CHECK(!thr->suppress_reports);
741 atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
742 const ReportDesc *rep = srep.GetReport();
743 CHECK_EQ(thr->current_report, nullptr);
744 thr->current_report = rep;
745 Suppression *supp = 0;
746 uptr pc_or_addr = 0;
747 for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
748 pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
749 for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
750 pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
751 for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
752 pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
753 for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
754 pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
755 if (pc_or_addr != 0) {
756 Lock lock(&ctx->fired_suppressions_mtx);
757 FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
758 ctx->fired_suppressions.push_back(s);
759 }
760 {
761 bool old_is_freeing = thr->is_freeing;
762 thr->is_freeing = false;
763 bool suppressed = OnReport(rep, pc_or_addr != 0);
764 thr->is_freeing = old_is_freeing;
765 if (suppressed) {
766 thr->current_report = nullptr;
767 return false;
768 }
769 }
770 PrintReport(rep);
771 __tsan_on_report(rep);
772 ctx->nreported++;
773 if (flags()->halt_on_error)
774 Die();
775 thr->current_report = nullptr;
776 return true;
777}
778
779bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
780 ReadLock lock(&ctx->fired_suppressions_mtx);
781 for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
782 if (ctx->fired_suppressions[k].type != type)
783 continue;
784 for (uptr j = 0; j < trace.size; j++) {
785 FiredSuppression *s = &ctx->fired_suppressions[k];
786 if (trace.trace[j] == s->pc_or_addr) {
787 if (s->supp)
788 atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
789 return true;
790 }
791 }
792 }
793 return false;
794}
795
796static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
797 ReadLock lock(&ctx->fired_suppressions_mtx);
798 for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
799 if (ctx->fired_suppressions[k].type != type)
800 continue;
801 FiredSuppression *s = &ctx->fired_suppressions[k];
802 if (addr == s->pc_or_addr) {
803 if (s->supp)
804 atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
805 return true;
806 }
807 }
808 return false;
809}
810
811static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
812 Shadow s0(thr->racy_state[0]);
813 Shadow s1(thr->racy_state[1]);
814 CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
815 if (!s0.IsAtomic() && !s1.IsAtomic())
816 return true;
817 if (s0.IsAtomic() && s1.IsFreed())
818 return true;
819 if (s1.IsAtomic() && thr->is_freeing)
820 return true;
821 return false;
822}
823
824void ReportRace(ThreadState *thr) {
825 CheckedMutex::CheckNoLocks();
826
827 // Symbolizer makes lots of intercepted calls. If we try to process them,
828 // at best it will cause deadlocks on internal mutexes.
829 ScopedIgnoreInterceptors ignore;
830
831 if (!ShouldReport(thr, ReportTypeRace))
832 return;
833 if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
1. Assuming field 'report_atomic_races' is true
834 return;
835
836 bool freed = false;
837 {
838 Shadow s(thr->racy_state[1]);
839 freed = s.GetFreedAndReset();
840 thr->racy_state[1] = s.raw();
841 }
842
843 uptr addr = ShadowToMem(thr->racy_shadow_addr);
844 uptr addr_min = 0;
845 uptr addr_max = 0;
846 {
847 uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
848 uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
849 uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
850 uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
851 addr_min = min(a0, a1);
852 addr_max = max(e0, e1);
853 if (IsExpectedReport(addr_min, addr_max - addr_min))
2. Assuming the condition is false
3. Taking false branch
854 return;
855 }
856 if (HandleRacyAddress(thr, addr_min, addr_max))
4. Taking false branch
857 return;
858
859 ReportType typ = ReportTypeRace;
860 if (thr->is_vptr_access && freed)
5. Assuming field 'is_vptr_access' is false
861 typ = ReportTypeVptrUseAfterFree;
862 else if (thr->is_vptr_access)
5.1. Field 'is_vptr_access' is false
6. Taking false branch
863 typ = ReportTypeVptrRace;
864 else if (freed)
7. Assuming 'freed' is false
8. Taking false branch
865 typ = ReportTypeUseAfterFree;
866
867 if (IsFiredSuppression(ctx, typ, addr))
9. Assuming the condition is false
10. Taking false branch
868 return;
869
870 const uptr kMop = 2;
871 VarSizeStackTrace traces[kMop];
872 uptr tags[kMop] = {kExternalTagNone};
873 uptr toppc = TraceTopPC(thr);
874 if (toppc >> kEventPCBits) {
11. Assuming the condition is false
12. Taking false branch
875 // This is a work-around for a known issue.
876 // The scenario where this happens is rather elaborate and requires
877 // an instrumented __sanitizer_report_error_summary callback and
878 // a __tsan_symbolize_external callback and a race during a range memory
879 // access larger than 8 bytes. MemoryAccessRange adds the current PC to
880 // the trace and starts processing memory accesses. A first memory access
881 // triggers a race, we report it and call the instrumented
882 // __sanitizer_report_error_summary, which adds more stuff to the trace
883 // since it is intrumented. Then a second memory access in MemoryAccessRange
884 // also triggers a race and we get here and call TraceTopPC to get the
885 // current PC, however now it contains some unrelated events from the
886 // callback. Most likely, TraceTopPC will now return a EventTypeFuncExit
887 // event. Later we subtract -1 from it (in GetPreviousInstructionPc)
888 // and the resulting PC has kExternalPCBit set, so we pass it to
889 // __tsan_symbolize_external_ex. __tsan_symbolize_external_ex is within its
890 // rights to crash since the PC is completely bogus.
891 // test/tsan/double_race.cpp contains a test case for this.
892 toppc = 0;
893 }
894 ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
895 if (IsFiredSuppression(ctx, typ, traces[0]))
13. Assuming the condition is false
14. Taking false branch
896 return;
897
898 DynamicMutexSet mset2;
899 Shadow s2(thr->racy_state[1]);
900 RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
15. Calling 'RestoreStack'
901 if (IsFiredSuppression(ctx, typ, traces[1]))
902 return;
903
904 if (HandleRacyStacks(thr, traces))
905 return;
906
907 // If any of the accesses has a tag, treat this as an "external" race.
908 uptr tag = kExternalTagNone;
909 for (uptr i = 0; i < kMop; i++) {
910 if (tags[i] != kExternalTagNone) {
911 typ = ReportTypeExternalRace;
912 tag = tags[i];
913 break;
914 }
915 }
916
917 ThreadRegistryLock l0(&ctx->thread_registry);
918 ScopedReport rep(typ, tag);
919 for (uptr i = 0; i < kMop; i++) {
920 Shadow s(thr->racy_state[i]);
921 rep.AddMemoryAccess(addr, tags[i], s, traces[i],
922 i == 0 ? &thr->mset : mset2);
923 }
924
925 for (uptr i = 0; i < kMop; i++) {
926 FastState s(thr->racy_state[i]);
927 ThreadContext *tctx = static_cast<ThreadContext *>(
928 ctx->thread_registry.GetThreadLocked(s.tid()));
929 if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
930 continue;
931 rep.AddThread(tctx);
932 }
933
934 rep.AddLocation(addr_min, addr_max - addr_min);
935
936#if !SANITIZER_GO
937 {
938 Shadow s(thr->racy_state[1]);
939 if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
940 rep.AddSleep(thr->last_sleep_stack_id);
941 }
942#endif
943
944 OutputReport(thr, rep);
945}
946
947void PrintCurrentStack(ThreadState *thr, uptr pc) {
948 VarSizeStackTrace trace;
949 ObtainCurrentStack(thr, pc, &trace);
950 PrintStack(SymbolizeStack(trace));
951}
952
953// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
954// __sanitizer_print_stack_trace exists in the actual unwinded stack, but
955// tail-call to PrintCurrentStackSlow breaks this assumption because
956// __sanitizer_print_stack_trace disappears after tail-call.
957// However, this solution is not reliable enough, please see dvyukov's comment
958// http://reviews.llvm.org/D19148#406208
959// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
960ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
961#if !SANITIZER_GO
962 uptr bp = GET_CURRENT_FRAME();
963 auto *ptrace = New<BufferedStackTrace>();
964 ptrace->Unwind(pc, bp, nullptr, false);
965
966 for (uptr i = 0; i < ptrace->size / 2; i++) {
967 uptr tmp = ptrace->trace_buffer[i];
968 ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
969 ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
970 }
971 PrintStack(SymbolizeStack(*ptrace));
972#endif
973}
974
975} // namespace __tsan
976
977using namespace __tsan;
978
979extern "C" {
980SANITIZER_INTERFACE_ATTRIBUTE
981void __sanitizer_print_stack_trace() {
982 PrintCurrentStackSlow(StackTrace::GetCurrentPc());
983}
984} // extern "C"
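
In the listing above, the warning's path enters RestoreStack at line 407: the Vector<uptr> is default-constructed (events 22-25), the loop at line 409 is entered (event 26), and stack[i] at line 410 calls operator[] (events 27-28). The path notes record no event inside the Resize call at line 408, so the sketch below replays only the steps the diagnostic actually describes; it reuses the hypothetical TinyVector defined after the bug summary and is illustrative only.

// Condensed, illustrative replay of the reported path through
// tsan_rtl_report.cpp:407-410 (assumption: the allocation performed by
// Resize() does not appear as a path event).
void ReplayReportedPath(size_t stack0_size, const unsigned long *stack0_trace) {
  TinyVector<unsigned long> stack;           // line 407, events 22-25: begin_ == nullptr
  // stack.Resize(stack0_size + 64);         // line 408 in the real code; no path event here
  for (size_t i = 0; i < stack0_size; i++)   // line 409, event 26: loop body entered
    stack[i] = stack0_trace[i];              // line 410, events 27-28: null reference returned
}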

/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/tsan/../sanitizer_common/sanitizer_vector.h

1//===-- sanitizer_vector.h -------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is shared between sanitizers run-time libraries.
10//
11//===----------------------------------------------------------------------===//
12
13// Low-fat STL-like vector container.
14
15#ifndef SANITIZER_VECTOR_H
16#define SANITIZER_VECTOR_H
17
18#include "sanitizer_common/sanitizer_allocator_internal.h"
19#include "sanitizer_common/sanitizer_libc.h"
20
21namespace __sanitizer {
22
23template<typename T>
24class Vector {
25 public:
26 Vector() : begin_(), end_(), last_() {}
23. Null pointer value stored to 'stack.begin_'
24. Returning without writing to 'this->begin_'
27
28 ~Vector() {
29 if (begin_)
30 InternalFree(begin_);
31 }
32
33 void Reset() {
34 if (begin_)
35 InternalFree(begin_);
36 begin_ = 0;
37 end_ = 0;
38 last_ = 0;
39 }
40
41 uptr Size() const {
42 return end_ - begin_;
43 }
44
45 T &operator[](uptr i) {
46 DCHECK_LT(i, end_ - begin_);
47 return begin_[i];
28. Returning null reference
48 }
49
50 const T &operator[](uptr i) const {
51 DCHECK_LT(i, end_ - begin_);
52 return begin_[i];
53 }
54
55 T *PushBack() {
56 EnsureSize(Size() + 1);
57 T *p = &end_[-1];
58 internal_memset(p, 0, sizeof(*p));
59 return p;
60 }
61
62 T *PushBack(const T& v) {
63 EnsureSize(Size() + 1);
64 T *p = &end_[-1];
65 internal_memcpy(p, &v, sizeof(*p));
66 return p;
67 }
68
69 void PopBack() {
70 DCHECK_GT(end_, begin_);
71 end_--;
72 }
73
74 void Resize(uptr size) {
75 if (size == 0) {
76 end_ = begin_;
77 return;
78 }
79 uptr old_size = Size();
80 if (size <= old_size) {
81 end_ = begin_ + size;
82 return;
83 }
84 EnsureSize(size);
85 if (old_size < size) {
86 for (uptr i = old_size; i < size; i++)
87 internal_memset(&begin_[i], 0, sizeof(begin_[i]));
88 }
89 }
90
91 private:
92 T *begin_;
93 T *end_;
94 T *last_;
95
96 void EnsureSize(uptr size) {
97 if (size <= Size())
98 return;
99 if (size <= (uptr)(last_ - begin_)) {
100 end_ = begin_ + size;
101 return;
102 }
103 uptr cap0 = last_ - begin_;
104 uptr cap = cap0 * 5 / 4; // 25% growth
105 if (cap == 0)
106 cap = 16;
107 if (cap < size)
108 cap = size;
109 T *p = (T*)InternalAlloc(cap * sizeof(T));
110 if (cap0) {
111 internal_memcpy(p, begin_, cap0 * sizeof(T));
112 InternalFree(begin_);
113 }
114 begin_ = p;
115 end_ = begin_ + size;
116 last_ = begin_ + cap;
117 }
118
119 Vector(const Vector&);
120 void operator=(const Vector&);
121};
122} // namespace __sanitizer
123
124#endif // #ifndef SANITIZER_VECTOR_H
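
For contrast, a short usage sketch of the container as the listing intends it to be used: Resize() and PushBack() go through EnsureSize(), which allocates via InternalAlloc(), so begin_ is non-null by the time operator[] runs. The sketch assumes it is compiled inside compiler-rt with sanitizer_vector.h on the include path; the function name is illustrative.

#include "sanitizer_common/sanitizer_vector.h"

using __sanitizer::uptr;
using __sanitizer::Vector;

void VectorUsageSketch() {
  Vector<uptr> v;             // begin_ == nullptr right after construction
  v.Resize(4);                // EnsureSize(4): InternalAlloc, new elements zeroed
  v[0] = 42;                  // indexes the freshly allocated buffer
  uptr *slot = v.PushBack();  // appends one zero-initialized element
  *slot = 7;
  v.PopBack();                // drops that element again
}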