Bug Summary

File: compiler-rt/lib/tsan/../sanitizer_common/sanitizer_vector.h
Warning: line 47, column 5
Returning null reference
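
To make the warning easier to follow before diving into the annotated listing, here is a minimal, self-contained illustration of the pattern the checker is describing. It is a sketch under stated assumptions (MiniVector and its members are invented names, not the sanitizer's actual Vector): a container whose default constructor leaves its storage pointer null, and whose operator[] has no release-mode check, can hand back a reference formed from a null pointer, which is the "returning null reference" diagnosed at sanitizer_vector.h:47.

#include <cstddef>

// Simplified stand-in for the flagged container (illustration only, not the
// real __sanitizer::Vector). begin_ starts out null, and operator[] performs
// no null/bounds check outside of debug builds, so indexing before any
// storage has been allocated dereferences a null pointer.
template <typename T>
class MiniVector {
 public:
  MiniVector() : begin_(nullptr), end_(nullptr) {}
  T &operator[](std::size_t i) { return begin_[i]; }  // no release-mode check
  std::size_t Size() const { return static_cast<std::size_t>(end_ - begin_); }

 private:
  T *begin_;
  T *end_;
};

int main() {
  MiniVector<unsigned long> stack;  // begin_ == nullptr, as in analyzer step 23
  // stack[0] = 1;  // would bind a reference to *(unsigned long *)nullptr (UB)
  return 0;
}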

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name tsan_rtl_report.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +sse4.2 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I projects/compiler-rt/lib/tsan -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/compiler-rt/lib/tsan -I include -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/compiler-rt/lib/tsan/.. -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-format-pedantic -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -fno-builtin -fno-rtti -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-26-234817-15343-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp

/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp

1//===-- tsan_rtl_report.cpp -----------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11//===----------------------------------------------------------------------===//
12
13#include "sanitizer_common/sanitizer_libc.h"
14#include "sanitizer_common/sanitizer_placement_new.h"
15#include "sanitizer_common/sanitizer_stackdepot.h"
16#include "sanitizer_common/sanitizer_common.h"
17#include "sanitizer_common/sanitizer_stacktrace.h"
18#include "tsan_platform.h"
19#include "tsan_rtl.h"
20#include "tsan_suppressions.h"
21#include "tsan_symbolize.h"
22#include "tsan_report.h"
23#include "tsan_sync.h"
24#include "tsan_mman.h"
25#include "tsan_flags.h"
26#include "tsan_fd.h"
27
28namespace __tsan {
29
30using namespace __sanitizer;
31
32static ReportStack *SymbolizeStack(StackTrace trace);
33
34// Can be overridden by an application/test to intercept reports.
35#ifdef TSAN_EXTERNAL_HOOKS
36bool OnReport(const ReportDesc *rep, bool suppressed);
37#else
38SANITIZER_WEAK_CXX_DEFAULT_IMPL
39bool OnReport(const ReportDesc *rep, bool suppressed) {
40 (void)rep;
41 return suppressed;
42}
43#endif
44
45SANITIZER_WEAK_DEFAULT_IMPL
46void __tsan_on_report(const ReportDesc *rep) {
47 (void)rep;
48}
49
50static void StackStripMain(SymbolizedStack *frames) {
51 SymbolizedStack *last_frame = nullptr;
52 SymbolizedStack *last_frame2 = nullptr;
53 for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
54 last_frame2 = last_frame;
55 last_frame = cur;
56 }
57
58 if (last_frame2 == 0)
59 return;
60#if !SANITIZER_GO
61 const char *last = last_frame->info.function;
62 const char *last2 = last_frame2->info.function;
63 // Strip frame above 'main'
64 if (last2 && 0 == internal_strcmp(last2, "main")) {
65 last_frame->ClearAll();
66 last_frame2->next = nullptr;
67 // Strip our internal thread start routine.
68 } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
69 last_frame->ClearAll();
70 last_frame2->next = nullptr;
71 // Strip global ctors init, .preinit_array and main caller.
72 } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
73 0 == internal_strcmp(last, "__libc_csu_init") ||
74 0 == internal_strcmp(last, "__libc_start_main"))) {
75 last_frame->ClearAll();
76 last_frame2->next = nullptr;
77 // If both are 0, then we probably just failed to symbolize.
78 } else if (last || last2) {
79 // Ensure that we recovered stack completely. Trimmed stack
80 // can actually happen if we do not instrument some code,
81 // so it's only a debug print. However we must try hard to not miss it
82 // due to our fault.
83 DPrintf("Bottom stack frame is missed\n");
84 }
85#else
86 // The last frame always points into the runtime (gosched0, goexit0, runtime.main).
87 last_frame->ClearAll();
88 last_frame2->next = nullptr;
89#endif
90}
91
92ReportStack *SymbolizeStackId(u32 stack_id) {
93 if (stack_id == 0)
94 return 0;
95 StackTrace stack = StackDepotGet(stack_id);
96 if (stack.trace == nullptr)
97 return nullptr;
98 return SymbolizeStack(stack);
99}
100
101static ReportStack *SymbolizeStack(StackTrace trace) {
102 if (trace.size == 0)
103 return 0;
104 SymbolizedStack *top = nullptr;
105 for (uptr si = 0; si < trace.size; si++) {
106 const uptr pc = trace.trace[si];
107 uptr pc1 = pc;
108 // We obtain the return address, but we're interested in the previous
109 // instruction.
110 if ((pc & kExternalPCBit) == 0)
111 pc1 = StackTrace::GetPreviousInstructionPc(pc);
112 SymbolizedStack *ent = SymbolizeCode(pc1);
113 CHECK_NE(ent, 0);
114 SymbolizedStack *last = ent;
115 while (last->next) {
116 last->info.address = pc; // restore original pc for report
117 last = last->next;
118 }
119 last->info.address = pc; // restore original pc for report
120 last->next = top;
121 top = ent;
122 }
123 StackStripMain(top);
124
125 auto *stack = New<ReportStack>();
126 stack->frames = top;
127 return stack;
128}
129
130bool ShouldReport(ThreadState *thr, ReportType typ) {
131 // We set thr->suppress_reports in the fork context.
132 // Taking any locking in the fork context can lead to deadlocks.
133 // If any locks are already taken, it's too late to do this check.
134 CheckedMutex::CheckNoLocks();
135 // For the same reason check we didn't lock thread_registry yet.
136 if (SANITIZER_DEBUG)
137 ThreadRegistryLock l(&ctx->thread_registry);
138 if (!flags()->report_bugs || thr->suppress_reports)
139 return false;
140 switch (typ) {
141 case ReportTypeSignalUnsafe:
142 return flags()->report_signal_unsafe;
143 case ReportTypeThreadLeak:
144#if !SANITIZER_GO
145 // It's impossible to join phantom threads
146 // in the child after fork.
147 if (ctx->after_multithreaded_fork)
148 return false;
149#endif
150 return flags()->report_thread_leaks;
151 case ReportTypeMutexDestroyLocked:
152 return flags()->report_destroy_locked;
153 default:
154 return true;
155 }
156}
157
158ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
159 ctx->thread_registry.CheckLocked();
160 rep_ = New<ReportDesc>();
161 rep_->typ = typ;
162 rep_->tag = tag;
163 ctx->report_mtx.Lock();
164}
165
166ScopedReportBase::~ScopedReportBase() {
167 ctx->report_mtx.Unlock();
168 DestroyAndFree(rep_);
169}
170
171void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
172 ReportStack **rs = rep_->stacks.PushBack();
173 *rs = SymbolizeStack(stack);
174 (*rs)->suppressable = suppressable;
175}
176
177void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
178 StackTrace stack, const MutexSet *mset) {
179 auto *mop = New<ReportMop>();
180 rep_->mops.PushBack(mop);
181 mop->tid = s.tid();
182 mop->addr = addr + s.addr0();
183 mop->size = s.size();
184 mop->write = s.IsWrite();
185 mop->atomic = s.IsAtomic();
186 mop->stack = SymbolizeStack(stack);
187 mop->external_tag = external_tag;
188 if (mop->stack)
189 mop->stack->suppressable = true;
190 for (uptr i = 0; i < mset->Size(); i++) {
191 MutexSet::Desc d = mset->Get(i);
192 u64 mid = this->AddMutex(d.id);
193 ReportMopMutex mtx = {mid, d.write};
194 mop->mset.PushBack(mtx);
195 }
196}
197
198void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
199 rep_->unique_tids.PushBack(unique_tid);
200}
201
202void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
203 for (uptr i = 0; i < rep_->threads.Size(); i++) {
204 if ((u32)rep_->threads[i]->id == tctx->tid)
205 return;
206 }
207 auto *rt = New<ReportThread>();
208 rep_->threads.PushBack(rt);
209 rt->id = tctx->tid;
210 rt->os_id = tctx->os_id;
211 rt->running = (tctx->status == ThreadStatusRunning);
212 rt->name = internal_strdup(tctx->name);
213 rt->parent_tid = tctx->parent_tid;
214 rt->thread_type = tctx->thread_type;
215 rt->stack = 0;
216 rt->stack = SymbolizeStackId(tctx->creation_stack_id);
217 if (rt->stack)
218 rt->stack->suppressable = suppressable;
219}
220
221#if !SANITIZER_GO
222static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
223 int unique_id = *(int *)arg;
224 return tctx->unique_id == (u32)unique_id;
225}
226
227static ThreadContext *FindThreadByUidLocked(Tid unique_id) {
228 ctx->thread_registry.CheckLocked();
229 return static_cast<ThreadContext *>(
230 ctx->thread_registry.FindThreadContextLocked(
231 FindThreadByUidLockedCallback, &unique_id));
232}
233
234static ThreadContext *FindThreadByTidLocked(Tid tid) {
235 ctx->thread_registry.CheckLocked();
236 return static_cast<ThreadContext *>(
237 ctx->thread_registry.GetThreadLocked(tid));
238}
239
240static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
241 uptr addr = (uptr)arg;
242 ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
243 if (tctx->status != ThreadStatusRunning)
244 return false;
245 ThreadState *thr = tctx->thr;
246 CHECK(thr);
247 return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
248 (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
249}
250
251ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
252 ctx->thread_registry.CheckLocked();
253 ThreadContext *tctx =
254 static_cast<ThreadContext *>(ctx->thread_registry.FindThreadContextLocked(
255 IsInStackOrTls, (void *)addr));
256 if (!tctx)
257 return 0;
258 ThreadState *thr = tctx->thr;
259 CHECK(thr);
260 *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
261 return tctx;
262}
263#endif
264
265void ScopedReportBase::AddThread(Tid unique_tid, bool suppressable) {
266#if !SANITIZER_GO
267 if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
268 AddThread(tctx, suppressable);
269#endif
270}
271
272void ScopedReportBase::AddMutex(const SyncVar *s) {
273 for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
274 if (rep_->mutexes[i]->id == s->uid)
275 return;
276 }
277 auto *rm = New<ReportMutex>();
278 rep_->mutexes.PushBack(rm);
279 rm->id = s->uid;
280 rm->addr = s->addr;
281 rm->destroyed = false;
282 rm->stack = SymbolizeStackId(s->creation_stack_id);
283}
284
285u64 ScopedReportBase::AddMutex(u64 id) {
286 u64 uid = 0;
287 u64 mid = id;
288 uptr addr = SyncVar::SplitId(id, &uid);
289 SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
290 // Check that the mutex is still alive.
291 // Another mutex can be created at the same address,
292 // so check uid as well.
293 if (s && s->CheckId(uid)) {
294 Lock l(&s->mtx);
295 mid = s->uid;
296 AddMutex(s);
297 } else {
298 AddDeadMutex(id);
299 }
300 return mid;
301}
302
303void ScopedReportBase::AddDeadMutex(u64 id) {
304 for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
305 if (rep_->mutexes[i]->id == id)
306 return;
307 }
308 auto *rm = New<ReportMutex>();
309 rep_->mutexes.PushBack(rm);
310 rm->id = id;
311 rm->addr = 0;
312 rm->destroyed = true;
313 rm->stack = 0;
314}
315
316void ScopedReportBase::AddLocation(uptr addr, uptr size) {
317 if (addr == 0)
318 return;
319#if !SANITIZER_GO
320 int fd = -1;
321 Tid creat_tid = kInvalidTid;
322 StackID creat_stack = 0;
323 if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
324 auto *loc = New<ReportLocation>();
325 loc->type = ReportLocationFD;
326 loc->fd = fd;
327 loc->tid = creat_tid;
328 loc->stack = SymbolizeStackId(creat_stack);
329 rep_->locs.PushBack(loc);
330 ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
331 if (tctx)
332 AddThread(tctx);
333 return;
334 }
335 MBlock *b = 0;
336 uptr block_begin = 0;
337 Allocator *a = allocator();
338 if (a->PointerIsMine((void*)addr)) {
339 block_begin = (uptr)a->GetBlockBegin((void *)addr);
340 if (block_begin)
341 b = ctx->metamap.GetBlock(block_begin);
342 }
343 if (!b)
344 b = JavaHeapBlock(addr, &block_begin);
345 if (b != 0) {
346 ThreadContext *tctx = FindThreadByTidLocked(b->tid);
347 auto *loc = New<ReportLocation>();
348 loc->type = ReportLocationHeap;
349 loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
350 loc->heap_chunk_size = b->siz;
351 loc->external_tag = b->tag;
352 loc->tid = tctx ? tctx->tid : b->tid;
353 loc->stack = SymbolizeStackId(b->stk);
354 rep_->locs.PushBack(loc);
355 if (tctx)
356 AddThread(tctx);
357 return;
358 }
359 bool is_stack = false;
360 if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
361 auto *loc = New<ReportLocation>();
362 loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
363 loc->tid = tctx->tid;
364 rep_->locs.PushBack(loc);
365 AddThread(tctx);
366 }
367#endif
368 if (ReportLocation *loc = SymbolizeData(addr)) {
369 loc->suppressable = true;
370 rep_->locs.PushBack(loc);
371 return;
372 }
373}
374
375#if !SANITIZER_GO
376void ScopedReportBase::AddSleep(StackID stack_id) {
377 rep_->sleep = SymbolizeStackId(stack_id);
378}
379#endif
380
381void ScopedReportBase::SetCount(int count) { rep_->count = count; }
382
383const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }
384
385ScopedReport::ScopedReport(ReportType typ, uptr tag)
386 : ScopedReportBase(typ, tag) {}
387
388ScopedReport::~ScopedReport() {}
389
390void RestoreStack(Tid tid, const u64 epoch, VarSizeStackTrace *stk,
391 MutexSet *mset, uptr *tag) {
392 // This function restores stack trace and mutex set for the thread/epoch.
393 // It does so by getting stack trace and mutex set at the beginning of
394 // trace part, and then replaying the trace till the given epoch.
395 Trace* trace = ThreadTrace(tid);
396 ReadLock l(&trace->mtx);
397 const int partidx = (epoch / kTracePartSize) % TraceParts();
398 TraceHeader* hdr = &trace->headers[partidx];
399 if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
16
Assuming 'epoch' is >= field 'epoch0'
17
Assuming the condition is false
18
Taking false branch
400 return;
401 CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
19
Assuming 'v1' is equal to 'v2'
20
Taking false branch
21
Loop condition is false. Exiting loop
402 const u64 epoch0 = RoundDown(epoch, TraceSize());
403 const u64 eend = epoch % TraceSize();
404 const u64 ebegin = RoundDown(eend, kTracePartSize);
405 DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
406 tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
407 Vector<uptr> stack;
22
Calling default constructor for 'Vector<unsigned long>'
25
Returning from default constructor for 'Vector<unsigned long>'
408 stack.Resize(hdr->stack0.size + 64);
409 for (uptr i = 0; i < hdr->stack0.size; i++) {
26
Loop condition is true. Entering loop body
410 stack[i] = hdr->stack0.trace[i];
27
Calling 'Vector::operator[]'
411 DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
412 }
413 if (mset)
414 *mset = hdr->mset0;
415 uptr pos = hdr->stack0.size;
416 Event *events = (Event*)GetThreadTrace(tid);
417 for (uptr i = ebegin; i <= eend; i++) {
418 Event ev = events[i];
419 EventType typ = (EventType)(ev >> kEventPCBits);
420 uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
421 DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
422 if (typ == EventTypeMop) {
423 stack[pos] = pc;
424 } else if (typ == EventTypeFuncEnter) {
425 if (stack.Size() < pos + 2)
426 stack.Resize(pos + 2);
427 stack[pos++] = pc;
428 } else if (typ == EventTypeFuncExit) {
429 if (pos > 0)
430 pos--;
431 }
432 if (mset) {
433 if (typ == EventTypeLock) {
434 mset->Add(pc, true, epoch0 + i);
435 } else if (typ == EventTypeUnlock) {
436 mset->Del(pc, true);
437 } else if (typ == EventTypeRLock) {
438 mset->Add(pc, false, epoch0 + i);
439 } else if (typ == EventTypeRUnlock) {
440 mset->Del(pc, false);
441 }
442 }
443 for (uptr j = 0; j <= pos; j++)
444 DPrintf2(" #%zu: %zx\n", j, stack[j]);
445 }
446 if (pos == 0 && stack[0] == 0)
447 return;
448 pos++;
449 stk->Init(&stack[0], pos);
450 ExtractTagFromStack(stk, tag);
451}
452
453namespace v3 {
454
455// Replays the trace up to last_pos position in the last part
456// or up to the provided epoch/sid (whichever is earlier)
457// and calls the provided function f for each event.
458template <typename Func>
459void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
460 Epoch epoch, Func f) {
461 TracePart *part = trace->parts.Front();
462 Sid ev_sid = kFreeSid;
463 Epoch ev_epoch = kEpochOver;
464 for (;;) {
465 DCHECK_EQ(part->trace, trace);
466 // Note: an event can't start in the last element.
467 // Since an event can take up to 2 elements,
468 // we ensure we have at least 2 before adding an event.
469 Event *end = &part->events[TracePart::kSize - 1];
470 if (part == last)
471 end = last_pos;
472 for (Event *evp = &part->events[0]; evp < end; evp++) {
473 Event *evp0 = evp;
474 if (!evp->is_access && !evp->is_func) {
475 switch (evp->type) {
476 case EventType::kTime: {
477 auto *ev = reinterpret_cast<EventTime *>(evp);
478 ev_sid = static_cast<Sid>(ev->sid);
479 ev_epoch = static_cast<Epoch>(ev->epoch);
480 if (ev_sid == sid && ev_epoch > epoch)
481 return;
482 break;
483 }
484 case EventType::kAccessExt:
485 FALLTHROUGH;
486 case EventType::kAccessRange:
487 FALLTHROUGH;
488 case EventType::kLock:
489 FALLTHROUGH;
490 case EventType::kRLock:
491 // These take 2 Event elements.
492 evp++;
493 break;
494 case EventType::kUnlock:
495 // This takes 1 Event element.
496 break;
497 }
498 }
499 CHECK_NE(ev_sid, kFreeSid);
500 CHECK_NE(ev_epoch, kEpochOver);
501 f(ev_sid, ev_epoch, evp0);
502 }
503 if (part == last)
504 return;
505 part = trace->parts.Next(part);
506 CHECK(part);
507 }
508 CHECK(0);
509}
510
511static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
512 Vector<uptr> *stack, MutexSet *mset, uptr pc,
513 bool *found) {
514 DPrintf2(" MATCHED\n");
515 *pmset = *mset;
516 stack->PushBack(pc);
517 pstk->Init(&(*stack)[0], stack->Size());
518 stack->PopBack();
519 *found = true;
520}
521
522// Checks if addr1|size1 is fully contained in addr2|size2.
523// We check for fully contained instead of just overlapping
524// because a memory access is always traced once, but can be
525// split into multiple accesses in the shadow.
526static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
527 uptr size2) {
528 return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
529}
530
531// Replays the trace of thread tid up to the target event identified
532// by sid/epoch/addr/size/typ and restores and returns stack, mutex set
533// and tag for that event. If there are multiple such events, it returns
534// the last one. Returns false if the event is not present in the trace.
535bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
536 uptr size, AccessType typ, VarSizeStackTrace *pstk,
537 MutexSet *pmset, uptr *ptag) {
538 // This function restores stack trace and mutex set for the thread/epoch.
539 // It does so by getting stack trace and mutex set at the beginning of
540 // trace part, and then replaying the trace till the given epoch.
541 DPrintf2("RestoreStack: tid=%u sid=%u@%u addr=0x%zx/%zu typ=%x\n", tid,
542 static_cast<int>(sid), static_cast<int>(epoch), addr, size,
543 static_cast<int>(typ));
544 ctx->slot_mtx.CheckLocked(); // needed to prevent trace part recycling
545 ctx->thread_registry.CheckLocked();
546 ThreadContext *tctx =
547 static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
548 Trace *trace = &tctx->trace;
549 // Snapshot first/last parts and the current position in the last part.
550 TracePart *first_part;
551 TracePart *last_part;
552 Event *last_pos;
553 {
554 Lock lock(&trace->mtx);
555 first_part = trace->parts.Front();
556 if (!first_part)
557 return false;
558 last_part = trace->parts.Back();
559 last_pos = trace->final_pos;
560 if (tctx->thr)
561 last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
562 }
563 // Too large for stack.
564 alignas(MutexSet) static char mset_storage[sizeof(MutexSet)];
565 MutexSet &mset = *new (mset_storage) MutexSet();
566 Vector<uptr> stack;
567 uptr prev_pc = 0;
568 bool found = false;
569 bool is_read = typ & kAccessRead;
570 bool is_atomic = typ & kAccessAtomic;
571 bool is_free = typ & kAccessFree;
572 TraceReplay(
573 trace, last_part, last_pos, sid, epoch,
574 [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
575 bool match = ev_sid == sid && ev_epoch == epoch;
576 if (evp->is_access) {
577 if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
578 evp->_ == 0) // NopEvent
579 return;
580 auto *ev = reinterpret_cast<EventAccess *>(evp);
581 uptr ev_addr = RestoreAddr(ev->addr);
582 uptr ev_size = 1 << ev->size_log;
583 uptr ev_pc =
584 prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
585 prev_pc = ev_pc;
586 DPrintf2(" Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
587 ev_addr, ev_size, ev->is_read, ev->is_atomic);
588 if (match && type == EventType::kAccessExt &&
589 IsWithinAccess(addr, size, ev_addr, ev_size) &&
590 is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
591 RestoreStackMatch(pstk, pmset, &stack, &mset, ev_pc, &found);
592 return;
593 }
594 if (evp->is_func) {
595 auto *ev = reinterpret_cast<EventFunc *>(evp);
596 if (ev->pc) {
597 DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
598 stack.PushBack(ev->pc);
599 } else {
600 DPrintf2(" FuncExit\n");
601 CHECK(stack.Size());
602 stack.PopBack();
603 }
604 return;
605 }
606 switch (evp->type) {
607 case EventType::kAccessExt: {
608 auto *ev = reinterpret_cast<EventAccessExt *>(evp);
609 uptr ev_addr = RestoreAddr(ev->addr);
610 uptr ev_size = 1 << ev->size_log;
611 prev_pc = ev->pc;
612 DPrintf2(" AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
613 ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
614 if (match && type == EventType::kAccessExt &&
615 IsWithinAccess(addr, size, ev_addr, ev_size) &&
616 is_read == ev->is_read && is_atomic == ev->is_atomic &&
617 !is_free)
618 RestoreStackMatch(pstk, pmset, &stack, &mset, ev->pc, &found);
619 break;
620 }
621 case EventType::kAccessRange: {
622 auto *ev = reinterpret_cast<EventAccessRange *>(evp);
623 uptr ev_addr = RestoreAddr(ev->addr);
624 uptr ev_size =
625 (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
626 uptr ev_pc = RestoreAddr(ev->pc);
627 prev_pc = ev_pc;
628 DPrintf2(" Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
629 ev_addr, ev_size, ev->is_read, ev->is_free);
630 if (match && type == EventType::kAccessExt &&
631 IsWithinAccess(addr, size, ev_addr, ev_size) &&
632 is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
633 RestoreStackMatch(pstk, pmset, &stack, &mset, ev_pc, &found);
634 break;
635 }
636 case EventType::kLock:
637 FALLTHROUGH;
638 case EventType::kRLock: {
639 auto *ev = reinterpret_cast<EventLock *>(evp);
640 bool is_write = ev->type == EventType::kLock;
641 uptr ev_addr = RestoreAddr(ev->addr);
642 uptr ev_pc = RestoreAddr(ev->pc);
643 StackID stack_id =
644 (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
645 DPrintf2(" Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
646 ev_addr, stack_id, is_write);
647 mset.AddAddr(ev_addr, stack_id, is_write);
648 // Events with ev_pc == 0 are written to the beginning of trace
649 // part as initial mutex set (are not real).
650 if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
651 RestoreStackMatch(pstk, pmset, &stack, &mset, ev_pc, &found);
652 break;
653 }
654 case EventType::kUnlock: {
655 auto *ev = reinterpret_cast<EventUnlock *>(evp);
656 uptr ev_addr = RestoreAddr(ev->addr);
657 DPrintf2(" Unlock: addr=0x%zx\n", ev_addr);
658 mset.DelAddr(ev_addr);
659 break;
660 }
661 case EventType::kTime:
662 // TraceReplay already extracted sid/epoch from it,
663 // nothing else to do here.
664 break;
665 }
666 });
667 ExtractTagFromStack(pstk, ptag);
668 return found;
669}
670
671} // namespace v3
672
673bool RacyStacks::operator==(const RacyStacks &other) const {
674 if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
675 return true;
676 if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
677 return true;
678 return false;
679}
680
681static bool FindRacyStacks(const RacyStacks &hash) {
682 for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
683 if (hash == ctx->racy_stacks[i]) {
684 VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
685 return true;
686 }
687 }
688 return false;
689}
690
691static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
692 if (!flags()->suppress_equal_stacks)
693 return false;
694 RacyStacks hash;
695 hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
696 hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
697 {
698 ReadLock lock(&ctx->racy_mtx);
699 if (FindRacyStacks(hash))
700 return true;
701 }
702 Lock lock(&ctx->racy_mtx);
703 if (FindRacyStacks(hash))
704 return true;
705 ctx->racy_stacks.PushBack(hash);
706 return false;
707}
708
709static bool FindRacyAddress(const RacyAddress &ra0) {
710 for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
711 RacyAddress ra2 = ctx->racy_addresses[i];
712 uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
713 uptr minend = min(ra0.addr_max, ra2.addr_max);
714 if (maxbeg < minend) {
715 VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
716 return true;
717 }
718 }
719 return false;
720}
721
722static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
723 if (!flags()->suppress_equal_addresses)
724 return false;
725 RacyAddress ra0 = {addr_min, addr_max};
726 {
727 ReadLock lock(&ctx->racy_mtx);
728 if (FindRacyAddress(ra0))
729 return true;
730 }
731 Lock lock(&ctx->racy_mtx);
732 if (FindRacyAddress(ra0))
733 return true;
734 ctx->racy_addresses.PushBack(ra0);
735 return false;
736}
737
738bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
739 // These should have been checked in ShouldReport.
740 // It's too late to check them here, we have already taken locks.
741 CHECK(flags()->report_bugs);
742 CHECK(!thr->suppress_reports);
743 atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
744 const ReportDesc *rep = srep.GetReport();
745 CHECK_EQ(thr->current_report, nullptr);
746 thr->current_report = rep;
747 Suppression *supp = 0;
748 uptr pc_or_addr = 0;
749 for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
750 pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
751 for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
752 pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
753 for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
754 pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
755 for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
756 pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
757 if (pc_or_addr != 0) {
758 Lock lock(&ctx->fired_suppressions_mtx);
759 FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
760 ctx->fired_suppressions.push_back(s);
761 }
762 {
763 bool old_is_freeing = thr->is_freeing;
764 thr->is_freeing = false;
765 bool suppressed = OnReport(rep, pc_or_addr != 0);
766 thr->is_freeing = old_is_freeing;
767 if (suppressed) {
768 thr->current_report = nullptr;
769 return false;
770 }
771 }
772 PrintReport(rep);
773 __tsan_on_report(rep);
774 ctx->nreported++;
775 if (flags()->halt_on_error)
776 Die();
777 thr->current_report = nullptr;
778 return true;
779}
780
781bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
782 ReadLock lock(&ctx->fired_suppressions_mtx);
783 for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
784 if (ctx->fired_suppressions[k].type != type)
785 continue;
786 for (uptr j = 0; j < trace.size; j++) {
787 FiredSuppression *s = &ctx->fired_suppressions[k];
788 if (trace.trace[j] == s->pc_or_addr) {
789 if (s->supp)
790 atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
791 return true;
792 }
793 }
794 }
795 return false;
796}
797
798static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
799 ReadLock lock(&ctx->fired_suppressions_mtx);
800 for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
801 if (ctx->fired_suppressions[k].type != type)
802 continue;
803 FiredSuppression *s = &ctx->fired_suppressions[k];
804 if (addr == s->pc_or_addr) {
805 if (s->supp)
806 atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
807 return true;
808 }
809 }
810 return false;
811}
812
813static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
814 Shadow s0(thr->racy_state[0]);
815 Shadow s1(thr->racy_state[1]);
816 CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
817 if (!s0.IsAtomic() && !s1.IsAtomic())
818 return true;
819 if (s0.IsAtomic() && s1.IsFreed())
820 return true;
821 if (s1.IsAtomic() && thr->is_freeing)
822 return true;
823 return false;
824}
825
826void ReportRace(ThreadState *thr) {
827 CheckedMutex::CheckNoLocks();
828
829 // Symbolizer makes lots of intercepted calls. If we try to process them,
830 // at best it will cause deadlocks on internal mutexes.
831 ScopedIgnoreInterceptors ignore;
832
833 if (!ShouldReport(thr, ReportTypeRace))
834 return;
835 if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
1
Assuming field 'report_atomic_races' is true
836 return;
837
838 bool freed = false;
839 {
840 Shadow s(thr->racy_state[1]);
841 freed = s.GetFreedAndReset();
842 thr->racy_state[1] = s.raw();
843 }
844
845 uptr addr = ShadowToMem(thr->racy_shadow_addr);
846 uptr addr_min = 0;
847 uptr addr_max = 0;
848 {
849 uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
850 uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
851 uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
852 uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
853 addr_min = min(a0, a1);
854 addr_max = max(e0, e1);
855 if (IsExpectedReport(addr_min, addr_max - addr_min))
2
Assuming the condition is false
3
Taking false branch
856 return;
857 }
858 if (HandleRacyAddress(thr, addr_min, addr_max))
4
Taking false branch
859 return;
860
861 ReportType typ = ReportTypeRace;
862 if (thr->is_vptr_access && freed)
5
Assuming field 'is_vptr_access' is false
863 typ = ReportTypeVptrUseAfterFree;
864 else if (thr->is_vptr_access)
5.1
Field 'is_vptr_access' is false
6
Taking false branch
865 typ = ReportTypeVptrRace;
866 else if (freed)
7
Assuming 'freed' is false
8
Taking false branch
867 typ = ReportTypeUseAfterFree;
868
869 if (IsFiredSuppression(ctx, typ, addr))
9
Assuming the condition is false
10
Taking false branch
870 return;
871
872 const uptr kMop = 2;
873 VarSizeStackTrace traces[kMop];
874 uptr tags[kMop] = {kExternalTagNone};
875 uptr toppc = TraceTopPC(thr);
876 if (toppc >> kEventPCBits) {
11
Assuming the condition is false
12
Taking false branch
877 // This is a work-around for a known issue.
878 // The scenario where this happens is rather elaborate and requires
879 // an instrumented __sanitizer_report_error_summary callback and
880 // a __tsan_symbolize_external callback and a race during a range memory
881 // access larger than 8 bytes. MemoryAccessRange adds the current PC to
882 // the trace and starts processing memory accesses. A first memory access
883 // triggers a race, we report it and call the instrumented
884 // __sanitizer_report_error_summary, which adds more stuff to the trace
885 // since it is instrumented. Then a second memory access in MemoryAccessRange
886 // also triggers a race and we get here and call TraceTopPC to get the
887 // current PC, however now it contains some unrelated events from the
888 // callback. Most likely, TraceTopPC will now return a EventTypeFuncExit
889 // event. Later we subtract -1 from it (in GetPreviousInstructionPc)
890 // and the resulting PC has kExternalPCBit set, so we pass it to
891 // __tsan_symbolize_external_ex. __tsan_symbolize_external_ex is within its
892 // rights to crash since the PC is completely bogus.
893 // test/tsan/double_race.cpp contains a test case for this.
894 toppc = 0;
895 }
896 ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
897 if (IsFiredSuppression(ctx, typ, traces[0]))
13
Assuming the condition is false
14
Taking false branch
898 return;
899
900 // MutexSet is too large to live on stack.
901 Vector<u64> mset_buffer;
902 mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
903 MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();
904
905 Shadow s2(thr->racy_state[1]);
906 RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
15
Calling 'RestoreStack'
907 if (IsFiredSuppression(ctx, typ, traces[1]))
908 return;
909
910 if (HandleRacyStacks(thr, traces))
911 return;
912
913 // If any of the accesses has a tag, treat this as an "external" race.
914 uptr tag = kExternalTagNone;
915 for (uptr i = 0; i < kMop; i++) {
916 if (tags[i] != kExternalTagNone) {
917 typ = ReportTypeExternalRace;
918 tag = tags[i];
919 break;
920 }
921 }
922
923 ThreadRegistryLock l0(&ctx->thread_registry);
924 ScopedReport rep(typ, tag);
925 for (uptr i = 0; i < kMop; i++) {
926 Shadow s(thr->racy_state[i]);
927 rep.AddMemoryAccess(addr, tags[i], s, traces[i],
928 i == 0 ? &thr->mset : mset2);
929 }
930
931 for (uptr i = 0; i < kMop; i++) {
932 FastState s(thr->racy_state[i]);
933 ThreadContext *tctx = static_cast<ThreadContext *>(
934 ctx->thread_registry.GetThreadLocked(s.tid()));
935 if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
936 continue;
937 rep.AddThread(tctx);
938 }
939
940 rep.AddLocation(addr_min, addr_max - addr_min);
941
942#if !SANITIZER_GO
943 {
944 Shadow s(thr->racy_state[1]);
945 if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
946 rep.AddSleep(thr->last_sleep_stack_id);
947 }
948#endif
949
950 OutputReport(thr, rep);
951}
952
953void PrintCurrentStack(ThreadState *thr, uptr pc) {
954 VarSizeStackTrace trace;
955 ObtainCurrentStack(thr, pc, &trace);
956 PrintStack(SymbolizeStack(trace));
957}
958
959// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
960// __sanitizer_print_stack_trace exists in the actual unwound stack, but
961// tail-call to PrintCurrentStackSlow breaks this assumption because
962// __sanitizer_print_stack_trace disappears after tail-call.
963// However, this solution is not reliable enough, please see dvyukov's comment
964// http://reviews.llvm.org/D19148#406208
965// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
966ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
967#if !SANITIZER_GO
968 uptr bp = GET_CURRENT_FRAME();
969 auto *ptrace = New<BufferedStackTrace>();
970 ptrace->Unwind(pc, bp, nullptr, false);
971
972 for (uptr i = 0; i < ptrace->size / 2; i++) {
973 uptr tmp = ptrace->trace_buffer[i];
974 ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
975 ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
976 }
977 PrintStack(SymbolizeStack(*ptrace));
978#endif
979}
980
981} // namespace __tsan
982
983using namespace __tsan;
984
985extern "C" {
986SANITIZER_INTERFACE_ATTRIBUTE
987void __sanitizer_print_stack_trace() {
988 PrintCurrentStackSlow(StackTrace::GetCurrentPc());
989}
990} // extern "C"

/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/compiler-rt/lib/tsan/../sanitizer_common/sanitizer_vector.h

1//===-- sanitizer_vector.h -------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is shared between sanitizers run-time libraries.
10//
11//===----------------------------------------------------------------------===//
12
13// Low-fat STL-like vector container.
14
15#ifndef SANITIZER_VECTOR_H
16#define SANITIZER_VECTOR_H
17
18#include "sanitizer_common/sanitizer_allocator_internal.h"
19#include "sanitizer_common/sanitizer_libc.h"
20
21namespace __sanitizer {
22
23template<typename T>
24class Vector {
25 public:
26 Vector() : begin_(), end_(), last_() {}
23
Null pointer value stored to 'stack.begin_'
24
Returning without writing to 'this->begin_'
27
28 ~Vector() {
29 if (begin_)
30 InternalFree(begin_);
31 }
32
33 void Reset() {
34 if (begin_)
35 InternalFree(begin_);
36 begin_ = 0;
37 end_ = 0;
38 last_ = 0;
39 }
40
41 uptr Size() const {
42 return end_ - begin_;
43 }
44
45 T &operator[](uptr i) {
46 DCHECK_LT(i, end_ - begin_);
47 return begin_[i];
28
Returning null reference
48 }
49
50 const T &operator[](uptr i) const {
51 DCHECK_LT(i, end_ - begin_);
52 return begin_[i];
53 }
54
55 T *PushBack() {
56 EnsureSize(Size() + 1);
57 T *p = &end_[-1];
58 internal_memset(p, 0, sizeof(*p));
59 return p;
60 }
61
62 T *PushBack(const T& v) {
63 EnsureSize(Size() + 1);
64 T *p = &end_[-1];
65 internal_memcpy(p, &v, sizeof(*p));
66 return p;
67 }
68
69 void PopBack() {
70 DCHECK_GT(end_, begin_);
71 end_--;
72 }
73
74 void Resize(uptr size) {
75 if (size == 0) {
76 end_ = begin_;
77 return;
78 }
79 uptr old_size = Size();
80 if (size <= old_size) {
81 end_ = begin_ + size;
82 return;
83 }
84 EnsureSize(size);
85 if (old_size < size) {
86 for (uptr i = old_size; i < size; i++)
87 internal_memset(&begin_[i], 0, sizeof(begin_[i]));
88 }
89 }
90
91 private:
92 T *begin_;
93 T *end_;
94 T *last_;
95
96 void EnsureSize(uptr size) {
97 if (size <= Size())
98 return;
99 if (size <= (uptr)(last_ - begin_)) {
100 end_ = begin_ + size;
101 return;
102 }
103 uptr cap0 = last_ - begin_;
104 uptr cap = cap0 * 5 / 4; // 25% growth
105 if (cap == 0)
106 cap = 16;
107 if (cap < size)
108 cap = size;
109 T *p = (T*)InternalAlloc(cap * sizeof(T));
110 if (cap0) {
111 internal_memcpy(p, begin_, cap0 * sizeof(T));
112 InternalFree(begin_);
113 }
114 begin_ = p;
115 end_ = begin_ + size;
116 last_ = begin_ + cap;
117 }
118
119 Vector(const Vector&);
120 void operator=(const Vector&);
121};
122} // namespace __sanitizer
123
124#endif // #ifndef SANITIZER_VECTOR_H
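
A closing note on how the path above reaches this header: RestoreStack() in tsan_rtl_report.cpp default-constructs a Vector<uptr> (step 23 records the null begin_), calls Resize(), and then indexes stack[i] (step 28). DCHECK_LT is a debug-only check, so along the modeled path the analyzer still treats begin_ as possibly null at 'return begin_[i]'. Whether that path is reachable in practice hinges on Resize()/EnsureSize() always allocating for a non-zero size, which the checker does not appear to credit; that reading is an inference, not something the report itself states. Purely as a sketch, and not an upstream change, an accessor with an always-on guard would make the invariant explicit:

#include <cstddef>
#include <cstdlib>

// Sketch only: an element accessor whose guard is compiled in unconditionally
// (unlike a debug-only DCHECK), so the null-dereference path modeled by the
// analyzer is explicitly unreachable. Names are illustrative, not the
// sanitizer_common API.
template <typename T>
struct GuardedVector {
  T *begin_ = nullptr;
  T *end_ = nullptr;

  T &At(std::size_t i) {
    if (begin_ == nullptr || i >= static_cast<std::size_t>(end_ - begin_))
      std::abort();  // storage missing or index out of range
    return begin_[i];
  }
};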