Bug Summary

File: compiler-rt/lib/tsan/rtl/tsan_rtl.h
Warning: line 718, column 3
Called C++ object pointer is uninitialized
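For orientation, the reported pattern reduces to the standalone sketch below (hypothetical names, not TSan code). It mirrors TraceAcquire/TraceEvent shown further down: the acquire helper may return false without writing its out-parameter, and the caller's only guard is a DCHECK-style assert that compiles to nothing in release builds.

// Minimal sketch of the flagged pattern (hypothetical names).
#include <cassert>

struct Event { unsigned long payload; };

// May return false without touching *out, like TraceAcquire() below.
bool TryAcquireSlot(Event **out, bool trace_part_full) {
  if (trace_part_full)
    return false;  // "Returning without writing to '*out'"
  static Event slot;
  *out = &slot;
  return true;
}

void Emit(Event ev, bool trace_part_full) {
  Event *slot;  // "'slot' declared without an initial value"
  if (!TryAcquireSlot(&slot, trace_part_full)) {
    // After switching to a fresh trace part the second attempt is expected
    // to succeed, but the analyzer also explores the path where it fails.
    bool res = TryAcquireSlot(&slot, trace_part_full);
    assert(res);  // like DCHECK: disappears under NDEBUG
    (void)res;
  }
  *slot = ev;  // analyzer: 'slot' may still be uninitialized on this path
}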

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name tsan_rtl_access.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse4.2 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I projects/compiler-rt/lib/tsan -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/tsan -I include -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/tsan/.. -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/build-llvm -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -fno-builtin -fno-rtti -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-11-10-160236-22541-1 -x c++ /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp

/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp

1//===-- tsan_rtl_access.cpp -----------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11// Definitions of memory access and function entry/exit entry points.
12//===----------------------------------------------------------------------===//
13
14#include "tsan_rtl.h"
15
16namespace __tsan {
17
18namespace v3 {
19
20ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState *thr, uptr pc,
21 uptr addr, uptr size,
22 AccessType typ) {
23 DCHECK(size == 1 || size == 2 || size == 4 || size == 8);
24 if (!kCollectHistory)
25 return true;
26 EventAccess *ev;
27 if (UNLIKELY(!TraceAcquire(thr, &ev)))
28 return false;
29 u64 size_log = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
30 uptr pc_delta = pc - thr->trace_prev_pc + (1 << (EventAccess::kPCBits - 1));
31 thr->trace_prev_pc = pc;
32 if (LIKELY(pc_delta < (1 << EventAccess::kPCBits))) {
33 ev->is_access = 1;
34 ev->is_read = !!(typ & kAccessRead);
35 ev->is_atomic = !!(typ & kAccessAtomic);
36 ev->size_log = size_log;
37 ev->pc_delta = pc_delta;
38 DCHECK_EQ(ev->pc_delta, pc_delta);
39 ev->addr = CompressAddr(addr);
40 TraceRelease(thr, ev);
41 return true;
42 }
43 auto *evex = reinterpret_cast<EventAccessExt *>(ev);
44 evex->is_access = 0;
45 evex->is_func = 0;
46 evex->type = EventType::kAccessExt;
47 evex->is_read = !!(typ & kAccessRead);
48 evex->is_atomic = !!(typ & kAccessAtomic);
49 evex->size_log = size_log;
50 evex->addr = CompressAddr(addr);
51 evex->pc = pc;
52 TraceRelease(thr, evex);
53 return true;
54}
55
56ALWAYS_INLINE USED bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc,
57 uptr addr, uptr size,
58 AccessType typ) {
59 if (!kCollectHistory)
60 return true;
61 EventAccessRange *ev;
62 if (UNLIKELY(!TraceAcquire(thr, &ev)))
63 return false;
64 thr->trace_prev_pc = pc;
65 ev->is_access = 0;
66 ev->is_func = 0;
67 ev->type = EventType::kAccessRange;
68 ev->is_read = !!(typ & kAccessRead);
69 ev->is_free = !!(typ & kAccessFree);
70 ev->size_lo = size;
71 ev->pc = CompressAddr(pc);
72 ev->addr = CompressAddr(addr);
73 ev->size_hi = size >> EventAccessRange::kSizeLoBits;
74 TraceRelease(thr, ev);
75 return true;
76}
77
78void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
79 AccessType typ) {
80 if (LIKELY(TryTraceMemoryAccessRange(thr, pc, addr, size, typ)))
81 return;
82 TraceSwitchPart(thr);
83 UNUSED bool res = TryTraceMemoryAccessRange(thr, pc, addr, size, typ);
84 DCHECK(res);
85}
86
87void TraceFunc(ThreadState *thr, uptr pc) {
88 if (LIKELY(TryTraceFunc(thr, pc)))
89 return;
90 TraceSwitchPart(thr);
91 UNUSED bool res = TryTraceFunc(thr, pc);
92 DCHECK(res);
93}
94
95void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
96 StackID stk) {
97 DCHECK(type == EventType::kLock || type == EventType::kRLock);
98 if (!kCollectHistory)
99 return;
100 EventLock ev;
101 ev.is_access = 0;
102 ev.is_func = 0;
103 ev.type = type;
104 ev.pc = CompressAddr(pc);
105 ev.stack_lo = stk;
106 ev.stack_hi = stk >> EventLock::kStackIDLoBits;
107 ev._ = 0;
108 ev.addr = CompressAddr(addr);
109 TraceEvent(thr, ev);
110}
111
112void TraceMutexUnlock(ThreadState *thr, uptr addr) {
113 if (!kCollectHistory)
114 return;
115 EventUnlock ev;
116 ev.is_access = 0;
117 ev.is_func = 0;
118 ev.type = EventType::kUnlock;
119 ev._ = 0;
120 ev.addr = CompressAddr(addr);
121 TraceEvent(thr, ev);
122}
123
124void TraceTime(ThreadState *thr) {
125 if (!kCollectHistory)
  0.1: 'kCollectHistory' is true
  1: Taking false branch
126 return;
127 EventTime ev;
128 ev.is_access = 0;
129 ev.is_func = 0;
130 ev.type = EventType::kTime;
131 ev.sid = static_cast<u64>(thr->sid);
132 ev.epoch = static_cast<u64>(thr->epoch);
133 ev._ = 0;
134 TraceEvent(thr, ev);
  2: Calling 'TraceEvent<__tsan::v3::EventTime>'
135}
136
137} // namespace v3
138
139ALWAYS_INLINE
140Shadow LoadShadow(u64 *p) {
141 u64 raw = atomic_load((atomic_uint64_t *)p, memory_order_relaxed);
142 return Shadow(raw);
143}
144
145ALWAYS_INLINE
146void StoreShadow(u64 *sp, u64 s) {
147 atomic_store((atomic_uint64_t *)sp, s, memory_order_relaxed);
148}
149
150ALWAYS_INLINE
151void StoreIfNotYetStored(u64 *sp, u64 *s) {
152 StoreShadow(sp, *s);
153 *s = 0;
154}
155
156extern "C" void __tsan_report_race();
157
158ALWAYS_INLINE
159void HandleRace(ThreadState *thr, u64 *shadow_mem, Shadow cur, Shadow old) {
160 thr->racy_state[0] = cur.raw();
161 thr->racy_state[1] = old.raw();
162 thr->racy_shadow_addr = shadow_mem;
163#if !SANITIZER_GO
164 HACKY_CALL(__tsan_report_race);
165#else
166 ReportRace(thr);
167#endif
168}
169
170static inline bool HappensBefore(Shadow old, ThreadState *thr) {
171 return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
172}
173
174ALWAYS_INLINE
175void MemoryAccessImpl1(ThreadState *thr, uptr addr, int kAccessSizeLog,
176 bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem,
177 Shadow cur) {
178 // This potentially can live in an MMX/SSE scratch register.
179 // The required intrinsics are:
180 // __m128i _mm_move_epi64(__m128i*);
181 // _mm_storel_epi64(u64*, __m128i);
182 u64 store_word = cur.raw();
183 bool stored = false;
184
185 // scan all the shadow values and dispatch to 4 categories:
186 // same, replace, candidate and race (see comments below).
187 // we consider only 3 cases regarding access sizes:
188 // equal, intersect and not intersect. initially I considered
189 // larger and smaller as well, it allowed to replace some
190 // 'candidates' with 'same' or 'replace', but I think
191 // it's just not worth it (performance- and complexity-wise).
192
193 Shadow old(0);
194
195 // In release mode we manually unroll the loop,
196 // because empirically gcc generates better code this way.
197 // However, we can't afford unrolling in debug mode, because the function
198 // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
199 // threads, which is not enough for the unrolled loop.
200#if SANITIZER_DEBUG
201 for (int idx = 0; idx < 4; idx++) {
202# include "tsan_update_shadow_word.inc"
203 }
204#else
205 int idx = 0;
206# include "tsan_update_shadow_word.inc"
207 idx = 1;
208 if (stored) {
209# include "tsan_update_shadow_word.inc"
210 } else {
211# include "tsan_update_shadow_word.inc"
212 }
213 idx = 2;
214 if (stored) {
215# include "tsan_update_shadow_word.inc"
216 } else {
217# include "tsan_update_shadow_word.inc"
218 }
219 idx = 3;
220 if (stored) {
221# include "tsan_update_shadow_word.inc"
222 } else {
223# include "tsan_update_shadow_word.inc"
224 }
225#endif
226
227 // we did not find any races and had already stored
228 // the current access info, so we are done
229 if (LIKELY(stored))
230 return;
231 // choose a random candidate slot and replace it
232 StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
233 return;
234RACE:
235 HandleRace(thr, shadow_mem, cur, old);
236 return;
237}
238
239void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
240 AccessType typ) {
241 DCHECK(!(typ & kAccessAtomic));
242 const bool kAccessIsWrite = !(typ & kAccessRead);
243 const bool kIsAtomic = false;
244 while (size) {
245 int size1 = 1;
246 int kAccessSizeLog = kSizeLog1;
247 if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
248 size1 = 8;
249 kAccessSizeLog = kSizeLog8;
250 } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
251 size1 = 4;
252 kAccessSizeLog = kSizeLog4;
253 } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
254 size1 = 2;
255 kAccessSizeLog = kSizeLog2;
256 }
257 MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
258 addr += size1;
259 size -= size1;
260 }
261}
262
263ALWAYS_INLINE
264bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
265 Shadow cur(a);
266 for (uptr i = 0; i < kShadowCnt; i++) {
267 Shadow old(LoadShadow(&s[i]));
268 if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
269 old.TidWithIgnore() == cur.TidWithIgnore() &&
270 old.epoch() > sync_epoch && old.IsAtomic() == cur.IsAtomic() &&
271 old.IsRead() <= cur.IsRead())
272 return true;
273 }
274 return false;
275}
276
277#if TSAN_VECTORIZE
278# define SHUF(v0, v1, i0, i1, i2, i3) \
279 _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(v0), \
280 _mm_castsi128_ps(v1), \
281 (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
282ALWAYS_INLINE
283bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
284 // This is an optimized version of ContainsSameAccessSlow.
285 // load current access into access[0:63]
286 const m128 access = _mm_cvtsi64_si128(a);
287 // duplicate high part of access in addr0:
288 // addr0[0:31] = access[32:63]
289 // addr0[32:63] = access[32:63]
290 // addr0[64:95] = access[32:63]
291 // addr0[96:127] = access[32:63]
292 const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
293 // load 4 shadow slots
294 const m128 shadow0 = _mm_load_si128((__m128i *)s);
295 const m128 shadow1 = _mm_load_si128((__m128i *)s + 1);
296 // load high parts of 4 shadow slots into addr_vect:
297 // addr_vect[0:31] = shadow0[32:63]
298 // addr_vect[32:63] = shadow0[96:127]
299 // addr_vect[64:95] = shadow1[32:63]
300 // addr_vect[96:127] = shadow1[96:127]
301 m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
302 if (!is_write) {
303 // set IsRead bit in addr_vect
304 const m128 rw_mask1 = _mm_cvtsi64_si128(1 << 15);
305 const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
306 addr_vect = _mm_or_si128(addr_vect, rw_mask);
307 }
308 // addr0 == addr_vect?
309 const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
310 // epoch1[0:63] = sync_epoch
311 const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
312 // epoch[0:31] = sync_epoch[0:31]
313 // epoch[32:63] = sync_epoch[0:31]
314 // epoch[64:95] = sync_epoch[0:31]
315 // epoch[96:127] = sync_epoch[0:31]
316 const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
317 // load low parts of shadow cell epochs into epoch_vect:
318 // epoch_vect[0:31] = shadow0[0:31]
319 // epoch_vect[32:63] = shadow0[64:95]
320 // epoch_vect[64:95] = shadow1[0:31]
321 // epoch_vect[96:127] = shadow1[64:95]
322 const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
323 // epoch_vect >= sync_epoch?
324 const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
325 // addr_res & epoch_res
326 const m128 res = _mm_and_si128(addr_res, epoch_res);
327 // mask[0] = res[7]
328 // mask[1] = res[15]
329 // ...
330 // mask[15] = res[127]
331 const int mask = _mm_movemask_epi8(res);
332 return mask != 0;
333}
334#endif
335
336ALWAYS_INLINE
337bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
338#if TSAN_VECTORIZE
339 bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
340 // NOTE: this check can fail if the shadow is concurrently mutated
341 // by other threads. But it still can be useful if you modify
342 // ContainsSameAccessFast and want to ensure that it's not completely broken.
343 // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
344 return res;
345#else
346 return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
347#endif
348}
349
350ALWAYS_INLINE USED void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
351 int kAccessSizeLog, bool kAccessIsWrite,
352 bool kIsAtomic) {
353 RawShadow *shadow_mem = MemToShadow(addr);
354 DPrintf2(
355 "#%d: MemoryAccess: @%p %p size=%d"
356 " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
357 (int)thr->fast_state.tid(), (void *)pc, (void *)addr,
358 (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
359 (uptr)shadow_mem[0], (uptr)shadow_mem[1], (uptr)shadow_mem[2],
360 (uptr)shadow_mem[3]);
361#if SANITIZER_DEBUG
362 if (!IsAppMem(addr)) {
363 Printf("Access to non app mem %zx\n", addr);
364 DCHECK(IsAppMem(addr));
365 }
366 if (!IsShadowMem(shadow_mem)) {
367 Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
368 DCHECK(IsShadowMem(shadow_mem));
369 }
370#endif
371
372 if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
373 // Access to .rodata section, no races here.
374 // Measurements show that it can be 10-20% of all memory accesses.
375 return;
376 }
377
378 FastState fast_state = thr->fast_state;
379 if (UNLIKELY(fast_state.GetIgnoreBit())) {
380 return;
381 }
382
383 Shadow cur(fast_state);
384 cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
385 cur.SetWrite(kAccessIsWrite);
386 cur.SetAtomic(kIsAtomic);
387
388 if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(), thr->fast_synch_epoch,
389 kAccessIsWrite))) {
390 return;
391 }
392
393 if (kCollectHistory) {
394 fast_state.IncrementEpoch();
395 thr->fast_state = fast_state;
396 TraceAddEvent(thr, fast_state, EventTypeMop, pc);
397 cur.IncrementEpoch();
398 }
399
400 MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
401 shadow_mem, cur);
402}
403
404// Called by MemoryAccessRange in tsan_rtl_thread.cpp
405ALWAYS_INLINE USED void MemoryAccessImpl(ThreadState *thr, uptr addr,
406 int kAccessSizeLog,
407 bool kAccessIsWrite, bool kIsAtomic,
408 u64 *shadow_mem, Shadow cur) {
409 if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(), thr->fast_synch_epoch,
410 kAccessIsWrite))) {
411 return;
412 }
413
414 MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
415 shadow_mem, cur);
416}
417
418static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
419 u64 val) {
420 (void)thr;
421 (void)pc;
422 if (size == 0)
423 return;
424 // FIXME: fix me.
425 uptr offset = addr % kShadowCell;
426 if (offset) {
427 offset = kShadowCell - offset;
428 if (size <= offset)
429 return;
430 addr += offset;
431 size -= offset;
432 }
433 DCHECK_EQ(addr % 8, 0);
434 // If a user passes some insane arguments (memset(0)),
435 // let it just crash as usual.
436 if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
437 return;
438 // Don't want to touch lots of shadow memory.
439 // If a program maps 10MB stack, there is no need to reset the whole range.
440 size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
441 // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
442 if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
443 RawShadow *p = MemToShadow(addr);
444 CHECK(IsShadowMem(p));
445 CHECK(IsShadowMem(p + size * kShadowCnt / kShadowCell - 1));
446 // FIXME: may overwrite a part outside the region
447 for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
448 p[i++] = val;
449 for (uptr j = 1; j < kShadowCnt; j++) p[i++] = 0;
450 }
451 } else {
452 // The region is big, reset only beginning and end.
453 const uptr kPageSize = GetPageSizeCached();
454 RawShadow *begin = MemToShadow(addr);
455 RawShadow *end = begin + size / kShadowCell * kShadowCnt;
456 RawShadow *p = begin;
457 // Set at least first kPageSize/2 to page boundary.
458 while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
459 *p++ = val;
460 for (uptr j = 1; j < kShadowCnt; j++) *p++ = 0;
461 }
462 // Reset middle part.
463 RawShadow *p1 = p;
464 p = RoundDown(end, kPageSize);
465 if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
466 Die();
467 // Set the ending.
468 while (p < end) {
469 *p++ = val;
470 for (uptr j = 1; j < kShadowCnt; j++) *p++ = 0;
471 }
472 }
473}
474
475void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
476 MemoryRangeSet(thr, pc, addr, size, 0);
477}
478
479void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
480 // Processing more than 1k (4k of shadow) is expensive,
481 // can cause excessive memory consumption (user does not necessarily touch
482 // the whole range) and most likely unnecessary.
483 if (size > 1024)
484 size = 1024;
485 CHECK_EQ(thr->is_freeing, false);
486 thr->is_freeing = true;
487 MemoryAccessRange(thr, pc, addr, size, true);
488 thr->is_freeing = false;
489 if (kCollectHistory) {
490 thr->fast_state.IncrementEpoch();
491 TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
492 }
493 Shadow s(thr->fast_state);
494 s.ClearIgnoreBit();
495 s.MarkAsFreed();
496 s.SetWrite(true);
497 s.SetAddr0AndSizeLog(0, 3);
498 MemoryRangeSet(thr, pc, addr, size, s.raw());
499}
500
501void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
502 if (kCollectHistory) {
503 thr->fast_state.IncrementEpoch();
504 TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
505 }
506 Shadow s(thr->fast_state);
507 s.ClearIgnoreBit();
508 s.SetWrite(true);
509 s.SetAddr0AndSizeLog(0, 3);
510 MemoryRangeSet(thr, pc, addr, size, s.raw());
511}
512
513void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
514 uptr size) {
515 if (thr->ignore_reads_and_writes == 0)
516 MemoryRangeImitateWrite(thr, pc, addr, size);
517 else
518 MemoryResetRange(thr, pc, addr, size);
519}
520
521void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
522 bool is_write) {
523 if (size == 0)
524 return;
525
526 RawShadow *shadow_mem = MemToShadow(addr);
527 DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n", thr->tid,
528 (void *)pc, (void *)addr, (int)size, is_write);
529
530#if SANITIZER_DEBUG
531 if (!IsAppMem(addr)) {
532 Printf("Access to non app mem %zx\n", addr);
533 DCHECK(IsAppMem(addr));
534 }
535 if (!IsAppMem(addr + size - 1)) {
536 Printf("Access to non app mem %zx\n", addr + size - 1);
537 DCHECK(IsAppMem(addr + size - 1));
538 }
539 if (!IsShadowMem(shadow_mem)) {
540 Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
541 DCHECK(IsShadowMem(shadow_mem));
542 }
543 if (!IsShadowMem(shadow_mem + size * kShadowCnt / 8 - 1)) {
544 Printf("Bad shadow addr %p (%zx)\n", shadow_mem + size * kShadowCnt / 8 - 1,
545 addr + size - 1);
546 DCHECK(IsShadowMem(shadow_mem + size * kShadowCnt / 8 - 1));
547 }
548#endif
549
550 if (*shadow_mem == kShadowRodata) {
551 DCHECK(!is_write);
552 // Access to .rodata section, no races here.
553 // Measurements show that it can be 10-20% of all memory accesses.
554 return;
555 }
556
557 FastState fast_state = thr->fast_state;
558 if (fast_state.GetIgnoreBit())
559 return;
560
561 fast_state.IncrementEpoch();
562 thr->fast_state = fast_state;
563 TraceAddEvent(thr, fast_state, EventTypeMop, pc);
564
565 bool unaligned = (addr % kShadowCell) != 0;
566
567 // Handle unaligned beginning, if any.
568 for (; addr % kShadowCell && size; addr++, size--) {
569 int const kAccessSizeLog = 0;
570 Shadow cur(fast_state);
571 cur.SetWrite(is_write);
572 cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
573 MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem,
574 cur);
575 }
576 if (unaligned)
577 shadow_mem += kShadowCnt;
578 // Handle middle part, if any.
579 for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
580 int const kAccessSizeLog = 3;
581 Shadow cur(fast_state);
582 cur.SetWrite(is_write);
583 cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
584 MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem,
585 cur);
586 shadow_mem += kShadowCnt;
587 }
588 // Handle ending, if any.
589 for (; size; addr++, size--) {
590 int const kAccessSizeLog = 0;
591 Shadow cur(fast_state);
592 cur.SetWrite(is_write);
593 cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
594 MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem,
595 cur);
596 }
597}
598
599} // namespace __tsan
600
601#if !SANITIZER_GO
602// Must be included in this file to make sure everything is inlined.
603# include "tsan_interface.inc"
604#endif

/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/tsan/rtl/tsan_rtl.h

1//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11// Main internal TSan header file.
12//
13// Ground rules:
14// - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
15// function-scope locals)
16// - All functions/classes/etc reside in namespace __tsan, except for those
17// declared in tsan_interface.h.
18// - Platform-specific files should be used instead of ifdefs (*).
19// - No system headers included in header files (*).
20// - Platform specific headers included only into platform-specific files (*).
21//
22// (*) Except when inlining is critical for performance.
23//===----------------------------------------------------------------------===//
24
25#ifndef TSAN_RTL_H
26#define TSAN_RTL_H
27
28#include "sanitizer_common/sanitizer_allocator.h"
29#include "sanitizer_common/sanitizer_allocator_internal.h"
30#include "sanitizer_common/sanitizer_asm.h"
31#include "sanitizer_common/sanitizer_common.h"
32#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
33#include "sanitizer_common/sanitizer_libignore.h"
34#include "sanitizer_common/sanitizer_suppressions.h"
35#include "sanitizer_common/sanitizer_thread_registry.h"
36#include "sanitizer_common/sanitizer_vector.h"
37#include "tsan_clock.h"
38#include "tsan_defs.h"
39#include "tsan_flags.h"
40#include "tsan_ignoreset.h"
41#include "tsan_mman.h"
42#include "tsan_mutexset.h"
43#include "tsan_platform.h"
44#include "tsan_report.h"
45#include "tsan_shadow.h"
46#include "tsan_stack_trace.h"
47#include "tsan_sync.h"
48#include "tsan_trace.h"
49
50#if SANITIZER_WORDSIZE != 64
51# error "ThreadSanitizer is supported only on 64-bit platforms"
52#endif
53
54namespace __tsan {
55
56#if !SANITIZER_GO
57struct MapUnmapCallback;
58#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
59
60struct AP32 {
61 static const uptr kSpaceBeg = 0;
62 static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
63 static const uptr kMetadataSize = 0;
64 typedef __sanitizer::CompactSizeClassMap SizeClassMap;
65 static const uptr kRegionSizeLog = 20;
66 using AddressSpaceView = LocalAddressSpaceView;
67 typedef __tsan::MapUnmapCallback MapUnmapCallback;
68 static const uptr kFlags = 0;
69};
70typedef SizeClassAllocator32<AP32> PrimaryAllocator;
71#else
72struct AP64 { // Allocator64 parameters. Deliberately using a short name.
73# if defined(__s390x__)
74 typedef MappingS390x Mapping;
75# else
76 typedef Mapping48AddressSpace Mapping;
77# endif
78 static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
79 static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
80 static const uptr kMetadataSize = 0;
81 typedef DefaultSizeClassMap SizeClassMap;
82 typedef __tsan::MapUnmapCallback MapUnmapCallback;
83 static const uptr kFlags = 0;
84 using AddressSpaceView = LocalAddressSpaceView;
85};
86typedef SizeClassAllocator64<AP64> PrimaryAllocator;
87#endif
88typedef CombinedAllocator<PrimaryAllocator> Allocator;
89typedef Allocator::AllocatorCache AllocatorCache;
90Allocator *allocator();
91#endif
92
93struct ThreadSignalContext;
94
95struct JmpBuf {
96 uptr sp;
97 int int_signal_send;
98 bool in_blocking_func;
99 uptr in_signal_handler;
100 uptr *shadow_stack_pos;
101};
102
103// A Processor represents a physical thread, or a P for Go.
104// It is used to store internal resources like allocate cache, and does not
105// participate in race-detection logic (invisible to end user).
106// In C++ it is tied to an OS thread just like ThreadState, however ideally
107// it should be tied to a CPU (this way we will have fewer allocator caches).
108// In Go it is tied to a P, so there are significantly fewer Processor's than
109// ThreadState's (which are tied to Gs).
110// A ThreadState must be wired with a Processor to handle events.
111struct Processor {
112 ThreadState *thr; // currently wired thread, or nullptr
113#if !SANITIZER_GO
114 AllocatorCache alloc_cache;
115 InternalAllocatorCache internal_alloc_cache;
116#endif
117 DenseSlabAllocCache block_cache;
118 DenseSlabAllocCache sync_cache;
119 DenseSlabAllocCache clock_cache;
120 DDPhysicalThread *dd_pt;
121};
122
123#if !SANITIZER_GO
124// ScopedGlobalProcessor temporarily sets up a global processor for the current
125// thread, if it does not have one. Intended for interceptors that can run
126// at the very thread end, when we already destroyed the thread processor.
127struct ScopedGlobalProcessor {
128 ScopedGlobalProcessor();
129 ~ScopedGlobalProcessor();
130};
131#endif
132
133// This struct is stored in TLS.
134struct ThreadState {
135 FastState fast_state;
136 // Synch epoch represents the thread's epoch before the last synchronization
137 // action. It allows to reduce number of shadow state updates.
138 // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
139 // if we are processing write to X from the same thread at epoch=200,
140 // we do nothing, because both writes happen in the same 'synch epoch'.
141 // That is, if another memory access does not race with the former write,
142 // it does not race with the latter as well.
143 // QUESTION: can we squeeze this into ThreadState::Fast?
144 // E.g. ThreadState::Fast is a 44-bit, 32 are taken by synch_epoch and 12 are
145 // taken by epoch between synchs.
146 // This way we can save one load from tls.
147 u64 fast_synch_epoch;
148 // Technically `current` should be a separate THREADLOCAL variable;
149 // but it is placed here in order to share cache line with previous fields.
150 ThreadState* current;
151 // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
152 // We do not distinguish between ignoring reads and writes
153 // for better performance.
154 int ignore_reads_and_writes;
155 atomic_sint32_t pending_signals;
156 int ignore_sync;
157 int suppress_reports;
158 // Go does not support ignores.
159#if !SANITIZER_GO
160 IgnoreSet mop_ignore_set;
161 IgnoreSet sync_ignore_set;
162 // C/C++ uses fixed size shadow stack.
163 uptr shadow_stack[kShadowStackSize];
164#else
165 // Go uses malloc-allocated shadow stack with dynamic size.
166 uptr *shadow_stack;
167#endif
168 uptr *shadow_stack_end;
169 uptr *shadow_stack_pos;
170 RawShadow *racy_shadow_addr;
171 RawShadow racy_state[2];
172 MutexSet mset;
173 ThreadClock clock;
174#if !SANITIZER_GO
175 Vector<JmpBuf> jmp_bufs;
176 int ignore_interceptors;
177#endif
178 const Tid tid;
179 const int unique_id;
180 bool in_symbolizer;
181 bool in_ignored_lib;
182 bool is_inited;
183 bool is_dead;
184 bool is_freeing;
185 bool is_vptr_access;
186 const uptr stk_addr;
187 const uptr stk_size;
188 const uptr tls_addr;
189 const uptr tls_size;
190 ThreadContext *tctx;
191
192 DDLogicalThread *dd_lt;
193
194 // Current wired Processor, or nullptr. Required to handle any events.
195 Processor *proc1;
196#if !SANITIZER_GO
197 Processor *proc() { return proc1; }
198#else
199 Processor *proc();
200#endif
201
202 atomic_uintptr_t in_signal_handler;
203 ThreadSignalContext *signal_ctx;
204
205#if !SANITIZER_GO
206 StackID last_sleep_stack_id;
207 ThreadClock last_sleep_clock;
208#endif
209
210 // Set in regions of runtime that must be signal-safe and fork-safe.
211 // If set, malloc must not be called.
212 int nomalloc;
213
214 const ReportDesc *current_report;
215
216 // Current position in tctx->trace.Back()->events (Event*).
217 atomic_uintptr_t trace_pos;
218 // PC of the last memory access, used to compute PC deltas in the trace.
219 uptr trace_prev_pc;
220 Sid sid;
221 Epoch epoch;
222
223 explicit ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
224 unsigned reuse_count, uptr stk_addr, uptr stk_size,
225 uptr tls_addr, uptr tls_size);
226} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
227
228#if !SANITIZER_GO
229#if SANITIZER_MAC || SANITIZER_ANDROID
230ThreadState *cur_thread();
231void set_cur_thread(ThreadState *thr);
232void cur_thread_finalize();
233inline ThreadState *cur_thread_init() { return cur_thread(); }
234# else
235__attribute__((tls_model("initial-exec")))
236extern THREADLOCAL char cur_thread_placeholder[];
237inline ThreadState *cur_thread() {
238 return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
239}
240inline ThreadState *cur_thread_init() {
241 ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
242 if (UNLIKELY(!thr->current))
243 thr->current = thr;
244 return thr->current;
245}
246inline void set_cur_thread(ThreadState *thr) {
247 reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
248}
249inline void cur_thread_finalize() { }
250# endif // SANITIZER_MAC || SANITIZER_ANDROID
251#endif // SANITIZER_GO
252
253class ThreadContext final : public ThreadContextBase {
254 public:
255 explicit ThreadContext(Tid tid);
256 ~ThreadContext();
257 ThreadState *thr;
258 StackID creation_stack_id;
259 SyncClock sync;
260 // Epoch at which the thread had started.
261 // If we see an event from the thread stamped by an older epoch,
262 // the event is from a dead thread that shared tid with this thread.
263 u64 epoch0;
264 u64 epoch1;
265
266 v3::Trace trace;
267
268 // Override superclass callbacks.
269 void OnDead() override;
270 void OnJoined(void *arg) override;
271 void OnFinished() override;
272 void OnStarted(void *arg) override;
273 void OnCreated(void *arg) override;
274 void OnReset() override;
275 void OnDetached(void *arg) override;
276};
277
278struct RacyStacks {
279 MD5Hash hash[2];
280 bool operator==(const RacyStacks &other) const;
281};
282
283struct RacyAddress {
284 uptr addr_min;
285 uptr addr_max;
286};
287
288struct FiredSuppression {
289 ReportType type;
290 uptr pc_or_addr;
291 Suppression *supp;
292};
293
294struct Context {
295 Context();
296
297 bool initialized;
298#if !SANITIZER_GO
299 bool after_multithreaded_fork;
300#endif
301
302 MetaMap metamap;
303
304 Mutex report_mtx;
305 int nreported;
306 atomic_uint64_t last_symbolize_time_ns;
307
308 void *background_thread;
309 atomic_uint32_t stop_background_thread;
310
311 ThreadRegistry thread_registry;
312
313 Mutex racy_mtx;
314 Vector<RacyStacks> racy_stacks;
315 Vector<RacyAddress> racy_addresses;
316 // Number of fired suppressions may be large enough.
317 Mutex fired_suppressions_mtx;
318 InternalMmapVector<FiredSuppression> fired_suppressions;
319 DDetector *dd;
320
321 ClockAlloc clock_alloc;
322
323 Flags flags;
324 fd_t memprof_fd;
325
326 Mutex slot_mtx;
327};
328
329extern Context *ctx; // The one and the only global runtime context.
330
331ALWAYS_INLINE Flags *flags() {
332 return &ctx->flags;
333}
334
335struct ScopedIgnoreInterceptors {
336 ScopedIgnoreInterceptors() {
337#if !SANITIZER_GO
338 cur_thread()->ignore_interceptors++;
339#endif
340 }
341
342 ~ScopedIgnoreInterceptors() {
343#if !SANITIZER_GO
344 cur_thread()->ignore_interceptors--;
345#endif
346 }
347};
348
349const char *GetObjectTypeFromTag(uptr tag);
350const char *GetReportHeaderFromTag(uptr tag);
351uptr TagFromShadowStackFrame(uptr pc);
352
353class ScopedReportBase {
354 public:
355 void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
356 const MutexSet *mset);
357 void AddStack(StackTrace stack, bool suppressable = false);
358 void AddThread(const ThreadContext *tctx, bool suppressable = false);
359 void AddThread(Tid unique_tid, bool suppressable = false);
360 void AddUniqueTid(Tid unique_tid);
361 void AddMutex(const SyncVar *s);
362 u64 AddMutex(u64 id);
363 void AddLocation(uptr addr, uptr size);
364 void AddSleep(StackID stack_id);
365 void SetCount(int count);
366
367 const ReportDesc *GetReport() const;
368
369 protected:
370 ScopedReportBase(ReportType typ, uptr tag);
371 ~ScopedReportBase();
372
373 private:
374 ReportDesc *rep_;
375 // Symbolizer makes lots of intercepted calls. If we try to process them,
376 // at best it will cause deadlocks on internal mutexes.
377 ScopedIgnoreInterceptors ignore_interceptors_;
378
379 void AddDeadMutex(u64 id);
380
381 ScopedReportBase(const ScopedReportBase &) = delete;
382 void operator=(const ScopedReportBase &) = delete;
383};
384
385class ScopedReport : public ScopedReportBase {
386 public:
387 explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
388 ~ScopedReport();
389
390 private:
391 ScopedErrorReportLock lock_;
392};
393
394bool ShouldReport(ThreadState *thr, ReportType typ);
395ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
396void RestoreStack(Tid tid, const u64 epoch, VarSizeStackTrace *stk,
397 MutexSet *mset, uptr *tag = nullptr);
398
399// The stack could look like:
400// <start> | <main> | <foo> | tag | <bar>
401// This will extract the tag and keep:
402// <start> | <main> | <foo> | <bar>
403template<typename StackTraceTy>
404void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
405 if (stack->size < 2) return;
406 uptr possible_tag_pc = stack->trace[stack->size - 2];
407 uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
408 if (possible_tag == kExternalTagNone) return;
409 stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
410 stack->size -= 1;
411 if (tag) *tag = possible_tag;
412}
413
414template<typename StackTraceTy>
415void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
416 uptr *tag = nullptr) {
417 uptr size = thr->shadow_stack_pos - thr->shadow_stack;
418 uptr start = 0;
419 if (size + !!toppc > kStackTraceMax) {
420 start = size + !!toppc - kStackTraceMax;
421 size = kStackTraceMax - !!toppc;
422 }
423 stack->Init(&thr->shadow_stack[start], size, toppc);
424 ExtractTagFromStack(stack, tag);
425}
426
427#define GET_STACK_TRACE_FATAL(thr, pc) \
428 VarSizeStackTrace stack; \
429 ObtainCurrentStack(thr, pc, &stack); \
430 stack.ReverseOrder();
431
432void MapShadow(uptr addr, uptr size);
433void MapThreadTrace(uptr addr, uptr size, const char *name);
434void DontNeedShadowFor(uptr addr, uptr size);
435void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
436void InitializeShadowMemory();
437void InitializeInterceptors();
438void InitializeLibIgnore();
439void InitializeDynamicAnnotations();
440
441void ForkBefore(ThreadState *thr, uptr pc);
442void ForkParentAfter(ThreadState *thr, uptr pc);
443void ForkChildAfter(ThreadState *thr, uptr pc);
444
445void ReportRace(ThreadState *thr);
446bool OutputReport(ThreadState *thr, const ScopedReport &srep);
447bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
448bool IsExpectedReport(uptr addr, uptr size);
449
450#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
451# define DPrintf Printf
452#else
453# define DPrintf(...)
454#endif
455
456#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
457# define DPrintf2 Printf
458#else
459# define DPrintf2(...)
460#endif
461
462StackID CurrentStackId(ThreadState *thr, uptr pc);
463ReportStack *SymbolizeStackId(StackID stack_id);
464void PrintCurrentStack(ThreadState *thr, uptr pc);
465void PrintCurrentStackSlow(uptr pc); // uses libunwind
466MBlock *JavaHeapBlock(uptr addr, uptr *start);
467
468void Initialize(ThreadState *thr);
469void MaybeSpawnBackgroundThread();
470int Finalize(ThreadState *thr);
471
472void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
473void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
474
475void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
476 int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
477void MemoryAccessImpl(ThreadState *thr, uptr addr,
478 int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
479 u64 *shadow_mem, Shadow cur);
480void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
481 uptr size, bool is_write);
482void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
483 AccessType typ);
484
485const int kSizeLog1 = 0;
486const int kSizeLog2 = 1;
487const int kSizeLog4 = 2;
488const int kSizeLog8 = 3;
489
490ALWAYS_INLINE
491void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
492 AccessType typ) {
493 int size_log;
494 switch (size) {
495 case 1:
496 size_log = kSizeLog1;
497 break;
498 case 2:
499 size_log = kSizeLog2;
500 break;
501 case 4:
502 size_log = kSizeLog4;
503 break;
504 default:
505 DCHECK_EQ(size, 8);
506 size_log = kSizeLog8;
507 break;
508 }
509 bool is_write = !(typ & kAccessRead);
510 bool is_atomic = typ & kAccessAtomic;
511 if (typ & kAccessVptr)
512 thr->is_vptr_access = true;
513 if (typ & kAccessFree)
514 thr->is_freeing = true;
515 MemoryAccess(thr, pc, addr, size_log, is_write, is_atomic);
516 if (typ & kAccessVptr)
517 thr->is_vptr_access = false;
518 if (typ & kAccessFree)
519 thr->is_freeing = false;
520}
521
522void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
523void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
524void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
525void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
526 uptr size);
527
528void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
529void ThreadIgnoreEnd(ThreadState *thr);
530void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
531void ThreadIgnoreSyncEnd(ThreadState *thr);
532
533void FuncEntry(ThreadState *thr, uptr pc);
534void FuncExit(ThreadState *thr);
535
536Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
537void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
538 ThreadType thread_type);
539void ThreadFinish(ThreadState *thr);
540Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
541void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
542void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
543void ThreadFinalize(ThreadState *thr);
544void ThreadSetName(ThreadState *thr, const char *name);
545int ThreadCount(ThreadState *thr);
546void ProcessPendingSignalsImpl(ThreadState *thr);
547void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);
548
549Processor *ProcCreate();
550void ProcDestroy(Processor *proc);
551void ProcWire(Processor *proc, ThreadState *thr);
552void ProcUnwire(Processor *proc, ThreadState *thr);
553
554// Note: the parameter is called flagz, because flags is already taken
555// by the global function that returns flags.
556void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
557void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
558void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
559void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
560 int rec = 1);
561int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
562void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
563void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
564void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
565void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
566void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
567void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
568
569void Acquire(ThreadState *thr, uptr pc, uptr addr);
570// AcquireGlobal synchronizes the current thread with all other threads.
571// In terms of happens-before relation, it draws a HB edge from all threads
572// (where they happen to execute right now) to the current thread. We use it to
573// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
574// right before executing finalizers. This provides a coarse, but simple
575// approximation of the actual required synchronization.
576void AcquireGlobal(ThreadState *thr);
577void Release(ThreadState *thr, uptr pc, uptr addr);
578void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
579void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
580void AfterSleep(ThreadState *thr, uptr pc);
581void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
582void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
583void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
584void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
585void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
586
587// The hacky call uses custom calling convention and an assembly thunk.
588// It is considerably faster than a normal call for the caller
589// if it is not executed (it is intended for slow paths from hot functions).
590// The trick is that the call preserves all registers and the compiler
591// does not treat it as a call.
592// If it does not work for you, use normal call.
593#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
594// The caller may not create the stack frame for itself at all,
595// so we create a reserve stack frame for it (1024b must be enough).
596#define HACKY_CALL(f) \
597 __asm__ __volatile__("sub $1024, %%rsp;" \
598 CFI_INL_ADJUST_CFA_OFFSET(1024) \
599 ".hidden " #f "_thunk;" \
600 "call " #f "_thunk;" \
601 "add $1024, %%rsp;" \
602 CFI_INL_ADJUST_CFA_OFFSET(-1024) \
603 ::: "memory", "cc");
604#else
605#define HACKY_CALL(f) f()
606#endif
607
608void TraceSwitch(ThreadState *thr);
609uptr TraceTopPC(ThreadState *thr);
610uptr TraceSize();
611uptr TraceParts();
612Trace *ThreadTrace(Tid tid);
613
614extern "C" void __tsan_trace_switch();
615void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
616 EventType typ, u64 addr) {
617 if (!kCollectHistory)
618 return;
619 DCHECK_GE((int)typ, 0);
620 DCHECK_LE((int)typ, 7);
621 DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
622 u64 pos = fs.GetTracePos();
623 if (UNLIKELY((pos % kTracePartSize) == 0)) {
624#if !SANITIZER_GO
625 HACKY_CALL(__tsan_trace_switch);
626#else
627 TraceSwitch(thr);
628#endif
629 }
630 Event *trace = (Event*)GetThreadTrace(fs.tid());
631 Event *evp = &trace[pos];
632 Event ev = (u64)addr | ((u64)typ << kEventPCBits);
633 *evp = ev;
634}
635
636#if !SANITIZER_GO
637uptr ALWAYS_INLINE HeapEnd() {
638 return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
639}
640#endif
641
642ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
643void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
644void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);
645
646// These need to match __tsan_switch_to_fiber_* flags defined in
647// tsan_interface.h. See documentation there as well.
648enum FiberSwitchFlags {
649 FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
650};
651
652ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
653 if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
654 ProcessPendingSignalsImpl(thr);
655}
656
657extern bool is_initialized;
658
659ALWAYS_INLINE
660void LazyInitialize(ThreadState *thr) {
661 // If we can use .preinit_array, assume that __tsan_init
662 // called from .preinit_array initializes runtime before
663 // any instrumented code.
664#if !SANITIZER_CAN_USE_PREINIT_ARRAY
665 if (UNLIKELY(!is_initialized))
666 Initialize(thr);
667#endif
668}
669
670namespace v3 {
671
672void TraceSwitchPart(ThreadState *thr);
673bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
674 uptr size, AccessType typ, VarSizeStackTrace *pstk,
675 MutexSet *pmset, uptr *ptag);
676
677template <typename EventT>
678ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
679 EventT **ev) {
680 Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
681#if SANITIZER_DEBUG
682 // TraceSwitch acquires these mutexes,
683 // so we lock them here to detect deadlocks more reliably.
684 { Lock lock(&ctx->slot_mtx); }
685 { Lock lock(&thr->tctx->trace.mtx); }
686 TracePart *current = thr->tctx->trace.parts.Back();
687 if (current) {
688 DCHECK_GE(pos, &current->events[0]);
689 DCHECK_LE(pos, &current->events[TracePart::kSize]);
690 } else {
691 DCHECK_EQ(pos, nullptr);
692 }
693#endif
694 // TracePart is allocated with mmap and is at least 4K aligned.
695 // So the following check is a faster way to check for part end.
696 // It may have false positives in the middle of the trace,
697 // they are filtered out in TraceSwitch.
698 if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
  5: Assuming the condition is true
  6: Taking true branch
  11: Assuming the condition is true
  12: Taking true branch
699 return false;
  7: Returning without writing to '*ev'
  13: Returning without writing to '*ev'
700 *ev = reinterpret_cast<EventT *>(pos);
701 return true;
702}
703
704template <typename EventT>
705ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
706 DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
707 atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
708}
709
710template <typename EventT>
711void TraceEvent(ThreadState *thr, EventT ev) {
712 EventT *evp;
  3: 'evp' declared without an initial value
713 if (!TraceAcquire(thr, &evp)) {
  4: Calling 'TraceAcquire<__tsan::v3::EventTime>'
  8: Returning from 'TraceAcquire<__tsan::v3::EventTime>'
  9: Taking true branch
714 TraceSwitchPart(thr);
715 UNUSED bool res = TraceAcquire(thr, &evp);
  10: Calling 'TraceAcquire<__tsan::v3::EventTime>'
  14: Returning from 'TraceAcquire<__tsan::v3::EventTime>'
716 DCHECK(res);
717 }
718 *evp = ev;
  15: Called C++ object pointer is uninitialized
719 TraceRelease(thr, evp);
720}
721
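If one wanted to make this path explicit to the analyzer, one hedged option (an illustrative sketch only, not necessarily the upstream fix) is to initialize the pointer and bail out when the post-switch acquire still fails, which is not expected to happen at runtime:

// Sketch: keep the analyzer from seeing a path where 'evp' is read unset.
template <typename EventT>
void TraceEvent(ThreadState *thr, EventT ev) {
  EventT *evp = nullptr;
  if (!TraceAcquire(thr, &evp)) {
    TraceSwitchPart(thr);
    bool res = TraceAcquire(thr, &evp);
    DCHECK(res);
    if (UNLIKELY(!res))
      return;  // unreachable in practice; guarantees evp was written below
  }
  *evp = ev;
  TraceRelease(thr, evp);
}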
722ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
723 uptr pc = 0) {
724 if (!kCollectHistory)
725 return true;
726 EventFunc *ev;
727 if (UNLIKELY(!TraceAcquire(thr, &ev)))
728 return false;
729 ev->is_access = 0;
730 ev->is_func = 1;
731 ev->pc = pc;
732 TraceRelease(thr, ev);
733 return true;
734}
735
736WARN_UNUSED_RESULT
737bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
738 AccessType typ);
739WARN_UNUSED_RESULT
740bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
741 AccessType typ);
742void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
743 AccessType typ);
744void TraceFunc(ThreadState *thr, uptr pc = 0);
745void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
746 StackID stk);
747void TraceMutexUnlock(ThreadState *thr, uptr addr);
748void TraceTime(ThreadState *thr);
749
750} // namespace v3
751
752void GrowShadowStack(ThreadState *thr);
753
754ALWAYS_INLINE
755void FuncEntry(ThreadState *thr, uptr pc) {
756 DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void *)pc);
757 if (kCollectHistory) {
758 thr->fast_state.IncrementEpoch();
759 TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
760 }
761
762 // Shadow stack maintenance can be replaced with
763 // stack unwinding during trace switch (which presumably must be faster).
764 DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
765#if !SANITIZER_GO
766 DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
767#else
768 if (thr->shadow_stack_pos == thr->shadow_stack_end)
769 GrowShadowStack(thr);
770#endif
771 thr->shadow_stack_pos[0] = pc;
772 thr->shadow_stack_pos++;
773}
774
775ALWAYS_INLINE
776void FuncExit(ThreadState *thr) {
777 DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
778 if (kCollectHistory) {
779 thr->fast_state.IncrementEpoch();
780 TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
781 }
782
783 DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
784#if !SANITIZER_GO
785 DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
786#endif
787 thr->shadow_stack_pos--;
788}
789
790#if !SANITIZER_GO
791extern void (*on_initialize)(void);
792extern int (*on_finalize)(int);
793#endif
794
795} // namespace __tsan
796
797#endif // TSAN_RTL_H