File: compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
Warning: line 121, column 23: Storage type is aligned to 1 bytes but allocated type is aligned to 8 bytes
1 | //===-- tsan_rtl.cpp ------------------------------------------------------===//
2 | //
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 | // See https://llvm.org/LICENSE.txt for license information.
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 | //
7 | //===----------------------------------------------------------------------===//
8 | //
9 | // This file is a part of ThreadSanitizer (TSan), a race detector.
10 | //
11 | // Main file (entry points) for the TSan run-time.
12 | //===----------------------------------------------------------------------===//
13 |
14 | #include "tsan_rtl.h"
15 |
16 | #include "sanitizer_common/sanitizer_atomic.h"
17 | #include "sanitizer_common/sanitizer_common.h"
18 | #include "sanitizer_common/sanitizer_file.h"
19 | #include "sanitizer_common/sanitizer_libc.h"
20 | #include "sanitizer_common/sanitizer_placement_new.h"
21 | #include "sanitizer_common/sanitizer_stackdepot.h"
22 | #include "sanitizer_common/sanitizer_symbolizer.h"
23 | #include "tsan_defs.h"
24 | #include "tsan_interface.h"
25 | #include "tsan_mman.h"
26 | #include "tsan_platform.h"
27 | #include "tsan_suppressions.h"
28 | #include "tsan_symbolize.h"
29 | #include "ubsan/ubsan_init.h"
30 |
31 | #ifdef __SSE3__
32 | // <emmintrin.h> transitively includes <stdlib.h>,
33 | // and it's prohibited to include std headers into tsan runtime.
34 | // So we do this dirty trick.
35 | #define _MM_MALLOC_H_INCLUDED
36 | #define __MM_MALLOC_H
37 | #include <emmintrin.h>
38 | typedef __m128i m128;
39 | #endif
40 |
41 | volatile int __tsan_resumed = 0;
42 |
43 | extern "C" void __tsan_resume() {
44 |   __tsan_resumed = 1;
45 | }
46 |
47 | namespace __tsan {
48 |
49 | #if !SANITIZER_GO && !SANITIZER_MAC
50 | __attribute__((tls_model("initial-exec")))
51 | THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
52 | #endif
53 | static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
54 | Context *ctx;
55 |
56 | // Can be overridden by a front-end.
57 | #ifdef TSAN_EXTERNAL_HOOKS
58 | bool OnFinalize(bool failed);
59 | void OnInitialize();
60 | #else
61 | #include <dlfcn.h>
62 | SANITIZER_WEAK_CXX_DEFAULT_IMPL
63 | bool OnFinalize(bool failed) {
64 | #if !SANITIZER_GO
65 |   if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_finalize"))
66 |     return reinterpret_cast<decltype(&__tsan_on_finalize)>(ptr)(failed);
67 | #endif
68 |   return failed;
69 | }
70 | SANITIZER_WEAK_CXX_DEFAULT_IMPL
71 | void OnInitialize() {
72 | #if !SANITIZER_GO
73 |   if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_initialize")) {
74 |     return reinterpret_cast<decltype(&__tsan_on_initialize)>(ptr)();
75 |   }
76 | #endif
77 | }
78 | #endif
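// Illustrative sketch (not part of this file): because the defaults above
// probe dlsym() and are weak, an application can observe TSan start-up and
// shutdown by defining the hooks itself. Signatures are assumed from the
// decltype() casts above (declared in tsan_interface.h):
//
//   extern "C" void __tsan_on_initialize() {
//     // Runs once the TSan runtime has finished initializing.
//   }
//   extern "C" int __tsan_on_finalize(int failed) {
//     // Last chance to act at exit; the return value replaces 'failed'.
//     return failed;
//   }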
79 |
80 | static char thread_registry_placeholder[sizeof(ThreadRegistry)];
81 |
82 | static ThreadContextBase *CreateThreadContext(u32 tid) {
83 |   // Map thread trace when context is created.
84 |   char name[50];
85 |   internal_snprintf(name, sizeof(name), "trace %u", tid);
86 |   MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
87 |   const uptr hdr = GetThreadTraceHeader(tid);
88 |   internal_snprintf(name, sizeof(name), "trace header %u", tid);
89 |   MapThreadTrace(hdr, sizeof(Trace), name);
90 |   new((void*)hdr) Trace();
91 |   // We are going to use only a small part of the trace with the default
92 |   // value of history_size. However, the constructor writes to the whole trace.
93 |   // Release the unused part.
94 |   uptr hdr_end = hdr + sizeof(Trace);
95 |   hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
96 |   hdr_end = RoundUp(hdr_end, GetPageSizeCached());
97 |   if (hdr_end < hdr + sizeof(Trace)) {
98 |     ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
99 |     uptr unused = hdr + sizeof(Trace) - hdr_end;
100 |     if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
101 |       Report("ThreadSanitizer: failed to mprotect(%p, %p)\n",
102 |              hdr_end, unused);
103 |       CHECK("unable to mprotect" && 0);
104 |     }
105 |   }
106 |   void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
107 |   return new(mem) ThreadContext(tid);
108 | }
109 |
110 | #if !SANITIZER_GO
111 | static const u32 kThreadQuarantineSize = 16;
112 | #else
113 | static const u32 kThreadQuarantineSize = 64;
114 | #endif
115 |
116 | Context::Context()
117 |     : initialized(),
118 |       report_mtx(MutexTypeReport, StatMtxReport),
119 |       nreported(),
120 |       nmissed_expected(),
121 |       thread_registry(new (thread_registry_placeholder) ThreadRegistry(
122 |           CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)),
123 |       racy_mtx(MutexTypeRacy, StatMtxRacy),
124 |       racy_stacks(),
125 |       racy_addresses(),
126 |       fired_suppressions_mtx(MutexTypeFired, StatMtxFired),
127 |       clock_alloc(LINKER_INITIALIZED, "clock allocator") {
128 |   fired_suppressions.reserve(8);
129 | }
130 |
131 | // The objects are allocated in TLS, so one may rely on zero-initialization.
132 | ThreadState::ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
133 |                          unsigned reuse_count, uptr stk_addr, uptr stk_size,
134 |                          uptr tls_addr, uptr tls_size)
135 |     : fast_state(tid, epoch)
136 |       // Do not touch these, rely on zero initialization,
137 |       // they may be accessed before the ctor.
138 |       // , ignore_reads_and_writes()
139 |       // , ignore_interceptors()
140 |       ,
141 |       clock(tid, reuse_count)
142 | #if !SANITIZER_GO
143 |       ,
144 |       jmp_bufs()
145 | #endif
146 |       ,
147 |       tid(tid),
148 |       unique_id(unique_id),
149 |       stk_addr(stk_addr),
150 |       stk_size(stk_size),
151 |       tls_addr(tls_addr),
152 |       tls_size(tls_size)
153 | #if !SANITIZER_GO
154 |       ,
155 |       last_sleep_clock(tid)
156 | #endif
157 | {
158 | }
159 |
160 | #if !SANITIZER_GO
161 | static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
162 |   uptr n_threads;
163 |   uptr n_running_threads;
164 |   ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
165 |   InternalMmapVector<char> buf(4096);
166 |   WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
167 |   WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
168 | }
169 |
170 | static void *BackgroundThread(void *arg) {
171 |   // This is a non-initialized non-user thread, nothing to see here.
172 |   // We don't use ScopedIgnoreInterceptors, because we want ignores to be
173 |   // enabled even when the thread function exits (e.g. during pthread thread
174 |   // shutdown code).
175 |   cur_thread_init();
176 |   cur_thread()->ignore_interceptors++;
177 |   const u64 kMs2Ns = 1000 * 1000;
178 |
179 |   fd_t mprof_fd = kInvalidFd;
180 |   if (flags()->profile_memory && flags()->profile_memory[0]) {
181 |     if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
182 |       mprof_fd = 1;
183 |     } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
184 |       mprof_fd = 2;
185 |     } else {
186 |       InternalScopedString filename;
187 |       filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
188 |       fd_t fd = OpenFile(filename.data(), WrOnly);
189 |       if (fd == kInvalidFd) {
190 |         Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
191 |                filename.data());
192 |       } else {
193 |         mprof_fd = fd;
194 |       }
195 |     }
196 |   }
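// Usage sketch for the block above (file name illustrative): running with
// TSAN_OPTIONS=profile_memory=mprof makes this thread write periodic memory
// profiles to "mprof.<pid>"; the special names "stdout" and "stderr" select
// file descriptors 1 and 2 instead.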
197 |
198 |   u64 last_flush = NanoTime();
199 |   uptr last_rss = 0;
200 |   for (int i = 0;
201 |       atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
202 |       i++) {
203 |     SleepForMillis(100);
204 |     u64 now = NanoTime();
205 |
206 |     // Flush memory if requested.
207 |     if (flags()->flush_memory_ms > 0) {
208 |       if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
209 |         VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
210 |         FlushShadowMemory();
211 |         last_flush = NanoTime();
212 |       }
213 |     }
214 |     // GetRSS can be expensive on huge programs, so don't do it every 100ms.
215 |     if (flags()->memory_limit_mb > 0) {
216 |       uptr rss = GetRSS();
217 |       uptr limit = uptr(flags()->memory_limit_mb) << 20;
218 |       VPrintf(1, "ThreadSanitizer: memory flush check"
219 |                  " RSS=%llu LAST=%llu LIMIT=%llu\n",
220 |               (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
221 |       if (2 * rss > limit + last_rss) {
222 |         VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
223 |         FlushShadowMemory();
224 |         rss = GetRSS();
225 |         VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss >> 20);
226 |       }
227 |       last_rss = rss;
228 |     }
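// The flush condition above, 2 * rss > limit + last_rss, rearranges to
// rss - last_rss > limit - rss: flush once the RSS growth since the previous
// check exceeds the remaining headroom below the limit. Illustrative numbers:
// limit = 1000MB, last_rss = 700MB, rss = 900MB gives 1800 > 1700, so the
// shadow is flushed before the limit is actually hit.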
229 |
230 |     // Write memory profile if requested.
231 |     if (mprof_fd != kInvalidFd)
232 |       MemoryProfiler(ctx, mprof_fd, i);
233 |
234 |     // Flush symbolizer cache if requested.
235 |     if (flags()->flush_symbolizer_ms > 0) {
236 |       u64 last = atomic_load(&ctx->last_symbolize_time_ns,
237 |                              memory_order_relaxed);
238 |       if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
239 |         Lock l(&ctx->report_mtx);
240 |         ScopedErrorReportLock l2;
241 |         SymbolizeFlush();
242 |         atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
243 |       }
244 |     }
245 |   }
246 |   return nullptr;
247 | }
248 |
249 | static void StartBackgroundThread() {
250 |   ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
251 | }
252 |
253 | #ifndef __mips__
254 | static void StopBackgroundThread() {
255 |   atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
256 |   internal_join_thread(ctx->background_thread);
257 |   ctx->background_thread = 0;
258 | }
259 | #endif
260 | #endif
261 |
262 | void DontNeedShadowFor(uptr addr, uptr size) {
263 |   ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
264 | }
265 |
266 | #if !SANITIZER_GO
267 | void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
268 |   if (size == 0) return;
269 |   DontNeedShadowFor(addr, size);
270 |   ScopedGlobalProcessor sgp;
271 |   ctx->metamap.ResetRange(thr->proc(), addr, size);
272 | }
273 | #endif
274 |
275 | void MapShadow(uptr addr, uptr size) {
276 |   // Global data is not 64K aligned, but there are no adjacent mappings,
277 |   // so we can get away with unaligned mapping.
278 |   // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
279 |   const uptr kPageSize = GetPageSizeCached();
280 |   uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
281 |   uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
282 |   if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
283 |                                "shadow"))
284 |     Die();
285 |
286 |   // Meta shadow is 2:1, so tread carefully.
287 |   static bool data_mapped = false;
288 |   static uptr mapped_meta_end = 0;
289 |   uptr meta_begin = (uptr)MemToMeta(addr);
290 |   uptr meta_end = (uptr)MemToMeta(addr + size);
291 |   meta_begin = RoundDownTo(meta_begin, 64 << 10);
292 |   meta_end = RoundUpTo(meta_end, 64 << 10);
293 |   if (!data_mapped) {
294 |     // First call maps data+bss.
295 |     data_mapped = true;
296 |     if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
297 |                                  "meta shadow"))
298 |       Die();
299 |   } else {
300 |     // Mapping continuous heap.
301 |     // Windows wants 64K alignment.
302 |     meta_begin = RoundDownTo(meta_begin, 64 << 10);
303 |     meta_end = RoundUpTo(meta_end, 64 << 10);
304 |     if (meta_end <= mapped_meta_end)
305 |       return;
306 |     if (meta_begin < mapped_meta_end)
307 |       meta_begin = mapped_meta_end;
308 |     if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
309 |                                  "meta shadow"))
310 |       Die();
311 |     mapped_meta_end = meta_end;
312 |   }
313 |   VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
314 |           addr, addr+size, meta_begin, meta_end);
315 | }
316 |
317 | void MapThreadTrace(uptr addr, uptr size, const char *name) {
318 |   DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
319 |   CHECK_GE(addr, TraceMemBeg());
320 |   CHECK_LE(addr + size, TraceMemEnd());
321 |   CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
322 |   if (!MmapFixedSuperNoReserve(addr, size, name)) {
323 |     Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
324 |            addr, size);
325 |     Die();
326 |   }
327 | }
328 |
329 | static void CheckShadowMapping() {
330 |   uptr beg, end;
331 |   for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
332 |     // Skip cases for empty regions (heap definition for architectures that
333 |     // do not use 64-bit allocator).
334 |     if (beg == end)
335 |       continue;
336 |     VPrintf(3, "checking shadow region %p-%p\n", beg, end);
337 |     uptr prev = 0;
338 |     for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
339 |       for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
340 |         const uptr p = RoundDown(p0 + x, kShadowCell);
341 |         if (p < beg || p >= end)
342 |           continue;
343 |         const uptr s = MemToShadow(p);
344 |         const uptr m = (uptr)MemToMeta(p);
345 |         VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
346 |         CHECK(IsAppMem(p));
347 |         CHECK(IsShadowMem(s));
348 |         CHECK_EQ(p, ShadowToMem(s));
349 |         CHECK(IsMetaMem(m));
350 |         if (prev) {
351 |           // Ensure that shadow and meta mappings are linear within a single
352 |           // user range. Lots of code that processes memory ranges assumes it.
353 |           const uptr prev_s = MemToShadow(prev);
354 |           const uptr prev_m = (uptr)MemToMeta(prev);
355 |           CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
356 |           CHECK_EQ((m - prev_m) / kMetaShadowSize,
357 |                    (p - prev) / kMetaShadowCell);
358 |         }
359 |         prev = p;
360 |       }
361 |     }
362 |   }
363 | }
364 |
365 | #if !SANITIZER_GO
366 | static void OnStackUnwind(const SignalContext &sig, const void *,
367 |                           BufferedStackTrace *stack) {
368 |   stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
369 |                 common_flags()->fast_unwind_on_fatal);
370 | }
371 |
372 | static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
373 |   HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
374 | }
375 | #endif
376 |
377 | void CheckUnwind() {
378 |   // There is high probability that interceptors will check-fail as well,
379 |   // on the other hand there is no sense in processing interceptors
380 |   // since we are going to die soon.
381 |   ScopedIgnoreInterceptors ignore;
382 | #if !SANITIZER_GO
383 |   cur_thread()->ignore_sync++;
384 |   cur_thread()->ignore_reads_and_writes++;
385 | #endif
386 |   PrintCurrentStackSlow(StackTrace::GetCurrentPc());
387 | }
388 |
389 | void Initialize(ThreadState *thr) {
390 |   // Thread safe because done before all threads exist.
391 |   static bool is_initialized = false;
392 |   if (is_initialized)
393 |     return;
394 |   is_initialized = true;
395 |   // We are not ready to handle interceptors yet.
396 |   ScopedIgnoreInterceptors ignore;
397 |   SanitizerToolName = "ThreadSanitizer";
398 |   // Install tool-specific callbacks in sanitizer_common.
399 |   SetCheckUnwindCallback(CheckUnwind);
400 |
401 |   ctx = new(ctx_placeholder) Context;
402 |   const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
403 |   const char *options = GetEnv(env_name);
404 |   CacheBinaryName();
405 |   CheckASLR();
406 |   InitializeFlags(&ctx->flags, options, env_name);
407 |   AvoidCVE_2016_2143();
408 |   __sanitizer::InitializePlatformEarly();
409 |   __tsan::InitializePlatformEarly();
410 |
411 | #if !SANITIZER_GO
412 |   // Re-exec ourselves if we need to set additional env or command line args.
413 |   MaybeReexec();
414 |
415 |   InitializeAllocator();
416 |   ReplaceSystemMalloc();
417 | #endif
418 |   if (common_flags()->detect_deadlocks)
419 |     ctx->dd = DDetector::Create(flags());
420 |   Processor *proc = ProcCreate();
421 |   ProcWire(proc, thr);
422 |   InitializeInterceptors();
423 |   CheckShadowMapping();
424 |   InitializePlatform();
425 |   InitializeMutex();
426 |   InitializeDynamicAnnotations();
427 | #if !SANITIZER_GO
428 |   InitializeShadowMemory();
429 |   InitializeAllocatorLate();
430 |   InstallDeadlySignalHandlers(TsanOnDeadlySignal);
431 | #endif
432 |   // Setup correct file descriptor for error reports.
433 |   __sanitizer_set_report_path(common_flags()->log_path);
434 |   InitializeSuppressions();
435 | #if !SANITIZER_GO
436 |   InitializeLibIgnore();
437 |   Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
438 | #endif
439 |
440 |   VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
441 |           (int)internal_getpid());
442 |
443 |   // Initialize thread 0.
444 |   int tid = ThreadCreate(thr, 0, 0, true);
445 |   CHECK_EQ(tid, 0);
446 |   ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
447 | #if TSAN_CONTAINS_UBSAN
448 |   __ubsan::InitAsPlugin();
449 | #endif
450 |   ctx->initialized = true;
451 |
452 | #if !SANITIZER_GO
453 |   Symbolizer::LateInitialize();
454 | #endif
455 |
456 |   if (flags()->stop_on_start) {
457 |     Printf("ThreadSanitizer is suspended at startup (pid %d)."
458 |            " Call __tsan_resume().\n",
459 |            (int)internal_getpid());
460 |     while (__tsan_resumed == 0) {}
461 |   }
462 |
463 |   OnInitialize();
464 | }
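// Debugging sketch based on the stop_on_start loop above: running with
// TSAN_OPTIONS=stop_on_start=1 parks the process in the busy-wait until
// __tsan_resumed becomes non-zero, e.g. from an attached gdb (illustrative):
//
//   (gdb) attach <pid>
//   (gdb) call __tsan_resume()
//   (gdb) continue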
465 |
466 | void MaybeSpawnBackgroundThread() {
467 |   // On MIPS, TSan initialization is run before
468 |   // __pthread_initialize_minimal_internal() is finished, so we can not spawn
469 |   // new threads.
470 | #if !SANITIZER_GO && !defined(__mips__)
471 |   static atomic_uint32_t bg_thread = {};
472 |   if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
473 |       atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
474 |     StartBackgroundThread();
475 |     SetSandboxingCallback(StopBackgroundThread);
476 |   }
477 | #endif
478 | }
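// The relaxed load + atomic_exchange pair above is a one-shot guard: the
// load keeps the common already-spawned path cheap, while the exchange
// guarantees exactly one caller observes 0 and actually spawns the thread.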
479 |
480 |
481 | int Finalize(ThreadState *thr) {
482 |   bool failed = false;
483 |
484 |   if (common_flags()->print_module_map == 1)
485 |     DumpProcessMap();
486 |
487 |   if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
488 |     SleepForMillis(flags()->atexit_sleep_ms);
489 |
490 |   // Wait for pending reports.
491 |   ctx->report_mtx.Lock();
492 |   { ScopedErrorReportLock l; }
493 |   ctx->report_mtx.Unlock();
494 |
495 | #if !SANITIZER_GO
496 |   if (Verbosity()) AllocatorPrintStats();
497 | #endif
498 |
499 |   ThreadFinalize(thr);
500 |
501 |   if (ctx->nreported) {
502 |     failed = true;
503 | #if !SANITIZER_GO
504 |     Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
505 | #else
506 |     Printf("Found %d data race(s)\n", ctx->nreported);
507 | #endif
508 |   }
509 |
510 |   if (ctx->nmissed_expected) {
511 |     failed = true;
512 |     Printf("ThreadSanitizer: missed %d expected races\n",
513 |            ctx->nmissed_expected);
514 |   }
515 |
516 |   if (common_flags()->print_suppressions)
517 |     PrintMatchedSuppressions();
518 | #if !SANITIZER_GO
519 |   if (flags()->print_benign)
520 |     PrintMatchedBenignRaces();
521 | #endif
522 |
523 |   failed = OnFinalize(failed);
524 |
525 | #if TSAN_COLLECT_STATS
526 |   StatAggregate(ctx->stat, thr->stat);
527 |   StatOutput(ctx->stat);
528 | #endif
529 |
530 |   return failed ? common_flags()->exitcode : 0;
531 | }
532 |
533 | #if !SANITIZER_GO
534 | void ForkBefore(ThreadState *thr, uptr pc) {
535 |   ctx->thread_registry->Lock();
536 |   ctx->report_mtx.Lock();
537 |   // Suppress all reports in the pthread_atfork callbacks.
538 |   // Reports will deadlock on the report_mtx.
539 |   // We could ignore sync operations as well,
540 |   // but so far it's unclear if it will do more good or harm.
541 |   // Unnecessarily ignoring things can lead to false positives later.
542 |   thr->suppress_reports++;
543 |   // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
544 |   // we'll assert in CheckNoLocks() unless we ignore interceptors.
545 |   thr->ignore_interceptors++;
546 | }
547 |
548 | void ForkParentAfter(ThreadState *thr, uptr pc) {
549 |   thr->suppress_reports--;  // Enabled in ForkBefore.
550 |   thr->ignore_interceptors--;
551 |   ctx->report_mtx.Unlock();
552 |   ctx->thread_registry->Unlock();
553 | }
554 |
555 | void ForkChildAfter(ThreadState *thr, uptr pc) {
556 |   thr->suppress_reports--;  // Enabled in ForkBefore.
557 |   thr->ignore_interceptors--;
558 |   ctx->report_mtx.Unlock();
559 |   ctx->thread_registry->Unlock();
560 |
561 |   uptr nthread = 0;
562 |   ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
563 |   VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
564 |              " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
565 |   if (nthread == 1) {
566 |     StartBackgroundThread();
567 |   } else {
568 |     // We've just forked a multi-threaded process. We cannot reasonably function
569 |     // after that (some mutexes may be locked before fork). So just enable
570 |     // ignores for everything in the hope that we will exec soon.
571 |     ctx->after_multithreaded_fork = true;
572 |     thr->ignore_interceptors++;
573 |     ThreadIgnoreBegin(thr, pc);
574 |     ThreadIgnoreSyncBegin(thr, pc);
575 |   }
576 | }
577 | #endif
578 |
579 | #if SANITIZER_GO
580 | NOINLINE
581 | void GrowShadowStack(ThreadState *thr) {
582 |   const int sz = thr->shadow_stack_end - thr->shadow_stack;
583 |   const int newsz = 2 * sz;
584 |   uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
585 |                                          newsz * sizeof(uptr));
586 |   internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
587 |   internal_free(thr->shadow_stack);
588 |   thr->shadow_stack = newstack;
589 |   thr->shadow_stack_pos = newstack + sz;
590 |   thr->shadow_stack_end = newstack + newsz;
591 | }
592 | #endif
593 |
594 | u32 CurrentStackId(ThreadState *thr, uptr pc) {
595 |   if (!thr->is_inited)  // May happen during bootstrap.
596 |     return 0;
597 |   if (pc != 0) {
598 | #if !SANITIZER_GO
599 |     DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
600 | #else
601 |     if (thr->shadow_stack_pos == thr->shadow_stack_end)
602 |       GrowShadowStack(thr);
603 | #endif
604 |     thr->shadow_stack_pos[0] = pc;
605 |     thr->shadow_stack_pos++;
606 |   }
607 |   u32 id = StackDepotPut(
608 |       StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
609 |   if (pc != 0)
610 |     thr->shadow_stack_pos--;
611 |   return id;
612 | }
613 |
614 | void TraceSwitch(ThreadState *thr) {
615 | #if !SANITIZER_GO
616 |   if (ctx->after_multithreaded_fork)
617 |     return;
618 | #endif
619 |   thr->nomalloc++;
620 |   Trace *thr_trace = ThreadTrace(thr->tid);
621 |   Lock l(&thr_trace->mtx);
622 |   unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
623 |   TraceHeader *hdr = &thr_trace->headers[trace];
624 |   hdr->epoch0 = thr->fast_state.epoch();
625 |   ObtainCurrentStack(thr, 0, &hdr->stack0);
626 |   hdr->mset0 = thr->mset;
627 |   thr->nomalloc--;
628 | }
629 |
630 | Trace *ThreadTrace(int tid) {
631 |   return (Trace*)GetThreadTraceHeader(tid);
632 | }
633 |
634 | uptr TraceTopPC(ThreadState *thr) {
635 |   Event *events = (Event*)GetThreadTrace(thr->tid);
636 |   uptr pc = events[thr->fast_state.GetTracePos()];
637 |   return pc;
638 | }
639 |
640 | uptr TraceSize() {
641 |   return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
642 | }
643 |
644 | uptr TraceParts() {
645 |   return TraceSize() / kTracePartSize;
646 | }
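// Worked example (assuming kTracePartSizeBits == 13 in tsan_defs.h, i.e. 8K
// events per part, and the default history_size of 2): TraceSize() is
// 1 << (13 + 2 + 1) = 64K events and TraceParts() is 64K / 8K = 8 parts.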
647 |
648 | #if !SANITIZER_GO
649 | extern "C" void __tsan_trace_switch() {
650 |   TraceSwitch(cur_thread());
651 | }
652 |
653 | extern "C" void __tsan_report_race() {
654 |   ReportRace(cur_thread());
655 | }
656 | #endif
657 |
658 | ALWAYS_INLINE
659 | Shadow LoadShadow(u64 *p) {
660 |   u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
661 |   return Shadow(raw);
662 | }
663 |
664 | ALWAYS_INLINE
665 | void StoreShadow(u64 *sp, u64 s) {
666 |   atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
667 | }
668 |
669 | ALWAYS_INLINE
670 | void StoreIfNotYetStored(u64 *sp, u64 *s) {
671 |   StoreShadow(sp, *s);
672 |   *s = 0;
673 | }
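// Note on the helper above: after the first store it zeroes the cached word,
// so if the unrolled scan in MemoryAccessImpl1 later finds another slot to
// overwrite, that slot is merely cleared rather than filled with a duplicate
// of the current access (reading of its use in tsan_update_shadow_word_inl.h).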
674 |
675 | ALWAYS_INLINE
676 | void HandleRace(ThreadState *thr, u64 *shadow_mem,
677 |                 Shadow cur, Shadow old) {
678 |   thr->racy_state[0] = cur.raw();
679 |   thr->racy_state[1] = old.raw();
680 |   thr->racy_shadow_addr = shadow_mem;
681 | #if !SANITIZER_GO
682 |   HACKY_CALL(__tsan_report_race);
683 | #else
684 |   ReportRace(thr);
685 | #endif
686 | }
687 |
688 | static inline bool HappensBefore(Shadow old, ThreadState *thr) {
689 |   return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
690 | }
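// Vector-clock reading of the check above: the old access by thread T at
// epoch E happens-before the current access iff this thread's clock entry
// for T has already advanced to E or beyond, i.e. thr->clock.get(T) >= E.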
691 |
692 | ALWAYS_INLINE
693 | void MemoryAccessImpl1(ThreadState *thr, uptr addr,
694 |                        int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
695 |                        u64 *shadow_mem, Shadow cur) {
696 |   StatInc(thr, StatMop);
697 |   StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
698 |   StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
699 |
700 |   // This potentially can live in an MMX/SSE scratch register.
701 |   // The required intrinsics are:
702 |   // __m128i _mm_move_epi64(__m128i*);
703 |   // _mm_storel_epi64(u64*, __m128i);
704 |   u64 store_word = cur.raw();
705 |   bool stored = false;
706 |
707 |   // scan all the shadow values and dispatch to 4 categories:
708 |   // same, replace, candidate and race (see comments below).
709 |   // we consider only 3 cases regarding access sizes:
710 |   // equal, intersect and not intersect. initially I considered
711 |   // larger and smaller as well, which allowed replacing some
712 |   // 'candidates' with 'same' or 'replace', but I think
713 |   // it's just not worth it (performance- and complexity-wise).
714 |
715 |   Shadow old(0);
716 |
717 |   // In release mode we manually unroll the loop,
718 |   // because empirically gcc generates better code this way.
719 |   // However, we can't afford unrolling in debug mode, because the function
720 |   // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
721 |   // threads, which is not enough for the unrolled loop.
722 | #if SANITIZER_DEBUG
723 |   for (int idx = 0; idx < 4; idx++) {
724 | #include "tsan_update_shadow_word_inl.h"
725 |   }
726 | #else
727 |   int idx = 0;
728 | #include "tsan_update_shadow_word_inl.h"
729 |   idx = 1;
730 |   if (stored) {
731 | #include "tsan_update_shadow_word_inl.h"
732 |   } else {
733 | #include "tsan_update_shadow_word_inl.h"
734 |   }
735 |   idx = 2;
736 |   if (stored) {
737 | #include "tsan_update_shadow_word_inl.h"
738 |   } else {
739 | #include "tsan_update_shadow_word_inl.h"
740 |   }
741 |   idx = 3;
742 |   if (stored) {
743 | #include "tsan_update_shadow_word_inl.h"
744 |   } else {
745 | #include "tsan_update_shadow_word_inl.h"
746 |   }
747 | #endif
748 |
749 |   // we did not find any races and had already stored
750 |   // the current access info, so we are done
751 |   if (LIKELY(stored))
752 |     return;
753 |   // choose a random candidate slot and replace it
754 |   StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
755 |   StatInc(thr, StatShadowReplace);
756 |   return;
757 |  RACE:
758 |   HandleRace(thr, shadow_mem, cur, old);
759 |   return;
760 | }
761 |
762 | void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
763 |                            int size, bool kAccessIsWrite, bool kIsAtomic) {
764 |   while (size) {
765 |     int size1 = 1;
766 |     int kAccessSizeLog = kSizeLog1;
767 |     if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
768 |       size1 = 8;
769 |       kAccessSizeLog = kSizeLog8;
770 |     } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
771 |       size1 = 4;
772 |       kAccessSizeLog = kSizeLog4;
773 |     } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
774 |       size1 = 2;
775 |       kAccessSizeLog = kSizeLog2;
776 |     }
777 |     MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
778 |     addr += size1;
779 |     size -= size1;
780 |   }
781 | }
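// The predicates above test that a candidate access stays inside one 8-byte
// shadow cell. Worked example: for addr = 0x1006, (addr + 7) & ~7 and
// (addr + 3) & ~7 are both 0x1008 while addr & ~7 is 0x1000, so 8- and 4-byte
// accesses would straddle two cells; (addr + 1) & ~7 == 0x1000, so a 2-byte
// access is recorded and the loop continues at addr = 0x1008.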
782 |
783 | ALWAYS_INLINE
784 | bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
785 |   Shadow cur(a);
786 |   for (uptr i = 0; i < kShadowCnt; i++) {
787 |     Shadow old(LoadShadow(&s[i]));
788 |     if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
789 |         old.TidWithIgnore() == cur.TidWithIgnore() &&
790 |         old.epoch() > sync_epoch &&
791 |         old.IsAtomic() == cur.IsAtomic() &&
792 |         old.IsRead() <= cur.IsRead())
793 |       return true;
794 |   }
795 |   return false;
796 | }
797 |
798 | #if defined(__SSE3__)
799 | #define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
800 |     _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
801 |     (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
802 | ALWAYS_INLINE
803 | bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
804 |   // This is an optimized version of ContainsSameAccessSlow.
805 |   // load current access into access[0:63]
806 |   const m128 access = _mm_cvtsi64_si128(a);
807 |   // duplicate high part of access in addr0:
808 |   // addr0[0:31]   = access[32:63]
809 |   // addr0[32:63]  = access[32:63]
810 |   // addr0[64:95]  = access[32:63]
811 |   // addr0[96:127] = access[32:63]
812 |   const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
813 |   // load 4 shadow slots
814 |   const m128 shadow0 = _mm_load_si128((__m128i*)s);
815 |   const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
816 |   // load high parts of 4 shadow slots into addr_vect:
817 |   // addr_vect[0:31]   = shadow0[32:63]
818 |   // addr_vect[32:63]  = shadow0[96:127]
819 |   // addr_vect[64:95]  = shadow1[32:63]
820 |   // addr_vect[96:127] = shadow1[96:127]
821 |   m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
822 |   if (!is_write) {
823 |     // set IsRead bit in addr_vect
824 |     const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
825 |     const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
826 |     addr_vect = _mm_or_si128(addr_vect, rw_mask);
827 |   }
828 |   // addr0 == addr_vect?
829 |   const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
830 |   // epoch1[0:63] = sync_epoch
831 |   const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
832 |   // epoch[0:31]   = sync_epoch[0:31]
833 |   // epoch[32:63]  = sync_epoch[0:31]
834 |   // epoch[64:95]  = sync_epoch[0:31]
835 |   // epoch[96:127] = sync_epoch[0:31]
836 |   const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
837 |   // load low parts of shadow cell epochs into epoch_vect:
838 |   // epoch_vect[0:31]   = shadow0[0:31]
839 |   // epoch_vect[32:63]  = shadow0[64:95]
840 |   // epoch_vect[64:95]  = shadow1[0:31]
841 |   // epoch_vect[96:127] = shadow1[64:95]
842 |   const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
843 |   // epoch_vect >= sync_epoch?
844 |   const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
845 |   // addr_res & epoch_res
846 |   const m128 res = _mm_and_si128(addr_res, epoch_res);
847 |   // mask[0] = res[7]
848 |   // mask[1] = res[15]
849 |   // ...
850 |   // mask[15] = res[127]
851 |   const int mask = _mm_movemask_epi8(res);
852 |   return mask != 0;
853 | }
854 | #endif
855 |
856 | ALWAYS_INLINE
857 | bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
858 | #if defined(__SSE3__)
859 |   bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
860 |   // NOTE: this check can fail if the shadow is concurrently mutated
861 |   // by other threads. But it still can be useful if you modify
862 |   // ContainsSameAccessFast and want to ensure that it's not completely broken.
863 |   // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
864 |   return res;
865 | #else
866 |   return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
867 | #endif
868 | }
869 |
870 | ALWAYS_INLINE USED
871 | void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
872 |                   int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
873 |   u64 *shadow_mem = (u64*)MemToShadow(addr);
874 |   DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
875 |            " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
876 |            (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
877 |            (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
878 |            (uptr)shadow_mem[0], (uptr)shadow_mem[1],
879 |            (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
880 | #if SANITIZER_DEBUG
881 |   if (!IsAppMem(addr)) {
882 |     Printf("Access to non app mem %zx\n", addr);
883 |     DCHECK(IsAppMem(addr));
884 |   }
885 |   if (!IsShadowMem((uptr)shadow_mem)) {
886 |     Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
887 |     DCHECK(IsShadowMem((uptr)shadow_mem));
888 |   }
889 | #endif
890 |
891 |   if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
892 |     // Access to .rodata section, no races here.
893 |     // Measurements show that it can be 10-20% of all memory accesses.
894 |     StatInc(thr, StatMop);
895 |     StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
896 |     StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
897 |     StatInc(thr, StatMopRodata);
898 |     return;
899 |   }
900 |
901 |   FastState fast_state = thr->fast_state;
902 |   if (UNLIKELY(fast_state.GetIgnoreBit())) {
903 |     StatInc(thr, StatMop);
904 |     StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
905 |     StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
906 |     StatInc(thr, StatMopIgnored);
907 |     return;
908 |   }
909 |
910 |   Shadow cur(fast_state);
911 |   cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
912 |   cur.SetWrite(kAccessIsWrite);
913 |   cur.SetAtomic(kIsAtomic);
914 |
915 |   if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
916 |       thr->fast_synch_epoch, kAccessIsWrite))) {
917 |     StatInc(thr, StatMop);
918 |     StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
919 |     StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
920 |     StatInc(thr, StatMopSame);
921 |     return;
922 |   }
923 |
924 |   if (kCollectHistory) {
925 |     fast_state.IncrementEpoch();
926 |     thr->fast_state = fast_state;
927 |     TraceAddEvent(thr, fast_state, EventTypeMop, pc);
928 |     cur.IncrementEpoch();
929 |   }
930 |
931 |   MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
932 |                     shadow_mem, cur);
933 | }
934 |
935 | // Called by MemoryAccessRange in tsan_rtl_thread.cpp
936 | ALWAYS_INLINE USED
937 | void MemoryAccessImpl(ThreadState *thr, uptr addr,
938 |                       int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
939 |                       u64 *shadow_mem, Shadow cur) {
940 |   if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
941 |       thr->fast_synch_epoch, kAccessIsWrite))) {
942 |     StatInc(thr, StatMop);
943 |     StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
944 |     StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
945 |     StatInc(thr, StatMopSame);
946 |     return;
947 |   }
948 |
949 |   MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
950 |                     shadow_mem, cur);
951 | }
952 |
953 | static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
954 |                            u64 val) {
955 |   (void)thr;
956 |   (void)pc;
957 |   if (size == 0)
958 |     return;
959 |   // FIXME: fix me.
960 |   uptr offset = addr % kShadowCell;
961 |   if (offset) {
962 |     offset = kShadowCell - offset;
963 |     if (size <= offset)
964 |       return;
965 |     addr += offset;
966 |     size -= offset;
967 |   }
968 |   DCHECK_EQ(addr % 8, 0);
969 |   // If a user passes some insane arguments (memset(0)),
970 |   // let it just crash as usual.
971 |   if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
972 |     return;
973 |   // Don't want to touch lots of shadow memory.
974 |   // If a program maps 10MB stack, there is no need to reset the whole range.
975 |   size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
976 |   // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
977 |   if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
978 |     u64 *p = (u64*)MemToShadow(addr);
979 |     CHECK(IsShadowMem((uptr)p));
980 |     CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
981 |     // FIXME: may overwrite a part outside the region
982 |     for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
983 |       p[i++] = val;
984 |       for (uptr j = 1; j < kShadowCnt; j++)
985 |         p[i++] = 0;
986 |     }
987 |   } else {
988 |     // The region is big, reset only beginning and end.
989 |     const uptr kPageSize = GetPageSizeCached();
990 |     u64 *begin = (u64*)MemToShadow(addr);
991 |     u64 *end = begin + size / kShadowCell * kShadowCnt;
992 |     u64 *p = begin;
993 |     // Set at least first kPageSize/2 to page boundary.
994 |     while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
995 |       *p++ = val;
996 |       for (uptr j = 1; j < kShadowCnt; j++)
997 |         *p++ = 0;
998 |     }
999 |     // Reset middle part.
1000 |     u64 *p1 = p;
1001 |     p = RoundDown(end, kPageSize);
1002 |     if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
1003 |       Die();
1004 |     // Set the ending.
1005 |     while (p < end) {
1006 |       *p++ = val;
1007 |       for (uptr j = 1; j < kShadowCnt; j++)
1008 |         *p++ = 0;
1009 |     }
1010 |   }
1011 | }
1012 |
1013 | void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
1014 |   MemoryRangeSet(thr, pc, addr, size, 0);
1015 | }
1016 |
1017 | void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
1018 |   // Processing more than 1k (4k of shadow) is expensive,
1019 |   // can cause excessive memory consumption (the user does not necessarily
1020 |   // touch the whole range) and is most likely unnecessary.
1021 |   if (size > 1024)
1022 |     size = 1024;
1023 |   CHECK_EQ(thr->is_freeing, false);
1024 |   thr->is_freeing = true;
1025 |   MemoryAccessRange(thr, pc, addr, size, true);
1026 |   thr->is_freeing = false;
1027 |   if (kCollectHistory) {
1028 |     thr->fast_state.IncrementEpoch();
1029 |     TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
1030 |   }
1031 |   Shadow s(thr->fast_state);
1032 |   s.ClearIgnoreBit();
1033 |   s.MarkAsFreed();
1034 |   s.SetWrite(true);
1035 |   s.SetAddr0AndSizeLog(0, 3);
1036 |   MemoryRangeSet(thr, pc, addr, size, s.raw());
1037 | }
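// The shadow value written above carries the freed marker (MarkAsFreed), so
// a subsequent racy access to this range should surface as a race with a
// heap block that was already freed rather than as an ordinary data race
// (assumption based on how ReportRace classifies freed shadow values).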
1038 |
1039 | void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
1040 |   if (kCollectHistory) {
1041 |     thr->fast_state.IncrementEpoch();
1042 |     TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
1043 |   }
1044 |   Shadow s(thr->fast_state);
1045 |   s.ClearIgnoreBit();
1046 |   s.SetWrite(true);
1047 |   s.SetAddr0AndSizeLog(0, 3);
1048 |   MemoryRangeSet(thr, pc, addr, size, s.raw());
1049 | }
1050 |
1051 | void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
1052 |                                          uptr size) {
1053 |   if (thr->ignore_reads_and_writes == 0)
1054 |     MemoryRangeImitateWrite(thr, pc, addr, size);
1055 |   else
1056 |     MemoryResetRange(thr, pc, addr, size);
1057 | }
1058 |
1059 | ALWAYS_INLINE USED
1060 | void FuncEntry(ThreadState *thr, uptr pc) {
1061 |   StatInc(thr, StatFuncEnter);
1062 |   DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
1063 |   if (kCollectHistory) {
1064 |     thr->fast_state.IncrementEpoch();
1065 |     TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
1066 |   }
1067 |
1068 |   // Shadow stack maintenance can be replaced with
1069 |   // stack unwinding during trace switch (which presumably must be faster).
1070 |   DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
1071 | #if !SANITIZER_GO
1072 |   DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
1073 | #else
1074 |   if (thr->shadow_stack_pos == thr->shadow_stack_end)
1075 |     GrowShadowStack(thr);
1076 | #endif
1077 |   thr->shadow_stack_pos[0] = pc;
1078 |   thr->shadow_stack_pos++;
1079 | }
1080 |
1081 | ALWAYS_INLINE USED
1082 | void FuncExit(ThreadState *thr) {
1083 |   StatInc(thr, StatFuncExit);
1084 |   DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
1085 |   if (kCollectHistory) {
1086 |     thr->fast_state.IncrementEpoch();
1087 |     TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
1088 |   }
1089 |
1090 |   DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
1091 | #if !SANITIZER_GO
1092 |   DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
1093 | #endif
1094 |   thr->shadow_stack_pos--;
1095 | }
1096 |
1097 | void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
1098 |   DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
1099 |   thr->ignore_reads_and_writes++;
1100 |   CHECK_GT(thr->ignore_reads_and_writes, 0);
1101 |   thr->fast_state.SetIgnoreBit();
1102 | #if !SANITIZER_GO
1103 |   if (save_stack && !ctx->after_multithreaded_fork)
1104 |     thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
1105 | #endif
1106 | }
1107 |
1108 | void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
1109 |   DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
1110 |   CHECK_GT(thr->ignore_reads_and_writes, 0);
1111 |   thr->ignore_reads_and_writes--;
1112 |   if (thr->ignore_reads_and_writes == 0) {
1113 |     thr->fast_state.ClearIgnoreBit();
1114 | #if !SANITIZER_GO
1115 |     thr->mop_ignore_set.Reset();
1116 | #endif
1117 |   }
1118 | }
1119 |
1120 | #if !SANITIZER_GO
1121 | extern "C" SANITIZER_INTERFACE_ATTRIBUTE
1122 | uptr __tsan_testonly_shadow_stack_current_size() {
1123 |   ThreadState *thr = cur_thread();
1124 |   return thr->shadow_stack_pos - thr->shadow_stack;
1125 | }
1126 | #endif
1127 |
1128 | void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
1129 |   DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
1130 |   thr->ignore_sync++;
1131 |   CHECK_GT(thr->ignore_sync, 0);
1132 | #if !SANITIZER_GO
1133 |   if (save_stack && !ctx->after_multithreaded_fork)
1134 |     thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
1135 | #endif
1136 | }
1137 |
1138 | void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
1139 |   DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
1140 |   CHECK_GT(thr->ignore_sync, 0);
1141 |   thr->ignore_sync--;
1142 | #if !SANITIZER_GO
1143 |   if (thr->ignore_sync == 0)
1144 |     thr->sync_ignore_set.Reset();
1145 | #endif
1146 | }
1147 |
1148 | bool MD5Hash::operator==(const MD5Hash &other) const {
1149 |   return hash[0] == other.hash[0] && hash[1] == other.hash[1];
1150 | }
1151 |
1152 | #if SANITIZER_DEBUG
1153 | void build_consistency_debug() {}
1154 | #else
1155 | void build_consistency_release() {}
1156 | #endif
1157 |
1158 | #if TSAN_COLLECT_STATS
1159 | void build_consistency_stats() {}
1160 | #else
1161 | void build_consistency_nostats() {}
1162 | #endif
1163 |
1164 | }  // namespace __tsan
1165 |
1166 | #if !SANITIZER_GO
1167 | // Must be included in this file to make sure everything is inlined.
1168 | #include "tsan_interface_inl.h"
1169 | #endif