Bug Summary

File: projects/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
Warning: line 578, column 16
Access to field 'blocks_' results in a dereference of a null pointer (loaded from field 'parent_')
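
To make the report easier to follow: the dereference is flagged inside SyncClock::Iter::Next(), a sentinel-style iterator in which parent_ == nullptr denotes the end iterator (see the annotated listings below). The following minimal, self-contained sketch mirrors that protocol; MiniClock and Elem are simplified stand-ins invented for illustration, not the TSan types, and a single flat array replaces the two-level block table:

    #include <cstdio>

    // Simplified stand-in for SyncClock and its Iter (hypothetical
    // names; a flat array replaces the two-level block table).
    struct Elem { unsigned long long epoch = 0; };

    struct MiniClock {
      static const int kSize = 4;
      Elem elems[kSize];
      int blocks = 0;           // second-level block count (none here)

      struct Iter {
        MiniClock *parent;      // nullptr denotes the end iterator
        Elem *pos = nullptr;
        Elem *end = nullptr;
        int block = -1;

        explicit Iter(MiniClock *p) : parent(p) {
          if (p)
            Next();
        }

        // Advance to the next contiguous range, or mark the end.
        void Next() {
          block++;
          if (block < parent->blocks)    // the flagged load: parent
            return;                      // must not be null here
          if (block == parent->blocks && kSize > 0) {  // first-level range
            pos = &parent->elems[0];
            end = pos + kSize;
            return;
          }
          parent = nullptr;              // denotes end (mirrors line 595)
        }

        Iter &operator++() {
          pos++;
          if (pos >= end)
            Next();
          return *this;
        }
        bool operator!=(const Iter &other) const {
          return parent != other.parent;
        }
        Elem &operator*() { return *pos; }
      };

      Iter begin() { return Iter(this); }
      Iter end() { return Iter(nullptr); }
    };

    int main() {
      MiniClock c;
      // The range-for guard (operator!=) stops the loop once parent
      // becomes null, before operator++ can call Next() again.
      for (Elem &e : c)
        e.epoch = 1;
      std::printf("%llu\n", c.elems[0].epoch);  // prints 1
      return 0;
    }

In this model, reaching Next() with a null parent requires operator++ to run again on an iterator that has already hit the end; that is the sequence the analyzer assumes in events 14 through 21 below.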

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name tsan_clock.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -pic-is-pie -mthread-model posix -mframe-pointer=none -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -target-feature +sse3 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~svn374877/build-llvm/projects/compiler-rt/lib/tsan -I /build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/tsan -I /build/llvm-toolchain-snapshot-10~svn374877/build-llvm/include -I /build/llvm-toolchain-snapshot-10~svn374877/include -I /build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/tsan/.. -U NDEBUG -isysroot . -internal-isystem ./usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem ./include -internal-externc-isystem ./usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-non-virtual-dtor -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~svn374877/build-llvm/projects/compiler-rt/lib/tsan -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~svn374877=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -fno-builtin -fno-rtti -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-10-15-233810-7101-1 -x c++ /build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/tsan/rtl/tsan_clock.cpp

/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/tsan/rtl/tsan_clock.cpp

1//===-- tsan_clock.cpp ----------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11//===----------------------------------------------------------------------===//
12#include "tsan_clock.h"
13#include "tsan_rtl.h"
14#include "sanitizer_common/sanitizer_placement_new.h"
15
16// SyncClock and ThreadClock implement vector clocks for sync variables
17// (mutexes, atomic variables, file descriptors, etc) and threads, respectively.
18// ThreadClock contains a fixed-size vector clock for the maximum number of threads.
19// SyncClock contains a growable vector clock for the currently necessary number
20// of threads.
21// Together they implement a very simple model of operations, namely:
22//
23// void ThreadClock::acquire(const SyncClock *src) {
24// for (int i = 0; i < kMaxThreads; i++)
25// clock[i] = max(clock[i], src->clock[i]);
26// }
27//
28// void ThreadClock::release(SyncClock *dst) const {
29// for (int i = 0; i < kMaxThreads; i++)
30// dst->clock[i] = max(dst->clock[i], clock[i]);
31// }
32//
33// void ThreadClock::ReleaseStore(SyncClock *dst) const {
34// for (int i = 0; i < kMaxThreads; i++)
35// dst->clock[i] = clock[i];
36// }
37//
38// void ThreadClock::acq_rel(SyncClock *dst) {
39// acquire(dst);
40// release(dst);
41// }
42//
43// Conformance to this model is extensively verified in tsan_clock_test.cpp.
44// However, the implementation is significantly more complex. The complexity
45// allows important classes of use cases to be handled in O(1) instead of O(N).
46//
47// The use cases are:
48// 1. Singleton/once atomic that has a single release-store operation followed
49// by zillions of acquire-loads (the acquire-load is O(1)).
50// 2. Thread-local mutex (both lock and unlock can be O(1)).
51// 3. Leaf mutex (unlock is O(1)).
52// 4. A mutex shared by 2 threads (both lock and unlock can be O(1)).
53// 5. An atomic with a single writer (writes can be O(1)).
54// The implementation dynamically adapts to the workload. So if an atomic is in
55// a read-only phase, reads will be O(1); if it later switches to a read/write
56// phase, the implementation will correctly handle that by switching to O(N).
57//
58// Thread-safety note: all const operations on SyncClock's are conducted under
59// a shared lock; all non-const operations on SyncClock's are conducted under
60// an exclusive lock; ThreadClock's are private to respective threads and so
61// do not need any protection.
62//
63// Description of SyncClock state:
64// clk_ - variable size vector clock, low kClkBits hold timestamp,
65// the remaining bits hold "acquired" flag (the actual value is thread's
66// reused counter);
67// if acquired == thr->reused_, then the respective thread has already
68// acquired this clock (except possibly for dirty elements).
69// dirty_ - holds up to two indices in the vector clock that other threads
70// need to acquire regardless of "acquired" flag value;
71// release_store_tid_ - denotes that the clock state is a result of
72// release-store operation by the thread with release_store_tid_ index.
73// release_store_reused_ - reuse count of release_store_tid_.
74
75// We don't have ThreadState in these methods, so this is an ugly hack that
76// works only in C++.
77#if !SANITIZER_GO
78# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
79#else
80# define CPP_STAT_INC(typ) (void)0
81#endif
82
83namespace __tsan {
84
85static atomic_uint32_t *ref_ptr(ClockBlock *cb) {
86 return reinterpret_cast<atomic_uint32_t *>(&cb->table[ClockBlock::kRefIdx]);
87}
88
89// Drop reference to the first level block idx.
90static void UnrefClockBlock(ClockCache *c, u32 idx, uptr blocks) {
91 ClockBlock *cb = ctx->clock_alloc.Map(idx);
92 atomic_uint32_t *ref = ref_ptr(cb);
93 u32 v = atomic_load(ref, memory_order_acquire);
94 for (;;) {
95 CHECK_GT(v, 0);
96 if (v == 1)
97 break;
98 if (atomic_compare_exchange_strong(ref, &v, v - 1, memory_order_acq_rel))
99 return;
100 }
101 // First level block owns second level blocks, so free them as well.
102 for (uptr i = 0; i < blocks; i++)
103 ctx->clock_alloc.Free(c, cb->table[ClockBlock::kBlockIdx - i]);
104 ctx->clock_alloc.Free(c, idx);
105}
106
107ThreadClock::ThreadClock(unsigned tid, unsigned reused)
108 : tid_(tid)
109 , reused_(reused + 1) // 0 has special meaning
110 , cached_idx_()
111 , cached_size_()
112 , cached_blocks_() {
113 CHECK_LT(tid, kMaxTidInClock);
114 CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits);
115 nclk_ = tid_ + 1;
116 last_acquire_ = 0;
117 internal_memset(clk_, 0, sizeof(clk_));
118}
119
120void ThreadClock::ResetCached(ClockCache *c) {
121 if (cached_idx_) {
122 UnrefClockBlock(c, cached_idx_, cached_blocks_);
123 cached_idx_ = 0;
124 cached_size_ = 0;
125 cached_blocks_ = 0;
126 }
127}
128
129void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
130 DCHECK_LE(nclk_, kMaxTid);
131 DCHECK_LE(src->size_, kMaxTid);
132 CPP_STAT_INC(StatClockAcquire);
133
134 // Check if it's empty -> no need to do anything.
135 const uptr nclk = src->size_;
136 if (nclk == 0) {
137 CPP_STAT_INC(StatClockAcquireEmpty);
138 return;
139 }
140
141 bool acquired = false;
142 for (unsigned i = 0; i < kDirtyTids; i++) {
143 SyncClock::Dirty dirty = src->dirty_[i];
144 unsigned tid = dirty.tid;
145 if (tid != kInvalidTid) {
146 if (clk_[tid] < dirty.epoch) {
147 clk_[tid] = dirty.epoch;
148 acquired = true;
149 }
150 }
151 }
152
153 // Check if we've already acquired src after the last release operation on src
154 if (tid_ >= nclk || src->elem(tid_).reused != reused_) {
155 // O(N) acquire.
156 CPP_STAT_INC(StatClockAcquireFull);
157 nclk_ = max(nclk_, nclk);
158 u64 *dst_pos = &clk_[0];
159 for (ClockElem &src_elem : *src) {
160 u64 epoch = src_elem.epoch;
161 if (*dst_pos < epoch) {
162 *dst_pos = epoch;
163 acquired = true;
164 }
165 dst_pos++;
166 }
167
168 // Remember that this thread has acquired this clock.
169 if (nclk > tid_)
170 src->elem(tid_).reused = reused_;
171 }
172
173 if (acquired) {
174 CPP_STAT_INC(StatClockAcquiredSomething);
175 last_acquire_ = clk_[tid_];
176 ResetCached(c);
177 }
178}
179
180void ThreadClock::release(ClockCache *c, SyncClock *dst) {
181 DCHECK_LE(nclk_, kMaxTid);
182 DCHECK_LE(dst->size_, kMaxTid);
183
184 if (dst->size_ == 0) {
185 // ReleaseStore will correctly set release_store_tid_,
186 // which can be important for future operations.
187 ReleaseStore(c, dst);
188 return;
189 }
190
191 CPP_STAT_INC(StatClockRelease);
192 // Check if we need to resize dst.
193 if (dst->size_ < nclk_)
194 dst->Resize(c, nclk_);
195
196 // Check if we had not acquired anything from other threads
197 // since the last release on dst. If so, we need to update
198 // only dst->elem(tid_).
199 if (dst->elem(tid_).epoch > last_acquire_) {
200 UpdateCurrentThread(c, dst);
201 if (dst->release_store_tid_ != tid_ ||
202 dst->release_store_reused_ != reused_)
203 dst->release_store_tid_ = kInvalidTid;
204 return;
205 }
206
207 // O(N) release.
208 CPP_STAT_INC(StatClockReleaseFull);
209 dst->Unshare(c);
210 // First, remember whether we've acquired dst.
211 bool acquired = IsAlreadyAcquired(dst);
212 if (acquired)
213 CPP_STAT_INC(StatClockReleaseAcquired);
214 // Update dst->clk_.
215 dst->FlushDirty();
216 uptr i = 0;
217 for (ClockElem &ce : *dst) {
218 ce.epoch = max(ce.epoch, clk_[i]);
219 ce.reused = 0;
220 i++;
221 }
222 // Clear 'acquired' flag in the remaining elements.
223 if (nclk_ < dst->size_)
224 CPP_STAT_INC(StatClockReleaseClearTail);
225 for (uptr i = nclk_; i < dst->size_; i++)
226 dst->elem(i).reused = 0;
227 dst->release_store_tid_ = kInvalidTid;
228 dst->release_store_reused_ = 0;
229 // If we've acquired dst, remember this fact,
230 // so that we don't need to acquire it on next acquire.
231 if (acquired)
232 dst->elem(tid_).reused = reused_;
233}
234
235void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
236 DCHECK_LE(nclk_, kMaxTid);
237 DCHECK_LE(dst->size_, kMaxTid);
238 CPP_STAT_INC(StatClockStore);
239
240 if (dst->size_ == 0 && cached_idx_ != 0) {
    1.1: Field 'size_' is equal to 0
    2: Assuming field 'cached_idx_' is equal to 0
    3: Taking false branch
241 // Reuse the cached clock.
242 // Note: we could reuse/cache the cached clock in more cases:
243 // we could update the existing clock and cache it, or replace it with the
244 // currently cached clock and release the old one. And for a shared
245 // existing clock, we could replace it with the currently cached;
246 // or unshare, update and cache. But, for simplicity, we currently reuse the
247 // cached clock only when the target clock is empty.
248 dst->tab_ = ctx->clock_alloc.Map(cached_idx_);
249 dst->tab_idx_ = cached_idx_;
250 dst->size_ = cached_size_;
251 dst->blocks_ = cached_blocks_;
252 CHECK_EQ(dst->dirty_[0].tid, kInvalidTid);
253 // The cached clock is shared (immutable),
254 // so this is where we store the current clock.
255 dst->dirty_[0].tid = tid_;
256 dst->dirty_[0].epoch = clk_[tid_];
257 dst->release_store_tid_ = tid_;
258 dst->release_store_reused_ = reused_;
259 // Remember that we don't need to acquire it in the future.
260 dst->elem(tid_).reused = reused_;
261 // Grab a reference.
262 atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
263 return;
264 }
265
266 // Check if we need to resize dst.
267 if (dst->size_ < nclk_)
    4: Assuming field 'size_' is >= field 'nclk_'
    5: Taking false branch
268 dst->Resize(c, nclk_);
269
270 if (dst->release_store_tid_ == tid_ &&
    6: Assuming field 'release_store_tid_' is not equal to field 'tid_'
271 dst->release_store_reused_ == reused_ &&
272 dst->elem(tid_).epoch > last_acquire_) {
273 CPP_STAT_INC(StatClockStoreFast);
274 UpdateCurrentThread(c, dst);
275 return;
276 }
277
278 // O(N) release-store.
279 CPP_STAT_INC(StatClockStoreFull);
280 dst->Unshare(c);
281 // Note: dst can be larger than this ThreadClock.
282 // This is fine since clk_ beyond size is all zeros.
283 uptr i = 0;
284 for (ClockElem &ce : *dst) {
    7: Calling 'Iter::operator++'
    16: Returning from 'Iter::operator++'
    17: Calling 'Iter::operator++'
285 ce.epoch = clk_[i];
286 ce.reused = 0;
287 i++;
288 }
289 for (uptr i = 0; i < kDirtyTids; i++)
290 dst->dirty_[i].tid = kInvalidTid;
291 dst->release_store_tid_ = tid_;
292 dst->release_store_reused_ = reused_;
293 // Remember that we don't need to acquire it in the future.
294 dst->elem(tid_).reused = reused_;
295
296 // If the resulting clock is cachable, cache it for future release operations.
297 // The clock is always cachable if we released to an empty sync object.
298 if (cached_idx_ == 0 && dst->Cachable()) {
299 // Grab a reference to the ClockBlock.
300 atomic_uint32_t *ref = ref_ptr(dst->tab_);
301 if (atomic_load(ref, memory_order_acquire) == 1)
302 atomic_store_relaxed(ref, 2);
303 else
304 atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
305 cached_idx_ = dst->tab_idx_;
306 cached_size_ = dst->size_;
307 cached_blocks_ = dst->blocks_;
308 }
309}
310
311void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
312 CPP_STAT_INC(StatClockAcquireRelease);
313 acquire(c, dst);
314 ReleaseStore(c, dst);
    1: Calling 'ThreadClock::ReleaseStore'
315}
316
317// Updates only the single element related to the current thread in dst->clk_.
318void ThreadClock::UpdateCurrentThread(ClockCache *c, SyncClock *dst) const {
319 // Update the thread's time, but preserve the 'acquired' flag.
320 for (unsigned i = 0; i < kDirtyTids; i++) {
321 SyncClock::Dirty *dirty = &dst->dirty_[i];
322 const unsigned tid = dirty->tid;
323 if (tid == tid_ || tid == kInvalidTid) {
324 CPP_STAT_INC(StatClockReleaseFast);
325 dirty->tid = tid_;
326 dirty->epoch = clk_[tid_];
327 return;
328 }
329 }
330 // Reset all 'acquired' flags, O(N).
331 // We are going to touch dst elements, so we need to unshare it.
332 dst->Unshare(c);
333 CPP_STAT_INC(StatClockReleaseSlow);
334 dst->elem(tid_).epoch = clk_[tid_];
335 for (uptr i = 0; i < dst->size_; i++)
336 dst->elem(i).reused = 0;
337 dst->FlushDirty();
338}
339
340// Checks whether the current thread has already acquired src.
341bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
342 if (src->elem(tid_).reused != reused_)
343 return false;
344 for (unsigned i = 0; i < kDirtyTids; i++) {
345 SyncClock::Dirty dirty = src->dirty_[i];
346 if (dirty.tid != kInvalidTid) {
347 if (clk_[dirty.tid] < dirty.epoch)
348 return false;
349 }
350 }
351 return true;
352}
353
354// Sets a single element in the vector clock.
355// This function is called only from weird places like AcquireGlobal.
356void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) {
357 DCHECK_LT(tid, kMaxTid);
358 DCHECK_GE(v, clk_[tid]);
359 clk_[tid] = v;
360 if (nclk_ <= tid)
361 nclk_ = tid + 1;
362 last_acquire_ = clk_[tid_];
363 ResetCached(c);
364}
365
366void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
367 printf("clock=[");
368 for (uptr i = 0; i < nclk_; i++)
369 printf("%s%llu", i == 0 ? "" : ",", clk_[i]);
370 printf("] tid=%u/%u last_acq=%llu", tid_, reused_, last_acquire_);
371}
372
373SyncClock::SyncClock() {
374 ResetImpl();
375}
376
377SyncClock::~SyncClock() {
378 // Reset must be called before dtor.
379 CHECK_EQ(size_, 0);
380 CHECK_EQ(blocks_, 0);
381 CHECK_EQ(tab_, 0);
382 CHECK_EQ(tab_idx_, 0);
383}
384
385void SyncClock::Reset(ClockCache *c) {
386 if (size_)
387 UnrefClockBlock(c, tab_idx_, blocks_);
388 ResetImpl();
389}
390
391void SyncClock::ResetImpl() {
392 tab_ = 0;
393 tab_idx_ = 0;
394 size_ = 0;
395 blocks_ = 0;
396 release_store_tid_ = kInvalidTid;
397 release_store_reused_ = 0;
398 for (uptr i = 0; i < kDirtyTids; i++)
399 dirty_[i].tid = kInvalidTid;
400}
401
402void SyncClock::Resize(ClockCache *c, uptr nclk) {
403 CPP_STAT_INC(StatClockReleaseResize);
404 Unshare(c);
405 if (nclk <= capacity()) {
406 // Memory is already allocated, just increase the size.
407 size_ = nclk;
408 return;
409 }
410 if (size_ == 0) {
411 // Grow from 0 to one-level table.
412 CHECK_EQ(size_, 0);
413 CHECK_EQ(blocks_, 0);
414 CHECK_EQ(tab_, 0);
415 CHECK_EQ(tab_idx_, 0);
416 tab_idx_ = ctx->clock_alloc.Alloc(c);
417 tab_ = ctx->clock_alloc.Map(tab_idx_);
418 internal_memset(tab_, 0, sizeof(*tab_));
419 atomic_store_relaxed(ref_ptr(tab_), 1);
420 size_ = 1;
421 } else if (size_ > blocks_ * ClockBlock::kClockCount) {
422 u32 idx = ctx->clock_alloc.Alloc(c);
423 ClockBlock *new_cb = ctx->clock_alloc.Map(idx);
424 uptr top = size_ - blocks_ * ClockBlock::kClockCount;
425 CHECK_LT(top, ClockBlock::kClockCount);
426 const uptr move = top * sizeof(tab_->clock[0]);
427 internal_memcpy(&new_cb->clock[0], tab_->clock, move);
428 internal_memset(&new_cb->clock[top], 0, sizeof(*new_cb) - move);
429 internal_memset(tab_->clock, 0, move);
430 append_block(idx);
431 }
432 // At this point we have the first level table allocated and all clock
433 // elements are evacuated from it to a second level block.
434 // Add second level tables as necessary.
435 while (nclk > capacity()) {
436 u32 idx = ctx->clock_alloc.Alloc(c);
437 ClockBlock *cb = ctx->clock_alloc.Map(idx);
438 internal_memset(cb, 0, sizeof(*cb));
439 append_block(idx);
440 }
441 size_ = nclk;
442}
443
444// Flushes all dirty elements into the main clock array.
445void SyncClock::FlushDirty() {
446 for (unsigned i = 0; i < kDirtyTids; i++) {
447 Dirty *dirty = &dirty_[i];
448 if (dirty->tid != kInvalidTid) {
449 CHECK_LT(dirty->tid, size_);
450 elem(dirty->tid).epoch = dirty->epoch;
451 dirty->tid = kInvalidTid;
452 }
453 }
454}
455
456bool SyncClock::IsShared() const {
457 if (size_ == 0)
458 return false;
459 atomic_uint32_t *ref = ref_ptr(tab_);
460 u32 v = atomic_load(ref, memory_order_acquire);
461 CHECK_GT(v, 0);
462 return v > 1;
463}
464
465// Unshares the current clock if it's shared.
466// Shared clocks are immutable, so they need to be unshared before any updates.
467// Note: this does not apply to dirty entries as they are not shared.
468void SyncClock::Unshare(ClockCache *c) {
469 if (!IsShared())
470 return;
471 // First, copy current state into old.
472 SyncClock old;
473 old.tab_ = tab_;
474 old.tab_idx_ = tab_idx_;
475 old.size_ = size_;
476 old.blocks_ = blocks_;
477 old.release_store_tid_ = release_store_tid_;
478 old.release_store_reused_ = release_store_reused_;
479 for (unsigned i = 0; i < kDirtyTids; i++)
480 old.dirty_[i] = dirty_[i];
481 // Then, clear current object.
482 ResetImpl();
483 // Allocate brand new clock in the current object.
484 Resize(c, old.size_);
485 // Now copy state back into this object.
486 Iter old_iter(&old);
487 for (ClockElem &ce : *this) {
488 ce = *old_iter;
489 ++old_iter;
490 }
491 release_store_tid_ = old.release_store_tid_;
492 release_store_reused_ = old.release_store_reused_;
493 for (unsigned i = 0; i < kDirtyTids; i++)
494 dirty_[i] = old.dirty_[i];
495 // Drop reference to old and delete if necessary.
496 old.Reset(c);
497}
498
499// Can we cache this clock for future release operations?
500ALWAYS_INLINE bool SyncClock::Cachable() const {
501 if (size_ == 0)
502 return false;
503 for (unsigned i = 0; i < kDirtyTids; i++) {
504 if (dirty_[i].tid != kInvalidTid)
505 return false;
506 }
507 return atomic_load_relaxed(ref_ptr(tab_)) == 1;
508}
509
510// elem linearizes the two-level structure into a linear array.
511// Note: this is used only for one-time accesses; vector operations use
512// the iterator as it is much faster.
513ALWAYS_INLINE ClockElem &SyncClock::elem(unsigned tid) const {
514 DCHECK_LT(tid, size_);
515 const uptr block = tid / ClockBlock::kClockCount;
516 DCHECK_LE(block, blocks_);
517 tid %= ClockBlock::kClockCount;
518 if (block == blocks_)
519 return tab_->clock[tid];
520 u32 idx = get_block(block);
521 ClockBlock *cb = ctx->clock_alloc.Map(idx);
522 return cb->clock[tid];
523}
524
525ALWAYS_INLINE uptr SyncClock::capacity() const {
526 if (size_ == 0)
527 return 0;
528 uptr ratio = sizeof(ClockBlock::clock[0]) / sizeof(ClockBlock::table[0]);
529 // How many clock elements we can fit into the first level block.
530 // +1 for ref counter.
531 uptr top = ClockBlock::kClockCount - RoundUpTo(blocks_ + 1, ratio) / ratio;
532 return blocks_ * ClockBlock::kClockCount + top;
533}
534
535ALWAYS_INLINE u32 SyncClock::get_block(uptr bi) const {
536 DCHECK(size_);
537 DCHECK_LT(bi, blocks_);
538 return tab_->table[ClockBlock::kBlockIdx - bi];
539}
540
541ALWAYS_INLINE void SyncClock::append_block(u32 idx) {
542 uptr bi = blocks_++;
543 CHECK_EQ(get_block(bi), 0);
544 tab_->table[ClockBlock::kBlockIdx - bi] = idx;
545}
546
547// Used only by tests.
548u64 SyncClock::get(unsigned tid) const {
549 for (unsigned i = 0; i < kDirtyTids; i++) {
550 Dirty dirty = dirty_[i];
551 if (dirty.tid == tid)
552 return dirty.epoch;
553 }
554 return elem(tid).epoch;
555}
556
557// Used only by Iter test.
558u64 SyncClock::get_clean(unsigned tid) const {
559 return elem(tid).epoch;
560}
561
562void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
563 printf("clock=[");
564 for (uptr i = 0; i < size_; i++)
565 printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch);
566 printf("] reused=[");
567 for (uptr i = 0; i < size_; i++)
568 printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
569 printf("] release_store_tid=%d/%d dirty_tids=%d[%llu]/%d[%llu]",
570 release_store_tid_, release_store_reused_,
571 dirty_[0].tid, dirty_[0].epoch,
572 dirty_[1].tid, dirty_[1].epoch);
573}
574
575void SyncClock::Iter::Next() {
576 // Finished with the current block, move on to the next one.
577 block_++;
578 if (block_ < parent_->blocks_) {
    11: Assuming field 'block_' is >= field 'blocks_'
    12: Taking false branch
    21: Access to field 'blocks_' results in a dereference of a null pointer (loaded from field 'parent_')
579 // Iterate over the next second level block.
580 u32 idx = parent_->get_block(block_);
581 ClockBlock *cb = ctx->clock_alloc.Map(idx);
582 pos_ = &cb->clock[0];
583 end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
584 ClockBlock::kClockCount);
585 return;
586 }
587 if (block_ == parent_->blocks_ &&
    13: Assuming field 'block_' is not equal to field 'blocks_'
588 parent_->size_ > parent_->blocks_ * ClockBlock::kClockCount) {
589 // Iterate over elements in the first level block.
590 pos_ = &parent_->tab_->clock[0];
591 end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
592 ClockBlock::kClockCount);
593 return;
594 }
595 parent_ = nullptr; // denotes end
    14: Null pointer value stored to '__begin1.parent_'
596}
597} // namespace __tsan
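
Aside: the reference model quoted in the comment at the top of this file (source lines 23-41) can be exercised standalone. A minimal runnable sketch, with a hypothetical fixed thread count in place of kMaxThreads and a plain struct in place of the real clock types:

    #include <algorithm>
    #include <cstdio>

    // Runnable version of the acquire/release/ReleaseStore reference
    // model from the comment in tsan_clock.cpp. kThreads and Clock are
    // illustrative stand-ins, not TSan types.
    const int kThreads = 4;

    struct Clock { unsigned long long v[kThreads] = {}; };

    void acquire(Clock &thr, const Clock &sync) {
      for (int i = 0; i < kThreads; i++)
        thr.v[i] = std::max(thr.v[i], sync.v[i]);
    }

    void release(const Clock &thr, Clock &sync) {
      for (int i = 0; i < kThreads; i++)
        sync.v[i] = std::max(sync.v[i], thr.v[i]);
    }

    void release_store(const Clock &thr, Clock &sync) {
      for (int i = 0; i < kThreads; i++)
        sync.v[i] = thr.v[i];
    }

    int main() {
      Clock t0, t1, m;   // two thread clocks, one sync-variable clock
      t0.v[0] = 5;       // thread 0 has ticked its slot to epoch 5
      release(t0, m);    // e.g. mutex unlock by thread 0
      acquire(t1, m);    // e.g. mutex lock by thread 1
      std::printf("t1 sees t0 at epoch %llu\n", t1.v[0]);  // prints 5
      return 0;
    }

The implementation above is behaviorally equivalent to this model (verified by tsan_clock_test.cpp, as the comment notes) while avoiding the O(N) loops for the use cases it enumerates.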

/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/tsan/rtl/tsan_clock.h

1//===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11//===----------------------------------------------------------------------===//
12#ifndef TSAN_CLOCK_H
13#define TSAN_CLOCK_H
14
15#include "tsan_defs.h"
16#include "tsan_dense_alloc.h"
17
18namespace __tsan {
19
20typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
21typedef DenseSlabAllocCache ClockCache;
22
23// The clock that lives in sync variables (mutexes, atomics, etc).
24class SyncClock {
25 public:
26 SyncClock();
27 ~SyncClock();
28
29 uptr size() const;
30
31 // These are used only in tests.
32 u64 get(unsigned tid) const;
33 u64 get_clean(unsigned tid) const;
34
35 void Resize(ClockCache *c, uptr nclk);
36 void Reset(ClockCache *c);
37
38 void DebugDump(int(*printf)(const char *s, ...));
39
40 // Clock element iterator.
41 // Note: it iterates only over the table without regard to dirty entries.
42 class Iter {
43 public:
44 explicit Iter(SyncClock* parent);
45 Iter& operator++();
46 bool operator!=(const Iter& other);
47 ClockElem &operator*();
48
49 private:
50 SyncClock *parent_;
51 // [pos_, end_) is the current continuous range of clock elements.
52 ClockElem *pos_;
53 ClockElem *end_;
54 int block_; // Index of the current second level block.
55
56 NOINLINE void Next();
57 };
58
59 Iter begin();
60 Iter end();
61
62 private:
63 friend class ThreadClock;
64 friend class Iter;
65 static const uptr kDirtyTids = 2;
66
67 struct Dirty {
68 u64 epoch : kClkBits;
69 u64 tid : 64 - kClkBits; // kInvalidTid if not active
70 };
71
72 unsigned release_store_tid_;
73 unsigned release_store_reused_;
74 Dirty dirty_[kDirtyTids];
75 // If size_ is 0, tab_ is nullptr.
76 // If size_ <= 64 (kClockCount), tab_ contains a pointer to an array with
77 // 64 ClockElem's (ClockBlock::clock).
78 // Otherwise, tab_ points to an array with up to 127 u32 elements,
79 // each pointing to a second-level 512-byte block with 64 ClockElem's.
80 // Unused space in the first level ClockBlock is used to store additional
81 // clock elements.
82 // The last u32 element in the first level ClockBlock is always used as
83 // reference counter.
84 //
85 // See the following scheme for details.
86 // All memory blocks are 512 bytes (allocated from ClockAlloc).
87 // Clock (clk) elements are 64 bits.
88 // Idx and ref are 32 bits.
89 //
90 // tab_
91 // |
92 // \/
93 // +----------------------------------------------------+
94 // | clk128 | clk129 | ...unused... | idx1 | idx0 | ref |
95 // +----------------------------------------------------+
96 // | |
97 // | \/
98 // | +----------------+
99 // | | clk0 ... clk63 |
100 // | +----------------+
101 // \/
102 // +------------------+
103 // | clk64 ... clk127 |
104 // +------------------+
105 //
106 // Note: dirty entries, if active, always override what's stored in the clock.
107 ClockBlock *tab_;
108 u32 tab_idx_;
109 u16 size_;
110 u16 blocks_; // Number of second level blocks.
111
112 void Unshare(ClockCache *c);
113 bool IsShared() const;
114 bool Cachable() const;
115 void ResetImpl();
116 void FlushDirty();
117 uptr capacity() const;
118 u32 get_block(uptr bi) const;
119 void append_block(u32 idx);
120 ClockElem &elem(unsigned tid) const;
121};
122
123// The clock that lives in threads.
124class ThreadClock {
125 public:
126 typedef DenseSlabAllocCache Cache;
127
128 explicit ThreadClock(unsigned tid, unsigned reused = 0);
129
130 u64 get(unsigned tid) const;
131 void set(ClockCache *c, unsigned tid, u64 v);
132 void set(u64 v);
133 void tick();
134 uptr size() const;
135
136 void acquire(ClockCache *c, SyncClock *src);
137 void release(ClockCache *c, SyncClock *dst);
138 void acq_rel(ClockCache *c, SyncClock *dst);
139 void ReleaseStore(ClockCache *c, SyncClock *dst);
140 void ResetCached(ClockCache *c);
141
142 void DebugReset();
143 void DebugDump(int(*printf)(const char *s, ...));
144
145 private:
146 static const uptr kDirtyTids = SyncClock::kDirtyTids;
147 // Index of the thread associated with the clock ("current thread").
148 const unsigned tid_;
149 const unsigned reused_; // tid_ reuse count.
150 // Current thread time when it acquired something from other threads.
151 u64 last_acquire_;
152
153 // Cached SyncClock (without dirty entries and release_store_tid_).
154 // We reuse it for subsequent store-release operations without intervening
155 // acquire operations. Since it is shared (and thus constant), clock value
156 // for the current thread is then stored in dirty entries in the SyncClock.
157 // We hold a reference to the table while it is cached here.
158 u32 cached_idx_;
159 u16 cached_size_;
160 u16 cached_blocks_;
161
162 // Number of active elements in the clk_ table (the rest is zeros).
163 uptr nclk_;
164 u64 clk_[kMaxTidInClock]; // Fixed size vector clock.
165
166 bool IsAlreadyAcquired(const SyncClock *src) const;
167 void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
168};
169
170ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const {
171 DCHECK_LT(tid, kMaxTidInClock);
172 return clk_[tid];
173}
174
175ALWAYS_INLINE void ThreadClock::set(u64 v) {
176 DCHECK_GE(v, clk_[tid_]);
177 clk_[tid_] = v;
178}
179
180ALWAYS_INLINE void ThreadClock::tick() {
181 clk_[tid_]++;
182}
183
184ALWAYS_INLINE uptr ThreadClock::size() const {
185 return nclk_;
186}
187
188ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
189 return Iter(this);
190}
191
192ALWAYS_INLINE SyncClock::Iter SyncClock::end() {
193 return Iter(nullptr);
194}
195
196ALWAYS_INLINE uptr SyncClock::size() const {
197 return size_;
198}
199
200ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent)
201 : parent_(parent)
202 , pos_(nullptr)
203 , end_(nullptr)
204 , block_(-1) {
205 if (parent)
206 Next();
207}
208
209ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() {
210 pos_++;
211 if (UNLIKELY(pos_ >= end_))
    8: Assuming the condition is true
    9: Taking true branch
    18: Assuming the condition is true
    19: Taking true branch
212 Next();
    10: Calling 'Iter::Next'
    15: Returning from 'Iter::Next'
    20: Calling 'Iter::Next'
213 return *this;
214}
215
216ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) {
217 return parent_ != other.parent_;
218}
219
220ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() {
221 return *pos_;
222}
223} // namespace __tsan
224
225#endif // TSAN_CLOCK_H
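
Aside: the capacity() logic and the tab_ block geometry described above can be sanity-checked standalone. The constants below restate the comments (512-byte blocks, 64-bit clock elements, 32-bit idx/ref entries, the last entry reserved for the reference counter); they are assumptions for illustration, not imports from the TSan headers:

    #include <cstdio>

    // Mirror of SyncClock::capacity() under the documented geometry.
    const unsigned long kClockCount = 64;  // 512 / sizeof(u64) clock slots
    const unsigned long kRatio = 8 / 4;    // clock size / table entry size

    unsigned long RoundUpTo(unsigned long v, unsigned long m) {
      return (v + m - 1) / m * m;
    }

    // For a non-empty clock: the first-level block stores `blocks` idx
    // entries plus one ref counter, packed two per clock slot; the
    // remaining first-level slots still hold clock elements.
    unsigned long capacity(unsigned long blocks) {
      unsigned long top = kClockCount - RoundUpTo(blocks + 1, kRatio) / kRatio;
      return blocks * kClockCount + top;
    }

    int main() {
      unsigned long bs[] = {0, 1, 2, 3};
      for (unsigned long b : bs)
        std::printf("blocks=%lu capacity=%lu\n", b, capacity(b));
      // prints 63, 127, 190, 254
      return 0;
    }

Under this geometry a one-level clock holds 63 usable elements (the last 64-bit slot is given over to the 32-bit ref counter, rounded up to slot granularity), and each appended second-level block adds 64 elements while its idx entry gradually consumes first-level slots.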