File: out/../deps/v8/src/base/platform/mutex.cc
Warning: line 106, column 3: Value stored to 'result' is never read
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/platform/mutex.h"

#include <errno.h>

#if DEBUG
#include <unordered_set>
#endif  // DEBUG

#if V8_OS_WIN
#include <windows.h>
#endif
16 | |
17 | namespace v8 { |
18 | namespace base { |
19 | |
20 | #if DEBUG |
21 | namespace { |
22 | // Used for asserts to guarantee we are not re-locking a mutex on the same |
23 | // thread. If this thread has only one held shared mutex (common case), we use |
24 | // {single_held_shared_mutex}. If it has more than one we allocate a set for it. |
25 | // Said set has to manually be constructed and destroyed. |
26 | thread_local base::SharedMutex* single_held_shared_mutex = nullptr; |
27 | using TSet = std::unordered_set<base::SharedMutex*>; |
28 | thread_local TSet* held_shared_mutexes = nullptr; |
29 | |
30 | // Returns true iff {shared_mutex} is not a held mutex. |
31 | bool SharedMutexNotHeld(SharedMutex* shared_mutex) { |
32 | DCHECK_NOT_NULL(shared_mutex)((void) 0); |
33 | return single_held_shared_mutex != shared_mutex && |
34 | (!held_shared_mutexes || |
35 | held_shared_mutexes->count(shared_mutex) == 0); |
36 | } |
37 | |
38 | // Tries to hold {shared_mutex}. Returns true iff it hadn't been held prior to |
39 | // this function call. |
40 | bool TryHoldSharedMutex(SharedMutex* shared_mutex) { |
41 | DCHECK_NOT_NULL(shared_mutex)((void) 0); |
42 | if (single_held_shared_mutex) { |
43 | if (shared_mutex == single_held_shared_mutex) { |
44 | return false; |
45 | } |
46 | DCHECK_NULL(held_shared_mutexes)((void) 0); |
47 | held_shared_mutexes = new TSet({single_held_shared_mutex, shared_mutex}); |
48 | single_held_shared_mutex = nullptr; |
49 | return true; |
50 | } else if (held_shared_mutexes) { |
51 | return held_shared_mutexes->insert(shared_mutex).second; |
52 | } else { |
53 | DCHECK_NULL(single_held_shared_mutex)((void) 0); |
54 | single_held_shared_mutex = shared_mutex; |
55 | return true; |
56 | } |
57 | } |
58 | |
59 | // Tries to release {shared_mutex}. Returns true iff it had been held prior to |
60 | // this function call. |
61 | bool TryReleaseSharedMutex(SharedMutex* shared_mutex) { |
62 | DCHECK_NOT_NULL(shared_mutex)((void) 0); |
63 | if (single_held_shared_mutex == shared_mutex) { |
64 | single_held_shared_mutex = nullptr; |
65 | return true; |
66 | } |
67 | if (held_shared_mutexes && held_shared_mutexes->erase(shared_mutex)) { |
68 | if (held_shared_mutexes->empty()) { |
69 | delete held_shared_mutexes; |
70 | held_shared_mutexes = nullptr; |
71 | } |
72 | return true; |
73 | } |
74 | return false; |
75 | } |
76 | } // namespace |
77 | #endif // DEBUG |
78 | |
79 | #if V8_OS_POSIX1 |
80 | |
81 | static V8_INLINEinline __attribute__((always_inline)) void InitializeNativeHandle(pthread_mutex_t* mutex) { |
82 | int result; |
83 | #if defined(DEBUG) |
84 | // Use an error checking mutex in debug mode. |
85 | pthread_mutexattr_t attr; |
86 | result = pthread_mutexattr_init(&attr); |
87 | DCHECK_EQ(0, result)((void) 0); |
88 | result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); |
89 | DCHECK_EQ(0, result)((void) 0); |
90 | result = pthread_mutex_init(mutex, &attr); |
91 | DCHECK_EQ(0, result)((void) 0); |
92 | result = pthread_mutexattr_destroy(&attr); |
93 | #else |
94 | // Use a fast mutex (default attributes). |
95 | result = pthread_mutex_init(mutex, nullptr); |
96 | #endif // defined(DEBUG) |
97 | DCHECK_EQ(0, result)((void) 0); |
98 | USE(result)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{result} ; (void)unused_tmp_array_for_use_macro; } while (false); |
99 | } |
100 | |
101 | |
102 | static V8_INLINEinline __attribute__((always_inline)) void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) { |
103 | pthread_mutexattr_t attr; |
104 | int result = pthread_mutexattr_init(&attr); |
105 | DCHECK_EQ(0, result)((void) 0); |
106 | result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); |
Value stored to 'result' is never read | |
107 | DCHECK_EQ(0, result)((void) 0); |
108 | result = pthread_mutex_init(mutex, &attr); |
109 | DCHECK_EQ(0, result)((void) 0); |
110 | result = pthread_mutexattr_destroy(&attr); |
111 | DCHECK_EQ(0, result)((void) 0); |
112 | USE(result)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{result} ; (void)unused_tmp_array_for_use_macro; } while (false); |
113 | } |
114 | |
115 | |
116 | static V8_INLINEinline __attribute__((always_inline)) void DestroyNativeHandle(pthread_mutex_t* mutex) { |
117 | int result = pthread_mutex_destroy(mutex); |
118 | DCHECK_EQ(0, result)((void) 0); |
119 | USE(result)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{result} ; (void)unused_tmp_array_for_use_macro; } while (false); |
120 | } |
121 | |
122 | |
123 | static V8_INLINEinline __attribute__((always_inline)) void LockNativeHandle(pthread_mutex_t* mutex) { |
124 | int result = pthread_mutex_lock(mutex); |
125 | DCHECK_EQ(0, result)((void) 0); |
126 | USE(result)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{result} ; (void)unused_tmp_array_for_use_macro; } while (false); |
127 | } |
128 | |
129 | |
130 | static V8_INLINEinline __attribute__((always_inline)) void UnlockNativeHandle(pthread_mutex_t* mutex) { |
131 | int result = pthread_mutex_unlock(mutex); |
132 | DCHECK_EQ(0, result)((void) 0); |
133 | USE(result)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{result} ; (void)unused_tmp_array_for_use_macro; } while (false); |
134 | } |
135 | |
136 | |
137 | static V8_INLINEinline __attribute__((always_inline)) bool TryLockNativeHandle(pthread_mutex_t* mutex) { |
138 | int result = pthread_mutex_trylock(mutex); |
139 | if (result == EBUSY16) { |
140 | return false; |
141 | } |
142 | DCHECK_EQ(0, result)((void) 0); |
143 | return true; |
144 | } |
145 | |
146 | |
147 | Mutex::Mutex() { |
148 | InitializeNativeHandle(&native_handle_); |
149 | #ifdef DEBUG |
150 | level_ = 0; |
151 | #endif |
152 | } |
153 | |
154 | |
155 | Mutex::~Mutex() { |
156 | DestroyNativeHandle(&native_handle_); |
157 | DCHECK_EQ(0, level_)((void) 0); |
158 | } |
159 | |
160 | |
161 | void Mutex::Lock() { |
162 | LockNativeHandle(&native_handle_); |
163 | AssertUnheldAndMark(); |
164 | } |
165 | |
166 | |
167 | void Mutex::Unlock() { |
168 | AssertHeldAndUnmark(); |
169 | UnlockNativeHandle(&native_handle_); |
170 | } |
171 | |
172 | |
173 | bool Mutex::TryLock() { |
174 | if (!TryLockNativeHandle(&native_handle_)) { |
175 | return false; |
176 | } |
177 | AssertUnheldAndMark(); |
178 | return true; |
179 | } |
180 | |
181 | |
182 | RecursiveMutex::RecursiveMutex() { |
183 | InitializeRecursiveNativeHandle(&native_handle_); |
184 | #ifdef DEBUG |
185 | level_ = 0; |
186 | #endif |
187 | } |
188 | |
189 | |
190 | RecursiveMutex::~RecursiveMutex() { |
191 | DestroyNativeHandle(&native_handle_); |
192 | DCHECK_EQ(0, level_)((void) 0); |
193 | } |
194 | |
195 | |
196 | void RecursiveMutex::Lock() { |
197 | LockNativeHandle(&native_handle_); |
198 | #ifdef DEBUG |
199 | DCHECK_LE(0, level_)((void) 0); |
200 | level_++; |
201 | #endif |
202 | } |
203 | |
204 | |
205 | void RecursiveMutex::Unlock() { |
206 | #ifdef DEBUG |
207 | DCHECK_LT(0, level_)((void) 0); |
208 | level_--; |
209 | #endif |
210 | UnlockNativeHandle(&native_handle_); |
211 | } |
212 | |
213 | |
214 | bool RecursiveMutex::TryLock() { |
215 | if (!TryLockNativeHandle(&native_handle_)) { |
216 | return false; |
217 | } |
218 | #ifdef DEBUG |
219 | DCHECK_LE(0, level_)((void) 0); |
220 | level_++; |
221 | #endif |
222 | return true; |
223 | } |
224 | |
225 | #if V8_OS_DARWIN |
226 | |
227 | SharedMutex::SharedMutex() { InitializeNativeHandle(&native_handle_); } |
228 | |
229 | SharedMutex::~SharedMutex() { DestroyNativeHandle(&native_handle_); } |
230 | |
231 | void SharedMutex::LockShared() { LockExclusive(); } |
232 | |
233 | void SharedMutex::LockExclusive() { |
234 | DCHECK(TryHoldSharedMutex(this))((void) 0); |
235 | LockNativeHandle(&native_handle_); |
236 | } |
237 | |
238 | void SharedMutex::UnlockShared() { UnlockExclusive(); } |
239 | |
240 | void SharedMutex::UnlockExclusive() { |
241 | DCHECK(TryReleaseSharedMutex(this))((void) 0); |
242 | UnlockNativeHandle(&native_handle_); |
243 | } |
244 | |
245 | bool SharedMutex::TryLockShared() { return TryLockExclusive(); } |
246 | |
247 | bool SharedMutex::TryLockExclusive() { |
248 | DCHECK(SharedMutexNotHeld(this))((void) 0); |
249 | if (!TryLockNativeHandle(&native_handle_)) return false; |
250 | DCHECK(TryHoldSharedMutex(this))((void) 0); |
251 | return true; |
252 | } |
253 | |
254 | #else // !V8_OS_DARWIN |
255 | |
256 | SharedMutex::SharedMutex() { pthread_rwlock_init(&native_handle_, nullptr); } |
257 | |
258 | SharedMutex::~SharedMutex() { |
259 | int result = pthread_rwlock_destroy(&native_handle_); |
260 | DCHECK_EQ(0, result)((void) 0); |
261 | USE(result)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{result} ; (void)unused_tmp_array_for_use_macro; } while (false); |
262 | } |
263 | |
264 | void SharedMutex::LockShared() { |
265 | DCHECK(TryHoldSharedMutex(this))((void) 0); |
266 | int result = pthread_rwlock_rdlock(&native_handle_); |
267 | DCHECK_EQ(0, result)((void) 0); |
268 | USE(result)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{result} ; (void)unused_tmp_array_for_use_macro; } while (false); |
269 | } |
270 | |
271 | void SharedMutex::LockExclusive() { |
272 | DCHECK(TryHoldSharedMutex(this))((void) 0); |
273 | int result = pthread_rwlock_wrlock(&native_handle_); |
274 | DCHECK_EQ(0, result)((void) 0); |
275 | USE(result)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{result} ; (void)unused_tmp_array_for_use_macro; } while (false); |
276 | } |
277 | |
278 | void SharedMutex::UnlockShared() { |
279 | DCHECK(TryReleaseSharedMutex(this))((void) 0); |
280 | int result = pthread_rwlock_unlock(&native_handle_); |
281 | DCHECK_EQ(0, result)((void) 0); |
282 | USE(result)do { ::v8::base::Use unused_tmp_array_for_use_macro[]{result} ; (void)unused_tmp_array_for_use_macro; } while (false); |
283 | } |
284 | |
285 | void SharedMutex::UnlockExclusive() { |
286 | // Same code as {UnlockShared} on POSIX. |
287 | UnlockShared(); |
288 | } |
289 | |
290 | bool SharedMutex::TryLockShared() { |
291 | DCHECK(SharedMutexNotHeld(this))((void) 0); |
292 | bool result = pthread_rwlock_tryrdlock(&native_handle_) == 0; |
293 | if (result) DCHECK(TryHoldSharedMutex(this))((void) 0); |
294 | return result; |
295 | } |
296 | |
297 | bool SharedMutex::TryLockExclusive() { |
298 | DCHECK(SharedMutexNotHeld(this))((void) 0); |
299 | bool result = pthread_rwlock_trywrlock(&native_handle_) == 0; |
300 | if (result) DCHECK(TryHoldSharedMutex(this))((void) 0); |
301 | return result; |
302 | } |
303 | |
304 | #endif // !V8_OS_DARWIN |
305 | |
#elif V8_OS_WIN

// On Windows, Mutex wraps a slim reader/writer lock used in exclusive mode.
Mutex::Mutex() : native_handle_(SRWLOCK_INIT) {
#ifdef DEBUG
  level_ = 0;  // Debug-only lock depth, checked by Assert*AndMark/Unmark.
#endif
}

Mutex::~Mutex() {
  // SRW locks need no explicit destruction.
  DCHECK_EQ(0, level_);
}

void Mutex::Lock() {
  AcquireSRWLockExclusive(V8ToWindowsType(&native_handle_));
  AssertUnheldAndMark();
}

void Mutex::Unlock() {
  AssertHeldAndUnmark();
  ReleaseSRWLockExclusive(V8ToWindowsType(&native_handle_));
}

bool Mutex::TryLock() {
  if (!TryAcquireSRWLockExclusive(V8ToWindowsType(&native_handle_))) {
    return false;
  }
  AssertUnheldAndMark();
  return true;
}
339 | |
340 | |
// On Windows, RecursiveMutex wraps a CRITICAL_SECTION, which is recursive
// by design.
RecursiveMutex::RecursiveMutex() {
  InitializeCriticalSection(V8ToWindowsType(&native_handle_));
#ifdef DEBUG
  level_ = 0;  // Debug-only recursion depth.
#endif
}

RecursiveMutex::~RecursiveMutex() {
  DeleteCriticalSection(V8ToWindowsType(&native_handle_));
  DCHECK_EQ(0, level_);
}

void RecursiveMutex::Lock() {
  EnterCriticalSection(V8ToWindowsType(&native_handle_));
#ifdef DEBUG
  DCHECK_LE(0, level_);
  level_++;
#endif
}

void RecursiveMutex::Unlock() {
#ifdef DEBUG
  DCHECK_LT(0, level_);
  level_--;
#endif
  LeaveCriticalSection(V8ToWindowsType(&native_handle_));
}

bool RecursiveMutex::TryLock() {
  if (!TryEnterCriticalSection(V8ToWindowsType(&native_handle_))) {
    return false;
  }
#ifdef DEBUG
  DCHECK_LE(0, level_);
  level_++;
#endif
  return true;
}
383 | |
// On Windows, SharedMutex wraps a slim reader/writer lock, using its shared
// mode for readers and exclusive mode for writers.
SharedMutex::SharedMutex() : native_handle_(SRWLOCK_INIT) {}

SharedMutex::~SharedMutex() {}

void SharedMutex::LockShared() {
  DCHECK(TryHoldSharedMutex(this));
  AcquireSRWLockShared(V8ToWindowsType(&native_handle_));
}

void SharedMutex::LockExclusive() {
  DCHECK(TryHoldSharedMutex(this));
  AcquireSRWLockExclusive(V8ToWindowsType(&native_handle_));
}

void SharedMutex::UnlockShared() {
  DCHECK(TryReleaseSharedMutex(this));
  ReleaseSRWLockShared(V8ToWindowsType(&native_handle_));
}

void SharedMutex::UnlockExclusive() {
  DCHECK(TryReleaseSharedMutex(this));
  ReleaseSRWLockExclusive(V8ToWindowsType(&native_handle_));
}

bool SharedMutex::TryLockShared() {
  DCHECK(SharedMutexNotHeld(this));
  bool result = TryAcquireSRWLockShared(V8ToWindowsType(&native_handle_));
  if (result) DCHECK(TryHoldSharedMutex(this));
  return result;
}

bool SharedMutex::TryLockExclusive() {
  DCHECK(SharedMutexNotHeld(this));
  bool result = TryAcquireSRWLockExclusive(V8ToWindowsType(&native_handle_));
  if (result) DCHECK(TryHoldSharedMutex(this));
  return result;
}
421 | |
422 | #elif V8_OS_STARBOARD |
423 | |
424 | Mutex::Mutex() { SbMutexCreate(&native_handle_); } |
425 | |
426 | Mutex::~Mutex() { SbMutexDestroy(&native_handle_); } |
427 | |
428 | void Mutex::Lock() { SbMutexAcquire(&native_handle_); } |
429 | |
430 | void Mutex::Unlock() { SbMutexRelease(&native_handle_); } |
431 | |
432 | RecursiveMutex::RecursiveMutex() {} |
433 | |
434 | RecursiveMutex::~RecursiveMutex() {} |
435 | |
436 | void RecursiveMutex::Lock() { native_handle_.Acquire(); } |
437 | |
438 | void RecursiveMutex::Unlock() { native_handle_.Release(); } |
439 | |
440 | bool RecursiveMutex::TryLock() { return native_handle_.AcquireTry(); } |
441 | |
442 | SharedMutex::SharedMutex() = default; |
443 | |
444 | SharedMutex::~SharedMutex() = default; |
445 | |
446 | void SharedMutex::LockShared() { |
447 | DCHECK(TryHoldSharedMutex(this))((void) 0); |
448 | native_handle_.AcquireReadLock(); |
449 | } |
450 | |
451 | void SharedMutex::LockExclusive() { |
452 | DCHECK(TryHoldSharedMutex(this))((void) 0); |
453 | native_handle_.AcquireWriteLock(); |
454 | } |
455 | |
456 | void SharedMutex::UnlockShared() { |
457 | DCHECK(TryReleaseSharedMutex(this))((void) 0); |
458 | native_handle_.ReleaseReadLock(); |
459 | } |
460 | |
461 | void SharedMutex::UnlockExclusive() { |
462 | DCHECK(TryReleaseSharedMutex(this))((void) 0); |
463 | native_handle_.ReleaseWriteLock(); |
464 | } |
465 | |
466 | bool SharedMutex::TryLockShared() { |
467 | DCHECK(SharedMutexNotHeld(this))((void) 0); |
468 | return false; |
469 | } |
470 | |
471 | bool SharedMutex::TryLockExclusive() { |
472 | DCHECK(SharedMutexNotHeld(this))((void) 0); |
473 | return false; |
474 | } |
475 | #endif // V8_OS_STARBOARD |
476 | |
477 | } // namespace base |
478 | } // namespace v8 |