// <mutex> -*- C++ -*-

// Copyright (C) 2003-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_MUTEX
#define _GLIBCXX_MUTEX 1

#pragma GCC system_header

#if __cplusplus < 201103L
# include <bits/c++0x_warning.h>
#else

#include <tuple>
#include <chrono>
#include <exception>
#include <type_traits>
#include <system_error>
#include <bits/std_mutex.h>
#include <bits/unique_lock.h>
#if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
# include <condition_variable>
# include <thread>
#endif
#ifndef _GLIBCXX_HAVE_TLS
# include <bits/std_function.h>
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @ingroup mutexes
   * @{
   */

#ifdef _GLIBCXX_HAS_GTHREADS

  // Common base class for std::recursive_mutex and std::recursive_timed_mutex
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t __native_type;

    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    __native_type _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    __native_type _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };

  /// The standard recursive mutex type.
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type* native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }
  };
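
  // Usage sketch (illustrative only, not part of this header): a
  // recursive_mutex may be re-locked by the thread that already owns it,
  // so a locked member function can safely call another locked member
  // function, provided every lock() is balanced by an unlock().
  //
  //   #include <mutex>
  //
  //   struct Counter                   // hypothetical example type
  //   {
  //     std::recursive_mutex mtx;
  //     int value = 0;
  //
  //     void increment()
  //     { std::lock_guard<std::recursive_mutex> l(mtx); ++value; }
  //
  //     void increment_twice()
  //     {
  //       std::lock_guard<std::recursive_mutex> l(mtx);
  //       increment();                 // re-locking from the same thread is OK
  //       increment();
  //     }
  //   };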

#if _GTHREAD_USE_MUTEX_TIMEDLOCK
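  // CRTP helper: implements the try_lock_for/try_lock_until logic shared
  // by timed_mutex and recursive_timed_mutex in terms of the derived
  // class's _M_timedlock(__gthread_time_t) member.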
  template<typename _Derived>
    class __timed_mutex_impl
    {
    protected:
      typedef chrono::high_resolution_clock __clock_t;

      template<typename _Rep, typename _Period>
        bool
        _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
        {
          using chrono::steady_clock;
          auto __rt = chrono::duration_cast<steady_clock::duration>(__rtime);
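          // If steady_clock's tick is coarser than _Period, the cast above
          // may have truncated; round up one tick so we never wait for less
          // than the requested duration.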
          if (ratio_greater<steady_clock::period, _Period>())
            ++__rt;
          return _M_try_lock_until(steady_clock::now() + __rt);
        }

      template<typename _Duration>
        bool
        _M_try_lock_until(const chrono::time_point<__clock_t,
                                                   _Duration>& __atime)
        {
          auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
          auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

          __gthread_time_t __ts = {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

          return static_cast<_Derived*>(this)->_M_timedlock(__ts);
        }

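      // Overload for user-defined clocks: convert the absolute timeout into
      // a wait relative to this class's reference clock.  If _Clock is not
      // steady, the wait may finish early or late when _Clock is adjusted.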
      template<typename _Clock, typename _Duration>
        bool
        _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
        {
          auto __rtime = __atime - _Clock::now();
          return _M_try_lock_until(__clock_t::now() + __rtime);
        }
    };

  /// The standard timed mutex type.
  class timed_mutex
  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
  {
  public:
    typedef __native_type* native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<timed_mutex>;

    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }
  };
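
  // Usage sketch (illustrative only, not part of this header): bound the
  // time spent waiting for a lock instead of blocking indefinitely.
  //
  //   #include <chrono>
  //   #include <mutex>
  //
  //   std::timed_mutex m;              // hypothetical example object
  //
  //   bool do_work_if_available()
  //   {
  //     if (m.try_lock_for(std::chrono::milliseconds(100)))
  //       {
  //         // ...exclusive work...
  //         m.unlock();
  //         return true;
  //       }
  //     return false;                  // gave up after roughly 100ms
  //   }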

  /// The standard recursive timed mutex type.
  class recursive_timed_mutex
  : private __recursive_mutex_base,
    public __timed_mutex_impl<recursive_timed_mutex>
  {
  public:
    typedef __native_type* native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<recursive_timed_mutex>;

    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }
  };

#else // !_GTHREAD_USE_MUTEX_TIMEDLOCK
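
  // The thread library has no native timed-lock operation in this
  // configuration, so the timed mutex types below are emulated: a plain
  // std::mutex guards the ownership state and a std::condition_variable
  // is used to wait for that state to change.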

  /// The standard timed mutex type.
  class timed_mutex
  {
    mutex _M_mut;
    condition_variable _M_cv;
    bool _M_locked = false;

  public:

    timed_mutex() = default;
    ~timed_mutex() { __glibcxx_assert( !_M_locked ); }

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, [&]{ return !_M_locked; });
      _M_locked = true;
    }

    bool
    try_lock()
    {
      lock_guard<mutex> __lk(_M_mut);
      if (_M_locked)
        return false;
      _M_locked = true;
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
          return false;
        _M_locked = true;
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
          return false;
        _M_locked = true;
        return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_locked );
      _M_locked = false;
      _M_cv.notify_one();
    }
  };

  /// The standard recursive timed mutex type.
  class recursive_timed_mutex
  {
    mutex _M_mut;
    condition_variable _M_cv;
    thread::id _M_owner;
    unsigned _M_count = 0;

    // Predicate type that tests whether the current thread can lock a mutex.
    struct _Can_lock
    {
      // Returns true if the mutex is unlocked or is locked by _M_caller.
      bool
      operator()() const noexcept
      { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }

      const recursive_timed_mutex* _M_mx;
      thread::id _M_caller;
    };

  public:

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, __can_lock);
      if (_M_count == -1u)
        __throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
      _M_owner = __id;
      ++_M_count;
    }

    bool
    try_lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      lock_guard<mutex> __lk(_M_mut);
      if (!__can_lock())
        return false;
      if (_M_count == -1u)
        return false;
      _M_owner = __id;
      ++_M_count;
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __id = this_thread::get_id();
        _Can_lock __can_lock{this, __id};
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
          return false;
        if (_M_count == -1u)
          return false;
        _M_owner = __id;
        ++_M_count;
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        auto __id = this_thread::get_id();
        _Can_lock __can_lock{this, __id};
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_until(__lk, __atime, __can_lock))
          return false;
        if (_M_count == -1u)
          return false;
        _M_owner = __id;
        ++_M_count;
        return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_owner == this_thread::get_id() );
      __glibcxx_assert( _M_count > 0 );
      if (--_M_count == 0)
        {
          _M_owner = {};
          _M_cv.notify_one();
        }
    }
  };

#endif
#endif // _GLIBCXX_HAS_GTHREADS

  template<typename _Lock>
    inline unique_lock<_Lock>
    __try_to_lock(_Lock& __l)
    { return unique_lock<_Lock>{__l, try_to_lock}; }

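  // Helper for the generic std::try_lock: try to lock the _Idx-th element
  // of a tuple of lockables, then recurse over the remaining elements.  On
  // complete success __idx is set to -1 and every intermediate unique_lock
  // releases ownership to the caller; otherwise __idx names the element
  // that could not be locked and the locks already taken are released as
  // the unique_locks unwind.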
  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
        static void
        __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
        {
          __idx = _Idx;
          auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              constexpr bool __cont = _Idx + 2 < sizeof...(_Lock);
              using __try_locker = __try_lock_impl<_Idx + 1, __cont>;
              __try_locker::__do_try_lock(__locks, __idx);
              if (__idx == -1)
                __lock.release();
            }
        }
    };

  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
        static void
        __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
        {
          __idx = _Idx;
          auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __idx = -1;
              __lock.release();
            }
        }
    };

  /** @brief Generic try_lock.
   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      __try_lock_impl<0>::__do_try_lock(__locks, __idx);
      return __idx;
    }
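
  // Usage sketch (illustrative only, not part of this header):
  //
  //   std::mutex a, b;                 // hypothetical example objects
  //
  //   bool attempt()
  //   {
  //     if (std::try_lock(a, b) == -1) // -1 means both were acquired
  //       {
  //         // adopt the locks so they are released on scope exit
  //         std::lock_guard<std::mutex> la(a, std::adopt_lock);
  //         std::lock_guard<std::mutex> lb(b, std::adopt_lock);
  //         // ...work under both locks...
  //         return true;
  //       }
  //     return false;                  // one mutex was busy; nothing is locked
  //   }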

  /** @brief Generic lock.
   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If the call exits via an exception, any locks that were
   *  obtained will be released.
   */
  template<typename _L1, typename _L2, typename... _L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      while (true)
        {
          using __try_locker = __try_lock_impl<0, sizeof...(_L3) != 0>;
          unique_lock<_L1> __first(__l1);
          int __idx;
          auto __locks = std::tie(__l2, __l3...);
          __try_locker::__do_try_lock(__locks, __idx);
          if (__idx == -1)
            {
              __first.release();
              return;
            }
        }
    }
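
  // Usage sketch (illustrative only, not part of this header): std::lock
  // acquires several mutexes without deadlock, even if other threads lock
  // them in a different order.
  //
  //   std::mutex a, b;                 // hypothetical example objects
  //
  //   void transfer()
  //   {
  //     std::lock(a, b);               // deadlock-free acquisition of both
  //     std::lock_guard<std::mutex> la(a, std::adopt_lock);
  //     std::lock_guard<std::mutex> lb(b, std::adopt_lock);
  //     // ...work under both locks; released on scope exit...
  //   }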

#if __cplusplus >= 201703L
#define __cpp_lib_scoped_lock 201703
  /** @brief A scoped lock type for multiple lockable objects.
   *
   * A scoped_lock controls mutex ownership within a scope, releasing
   * ownership in the destructor.
   */
  template<typename... _MutexTypes>
    class scoped_lock
    {
    public:
      explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
      { std::lock(__m...); }

      explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
      : _M_devices(std::tie(__m...))
      { } // calling thread owns mutex

      ~scoped_lock()
      {
        std::apply([](_MutexTypes&... __m) {
          char __i[] __attribute__((__unused__)) = { (__m.unlock(), 0)... };
        }, _M_devices);
      }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

    private:
      tuple<_MutexTypes&...> _M_devices;
    };

  template<>
    class scoped_lock<>
    {
    public:
      explicit scoped_lock() = default;
      explicit scoped_lock(adopt_lock_t) noexcept { }
      ~scoped_lock() = default;

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;
    };

  template<typename _Mutex>
    class scoped_lock<_Mutex>
    {
    public:
      using mutex_type = _Mutex;

      explicit scoped_lock(mutex_type& __m) : _M_device(__m)
      { _M_device.lock(); }

      explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
      : _M_device(__m)
      { } // calling thread owns mutex

      ~scoped_lock()
      { _M_device.unlock(); }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

    private:
      mutex_type& _M_device;
    };
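
  // Usage sketch (illustrative only, not part of this header): scoped_lock
  // is the C++17 way to hold one or more mutexes for the duration of a scope.
  //
  //   std::mutex a, b;                 // hypothetical example objects
  //
  //   void transfer()
  //   {
  //     std::scoped_lock l(a, b);      // locks both, deadlock-free
  //     // ...work under both locks; released when l is destroyed...
  //   }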
#endif // C++17

#ifdef _GLIBCXX_HAS_GTHREADS
  /// Flag type passed to std::call_once so that a callable runs only once.
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type _M_once = __GTHREAD_ONCE_INIT;

  public:
    /// Constructor
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };

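  // call_once() cannot pass arguments through __gthread_once(), so the
  // callable is handed to __once_proxy() out of band: via thread-local
  // pointers when TLS is available, otherwise via a mutex-protected
  // global std::function.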
#ifdef _GLIBCXX_HAVE_TLS
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();
#else
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  extern "C" void __once_proxy(void);

  /// Invoke a callable only once, even when called from several threads.
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 2442. call_once() shouldn't DECAY_COPY()
      auto __callable = [&] {
          std::__invoke(std::forward<_Callable>(__f),
                        std::forward<_Args>(__args)...);
      };
#ifdef _GLIBCXX_HAVE_TLS
      __once_callable = std::__addressof(__callable);
      __once_call = []{ (*(decltype(__callable)*)__once_callable)(); };
#else
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      __once_functor = __callable;
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&__once._M_once, &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

#ifdef __clang_analyzer__
      // PR libstdc++/82481
      __once_callable = nullptr;
      __once_call = nullptr;
#endif

      if (__e)
        __throw_system_error(__e);
    }
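
  // Usage sketch (illustrative only, not part of this header): initialise
  // shared state exactly once, even when several threads race to do it.
  //
  //   std::once_flag flag;             // hypothetical example objects
  //   int* table = nullptr;
  //
  //   void init_table() { table = new int[256](); }
  //
  //   int* get_table()
  //   {
  //     std::call_once(flag, init_table);  // at most one thread runs it
  //     return table;
  //   }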
#endif // _GLIBCXX_HAS_GTHREADS

  /// @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++11

#endif // _GLIBCXX_MUTEX