libstdc++
mutex
Go to the documentation of this file.
1// <mutex> -*- C++ -*-
2
3// Copyright (C) 2003-2022 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/mutex
26 * This is a Standard C++ Library header.
27 */
28
29#ifndef _GLIBCXX_MUTEX
30#define _GLIBCXX_MUTEX 1
31
32#pragma GCC system_header
33
34#include <bits/requires_hosted.h> // concurrency
35
36#if __cplusplus < 201103L
37# include <bits/c++0x_warning.h>
38#else
39
40#include <tuple>
41#include <exception>
42#include <type_traits>
43#include <system_error>
44#include <bits/chrono.h>
45#include <bits/std_mutex.h>
46#include <bits/unique_lock.h>
47#if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
48# include <condition_variable>
49# include <thread>
50#endif
51#include <ext/atomicity.h> // __gnu_cxx::__is_single_threaded
52
53#if defined _GLIBCXX_HAS_GTHREADS && ! defined _GLIBCXX_HAVE_TLS
54# include <bits/std_function.h> // std::function
55#endif
56
57namespace std _GLIBCXX_VISIBILITY(default)
58{
59_GLIBCXX_BEGIN_NAMESPACE_VERSION
60
61 /**
62 * @addtogroup mutexes
63 * @{
64 */
65
66#ifdef _GLIBCXX_HAS_GTHREADS
67 /// @cond undocumented
68
69 // Common base class for std::recursive_mutex and std::recursive_timed_mutex
  // Common base class for std::recursive_mutex and std::recursive_timed_mutex.
  // Owns the underlying gthreads recursive mutex and manages its lifetime.
  class __recursive_mutex_base
  {
  protected:
    // The gthreads native recursive mutex type providing the implementation.
    typedef __gthread_recursive_mutex_t __native_type;

    // Mutexes are neither copyable nor assignable.
    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    // Static initializer is available: the native mutex needs no explicit
    // construction or destruction, so defaulted special members suffice.
    __native_type _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    // No static initializer: initialize and destroy at run time instead.
    __native_type _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };
95 /// @endcond
96
97 /** The standard recursive mutex type.
98 *
99 * A recursive mutex can be locked more than once by the same thread.
100 * Other threads cannot lock the mutex until the owning thread unlocks it
101 * as many times as it was locked.
102 *
103 * @headerfile mutex
104 * @since C++11
105 */
106 class recursive_mutex : private __recursive_mutex_base
107 {
108 public:
109 typedef __native_type* native_handle_type;
110
111 recursive_mutex() = default;
112 ~recursive_mutex() = default;
113
114 recursive_mutex(const recursive_mutex&) = delete;
115 recursive_mutex& operator=(const recursive_mutex&) = delete;
116
117 void
118 lock()
119 {
120 int __e = __gthread_recursive_mutex_lock(&_M_mutex);
121
122 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
123 if (__e)
124 __throw_system_error(__e);
125 }
126
127 bool
128 try_lock() noexcept
129 {
130 // XXX EINVAL, EAGAIN, EBUSY
131 return !__gthread_recursive_mutex_trylock(&_M_mutex);
132 }
133
134 void
135 unlock()
136 {
137 // XXX EINVAL, EAGAIN, EBUSY
138 __gthread_recursive_mutex_unlock(&_M_mutex);
139 }
140
141 native_handle_type
142 native_handle() noexcept
143 { return &_M_mutex; }
144 };
145
146#if _GTHREAD_USE_MUTEX_TIMEDLOCK
147 /// @cond undocumented
148
149 template<typename _Derived>
150 class __timed_mutex_impl
151 {
152 protected:
153 template<typename _Rep, typename _Period>
154 bool
155 _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
156 {
157#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
158 using __clock = chrono::steady_clock;
159#else
160 using __clock = chrono::system_clock;
161#endif
162
163 auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
165 ++__rt;
166 return _M_try_lock_until(__clock::now() + __rt);
167 }
168
169 template<typename _Duration>
170 bool
171 _M_try_lock_until(const chrono::time_point<chrono::system_clock,
172 _Duration>& __atime)
173 {
174 auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
175 auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
176
177 __gthread_time_t __ts = {
178 static_cast<std::time_t>(__s.time_since_epoch().count()),
179 static_cast<long>(__ns.count())
180 };
181
182 return static_cast<_Derived*>(this)->_M_timedlock(__ts);
183 }
184
185#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
186 template<typename _Duration>
187 bool
188 _M_try_lock_until(const chrono::time_point<chrono::steady_clock,
189 _Duration>& __atime)
190 {
191 auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
192 auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
193
194 __gthread_time_t __ts = {
195 static_cast<std::time_t>(__s.time_since_epoch().count()),
196 static_cast<long>(__ns.count())
197 };
198
199 return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
200 __ts);
201 }
202#endif
203
204 template<typename _Clock, typename _Duration>
205 bool
206 _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
207 {
208#if __cplusplus > 201703L
209 static_assert(chrono::is_clock_v<_Clock>);
210#endif
211 // The user-supplied clock may not tick at the same rate as
212 // steady_clock, so we must loop in order to guarantee that
213 // the timeout has expired before returning false.
214 auto __now = _Clock::now();
215 do {
216 auto __rtime = __atime - __now;
217 if (_M_try_lock_for(__rtime))
218 return true;
219 __now = _Clock::now();
220 } while (__atime > __now);
221 return false;
222 }
223 };
224 /// @endcond
225
226 /** The standard timed mutex type.
227 *
228 * A non-recursive mutex that supports a timeout when trying to acquire the
229 * lock.
230 *
231 * @headerfile mutex
232 * @since C++11
233 */
235 : private __mutex_base, public __timed_mutex_impl<timed_mutex>
236 {
237 public:
238 typedef __native_type* native_handle_type;
239
240 timed_mutex() = default;
241 ~timed_mutex() = default;
242
243 timed_mutex(const timed_mutex&) = delete;
244 timed_mutex& operator=(const timed_mutex&) = delete;
245
246 void
247 lock()
248 {
249 int __e = __gthread_mutex_lock(&_M_mutex);
250
251 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
252 if (__e)
253 __throw_system_error(__e);
254 }
255
256 bool
257 try_lock() noexcept
258 {
259 // XXX EINVAL, EAGAIN, EBUSY
260 return !__gthread_mutex_trylock(&_M_mutex);
261 }
262
263 template <class _Rep, class _Period>
264 bool
265 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
266 { return _M_try_lock_for(__rtime); }
267
268 template <class _Clock, class _Duration>
269 bool
270 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
271 { return _M_try_lock_until(__atime); }
272
273 void
274 unlock()
275 {
276 // XXX EINVAL, EAGAIN, EBUSY
277 __gthread_mutex_unlock(&_M_mutex);
278 }
279
280 native_handle_type
281 native_handle() noexcept
282 { return &_M_mutex; }
283
284 private:
285 friend class __timed_mutex_impl<timed_mutex>;
286
287 bool
288 _M_timedlock(const __gthread_time_t& __ts)
289 { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }
290
291#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
292 bool
293 _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
294 { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
295#endif
296 };
297
298 /** The standard recursive timed mutex type.
299 *
300 * A recursive mutex that supports a timeout when trying to acquire the
301 * lock. A recursive mutex can be locked more than once by the same thread.
302 * Other threads cannot lock the mutex until the owning thread unlocks it
303 * as many times as it was locked.
304 *
305 * @headerfile mutex
306 * @since C++11
307 */
309 : private __recursive_mutex_base,
310 public __timed_mutex_impl<recursive_timed_mutex>
311 {
312 public:
313 typedef __native_type* native_handle_type;
314
315 recursive_timed_mutex() = default;
316 ~recursive_timed_mutex() = default;
317
319 recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
320
321 void
322 lock()
323 {
324 int __e = __gthread_recursive_mutex_lock(&_M_mutex);
325
326 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
327 if (__e)
328 __throw_system_error(__e);
329 }
330
331 bool
332 try_lock() noexcept
333 {
334 // XXX EINVAL, EAGAIN, EBUSY
335 return !__gthread_recursive_mutex_trylock(&_M_mutex);
336 }
337
338 template <class _Rep, class _Period>
339 bool
340 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
341 { return _M_try_lock_for(__rtime); }
342
343 template <class _Clock, class _Duration>
344 bool
345 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
346 { return _M_try_lock_until(__atime); }
347
348 void
349 unlock()
350 {
351 // XXX EINVAL, EAGAIN, EBUSY
352 __gthread_recursive_mutex_unlock(&_M_mutex);
353 }
354
355 native_handle_type
356 native_handle() noexcept
357 { return &_M_mutex; }
358
359 private:
360 friend class __timed_mutex_impl<recursive_timed_mutex>;
361
362 bool
363 _M_timedlock(const __gthread_time_t& __ts)
364 { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }
365
366#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
367 bool
368 _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
369 { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
370#endif
371 };
372
373#else // !_GTHREAD_USE_MUTEX_TIMEDLOCK
374
  /// timed_mutex, implemented with a mutex + condition variable when the
  /// platform has no native timed-lock primitive.
  class timed_mutex
  {
    mutex _M_mut;               // protects _M_locked
    condition_variable _M_cv;   // signalled when the mutex becomes free
    bool _M_locked = false;     // true while some thread owns the mutex

  public:

    timed_mutex() = default;
    // Destroying a locked mutex is undefined; assert in debug mode.
    ~timed_mutex() { __glibcxx_assert( !_M_locked ); }

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    // Block until the mutex is free, then take ownership.
    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, [&]{ return !_M_locked; });
      _M_locked = true;
    }

    // Take ownership only if the mutex is currently free.
    bool
    try_lock()
    {
      lock_guard<mutex> __lk(_M_mut);
      if (_M_locked)
	return false;
      _M_locked = true;
      return true;
    }

    // As lock(), but give up after the relative timeout expires.
    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
	  return false;
	_M_locked = true;
	return true;
      }

    // As lock(), but give up at the absolute deadline.
    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
	  return false;
	_M_locked = true;
	return true;
      }

    // Release ownership and wake one waiter.
    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_locked );
      _M_locked = false;
      // Only one waiter can make progress, so notify_one suffices.
      _M_cv.notify_one();
    }
  };
439
  /// recursive_timed_mutex, implemented with a mutex + condition variable
  /// when the platform has no native timed-lock primitive.
  class recursive_timed_mutex
  {
    mutex _M_mut;               // protects _M_owner and _M_count
    condition_variable _M_cv;   // signalled when the mutex becomes free
    thread::id _M_owner;        // id of the owning thread, or default id
    unsigned _M_count = 0;      // recursion depth; 0 means unlocked

    // Predicate type that tests whether the current thread can lock a mutex.
    struct _Can_lock
    {
      // Returns true if the mutex is unlocked or is locked by _M_caller.
      bool
      operator()() const noexcept
      { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }

      const recursive_timed_mutex* _M_mx;
      thread::id _M_caller;
    };

  public:

    recursive_timed_mutex() = default;
    // Destroying a locked mutex is undefined; assert in debug mode.
    ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    // Block until this thread may lock, then increment the lock depth.
    void
    lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, __can_lock);
      // The count would overflow on another recursive lock; report EAGAIN.
      if (_M_count == -1u)
	__throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
      _M_owner = __id;
      ++_M_count;
    }

    // Lock only if immediately possible (free, or already owned by us).
    bool
    try_lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      lock_guard<mutex> __lk(_M_mut);
      if (!__can_lock())
	return false;
      // Refuse rather than overflow the recursion count.
      if (_M_count == -1u)
	return false;
      _M_owner = __id;
      ++_M_count;
      return true;
    }

    // As lock(), but give up after the relative timeout expires.
    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __id = this_thread::get_id();
	_Can_lock __can_lock{this, __id};
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
	  return false;
	if (_M_count == -1u)
	  return false;
	_M_owner = __id;
	++_M_count;
	return true;
      }

    // As lock(), but give up at the absolute deadline.
    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	auto __id = this_thread::get_id();
	_Can_lock __can_lock{this, __id};
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_until(__lk, __atime, __can_lock))
	  return false;
	if (_M_count == -1u)
	  return false;
	_M_owner = __id;
	++_M_count;
	return true;
      }

    // Decrement the lock depth; on reaching zero release ownership and
    // wake one waiter.
    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_owner == this_thread::get_id() );
      __glibcxx_assert( _M_count > 0 );
      if (--_M_count == 0)
	{
	  _M_owner = {};
	  _M_cv.notify_one();
	}
    }
  };
541
542#endif
543#endif // _GLIBCXX_HAS_GTHREADS
544
545 /// @cond undocumented
546 namespace __detail
547 {
548 // Lock the last lockable, after all previous ones are locked.
549 template<typename _Lockable>
550 inline int
551 __try_lock_impl(_Lockable& __l)
552 {
553 if (unique_lock<_Lockable> __lock{__l, try_to_lock})
554 {
555 __lock.release();
556 return -1;
557 }
558 else
559 return 0;
560 }
561
    // Lock each lockable in turn.
    // Use iteration if all lockables are the same type, recursion otherwise.
    // Returns -1 if every lockable was acquired, otherwise the 0-based
    // index of the first one that could not be (leaving none locked).
    template<typename _L0, typename... _Lockables>
      inline int
      __try_lock_impl(_L0& __l0, _Lockables&... __lockables)
      {
#if __cplusplus >= 201703L
	if constexpr ((is_same_v<_L0, _Lockables> && ...))
	  {
	    // Homogeneous case: build an array of deferred locks and
	    // acquire them with a simple loop.
	    constexpr int _Np = 1 + sizeof...(_Lockables);
	    unique_lock<_L0> __locks[_Np] = {
	      {__l0, defer_lock}, {__lockables, defer_lock}...
	    };
	    for (int __i = 0; __i < _Np; ++__i)
	      {
		if (!__locks[__i].try_lock())
		  {
		    // Roll back everything acquired so far.
		    const int __failed = __i;
		    while (__i--)
		      __locks[__i].unlock();
		    return __failed;
		  }
	      }
	    // All acquired: release ownership from the unique_locks so the
	    // caller keeps the mutexes locked.
	    for (auto& __l : __locks)
	      __l.release();
	    return -1;
	  }
	else
#endif
	// Heterogeneous case: lock the head, then recurse on the tail.
	if (unique_lock<_L0> __lock{__l0, try_to_lock})
	  {
	    int __idx = __detail::__try_lock_impl(__lockables...);
	    if (__idx == -1)
	      {
		__lock.release();
		return -1;
	      }
	    // A later lockable failed: report its index relative to __l0.
	    return __idx + 1;
	  }
	else
	  return 0;
      }
604
605 } // namespace __detail
606 /// @endcond
607
608 /** @brief Generic try_lock.
609 * @param __l1 Meets Lockable requirements (try_lock() may throw).
610 * @param __l2 Meets Lockable requirements (try_lock() may throw).
611 * @param __l3 Meets Lockable requirements (try_lock() may throw).
612 * @return Returns -1 if all try_lock() calls return true. Otherwise returns
613 * a 0-based index corresponding to the argument that returned false.
614 * @post Either all arguments are locked, or none will be.
615 *
616 * Sequentially calls try_lock() on each argument.
617 */
  template<typename _L1, typename _L2, typename... _L3>
    inline int
    try_lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      // Delegate to the helper, which either locks everything (returns -1)
      // or locks nothing (returns the index of the failed lockable).
      return __detail::__try_lock_impl(__l1, __l2, __l3...);
    }
624
625 /// @cond undocumented
626 namespace __detail
627 {
    // This function can recurse up to N levels deep, for N = 1+sizeof...(L1).
    // On each recursion the lockables are rotated left one position,
    // e.g. depth 0: l0, l1, l2; depth 1: l1, l2, l0; depth 2: l2, l0, l1.
    // When a call to l_i.try_lock() fails it recurses/returns to depth=i
    // so that l_i is the first argument, and then blocks until l_i is locked.
    // __i is the index of the lockable to block on next; -1 means finished.
    template<typename _L0, typename... _L1>
      void
      __lock_impl(int& __i, int __depth, _L0& __l0, _L1&... __l1)
      {
	while (__i >= __depth)
	  {
	    if (__i == __depth)
	      {
		int __failed = 1; // index that couldn't be locked
		{
		  // Block until __l0 is locked, then try the rest without
		  // blocking; on any failure the unique_lock releases __l0.
		  unique_lock<_L0> __first(__l0);
		  __failed += __detail::__try_lock_impl(__l1...);
		  if (!__failed)
		    {
		      __i = -1; // finished
		      __first.release();
		      return;
		    }
		}
#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
		// Give the thread holding the contended lock a chance to
		// release it before we block on it.
		__gthread_yield();
#endif
		// Next iteration blocks on the lockable that failed
		// (index taken modulo the total number of lockables).
		constexpr auto __n = 1 + sizeof...(_L1);
		__i = (__depth + __failed) % __n;
	      }
	    else // rotate left until l_i is first.
	      __detail::__lock_impl(__i, __depth + 1, __l1..., __l0);
	  }
      }
662
663 } // namespace __detail
664 /// @endcond
665
666 /** @brief Generic lock.
667 * @param __l1 Meets Lockable requirements (try_lock() may throw).
668 * @param __l2 Meets Lockable requirements (try_lock() may throw).
669 * @param __l3 Meets Lockable requirements (try_lock() may throw).
670 * @throw An exception thrown by an argument's lock() or try_lock() member.
671 * @post All arguments are locked.
672 *
673 * All arguments are locked via a sequence of calls to lock(), try_lock()
674 * and unlock(). If this function exits via an exception any locks that
675 * were obtained will be released.
676 */
  template<typename _L1, typename _L2, typename... _L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
#if __cplusplus >= 201703L
      if constexpr (is_same_v<_L1, _L2> && (is_same_v<_L1, _L3> && ...))
	{
	  // Homogeneous case: iterative "smart and polite" algorithm.
	  // Block on one mutex, then try_lock the rest in rotating order;
	  // on failure release everything and next block on the mutex that
	  // was contended, which avoids deadlock with other callers.
	  constexpr int _Np = 2 + sizeof...(_L3);
	  unique_lock<_L1> __locks[] = {
	    {__l1, defer_lock}, {__l2, defer_lock}, {__l3, defer_lock}...
	  };
	  int __first = 0;
	  do {
	    __locks[__first].lock();
	    for (int __j = 1; __j < _Np; ++__j)
	      {
		const int __idx = (__first + __j) % _Np;
		if (!__locks[__idx].try_lock())
		  {
		    // Unlock everything acquired this round (in reverse)
		    // and restart, blocking on the contended mutex.
		    for (int __k = __j; __k != 0; --__k)
		      __locks[(__first + __k - 1) % _Np].unlock();
		    __first = __idx;
		    break;
		  }
	      }
	  } while (!__locks[__first].owns_lock());

	  // All locked: transfer ownership to the caller.
	  for (auto& __l : __locks)
	    __l.release();
	}
      else
#endif
      {
	// Heterogeneous case: recursive helper with the same strategy.
	int __i = 0;
	__detail::__lock_impl(__i, 0, __l1, __l2, __l3...);
      }
    }
714
715#if __cplusplus >= 201703L
716#define __cpp_lib_scoped_lock 201703L
717 /** @brief A scoped lock type for multiple lockable objects.
718 *
719 * A scoped_lock controls mutex ownership within a scope, releasing
720 * ownership in the destructor.
721 *
722 * @headerfile mutex
723 * @since C++17
724 */
725 template<typename... _MutexTypes>
727 {
728 public:
729 explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
730 { std::lock(__m...); }
731
732 explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
733 : _M_devices(std::tie(__m...))
734 { } // calling thread owns mutex
735
737 { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }
738
739 scoped_lock(const scoped_lock&) = delete;
740 scoped_lock& operator=(const scoped_lock&) = delete;
741
742 private:
743 tuple<_MutexTypes&...> _M_devices;
744 };
745
  // Explicit specialization for zero mutexes: nothing to lock or unlock,
  // so every member is trivial.
  template<>
    class scoped_lock<>
    {
    public:
      explicit scoped_lock() = default;
      explicit scoped_lock(adopt_lock_t) noexcept { }
      ~scoped_lock() = default;

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;
    };
757
758 template<typename _Mutex>
759 class scoped_lock<_Mutex>
760 {
761 public:
762 using mutex_type = _Mutex;
763
764 explicit scoped_lock(mutex_type& __m) : _M_device(__m)
765 { _M_device.lock(); }
766
767 explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
768 : _M_device(__m)
769 { } // calling thread owns mutex
770
771 ~scoped_lock()
772 { _M_device.unlock(); }
773
774 scoped_lock(const scoped_lock&) = delete;
775 scoped_lock& operator=(const scoped_lock&) = delete;
776
777 private:
778 mutex_type& _M_device;
779 };
780#endif // C++17
781
782#ifdef _GLIBCXX_HAS_GTHREADS
783 /// Flag type used by std::call_once
785 {
786 constexpr once_flag() noexcept = default;
787
788 /// Deleted copy constructor
789 once_flag(const once_flag&) = delete;
790 /// Deleted assignment operator
791 once_flag& operator=(const once_flag&) = delete;
792
793 private:
794 // For gthreads targets a pthread_once_t is used with pthread_once, but
795 // for most targets this doesn't work correctly for exceptional executions.
796 __gthread_once_t _M_once = __GTHREAD_ONCE_INIT;
797
798 struct _Prepare_execution;
799
800 template<typename _Callable, typename... _Args>
801 friend void
802 call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
803 };
804
805 /// @cond undocumented
806# ifdef _GLIBCXX_HAVE_TLS
807 // If TLS is available use thread-local state for the type-erased callable
808 // that is being run by std::call_once in the current thread.
809 extern __thread void* __once_callable;
810 extern __thread void (*__once_call)();
811
  // RAII type to set up state for pthread_once call.
  // Publishes the type-erased callable via the thread-local pointers so
  // that __once_proxy can invoke it, and clears them on scope exit.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
	// Store address in thread-local pointer:
	__once_callable = std::__addressof(__c);
	// Trampoline function to invoke the closure via thread-local pointer:
	__once_call = [] { (*static_cast<_Callable*>(__once_callable))(); };
      }

    ~_Prepare_execution()
    {
      // PR libstdc++/82481
      // Reset the pointers so a dangling callable is never invoked.
      __once_callable = nullptr;
      __once_call = nullptr;
    }

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
835
836# else
837 // Without TLS use a global std::mutex and store the callable in a
838 // global std::function.
839 extern function<void()> __once_functor;
840
841 extern void
842 __set_once_functor_lock_ptr(unique_lock<mutex>*);
843
844 extern mutex&
845 __get_once_mutex();
846
  // RAII type to set up state for pthread_once call.
  // Without TLS the callable is published through a global std::function,
  // serialized by a global mutex that __once_proxy unlocks.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
	// Store the callable in the global std::function
	__once_functor = __c;
	// Hand the held lock to the implementation; __once_proxy releases it.
	__set_once_functor_lock_ptr(&_M_functor_lock);
      }

    ~_Prepare_execution()
    {
      // If the lock is still held here, the callable was never invoked;
      // withdraw the lock pointer before it dangles.
      if (_M_functor_lock)
	__set_once_functor_lock_ptr(nullptr);
    }

  private:
    // XXX This deadlocks if used recursively (PR 97949)
    unique_lock<mutex> _M_functor_lock{__get_once_mutex()};

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
872# endif
873 /// @endcond
874
875 // This function is passed to pthread_once by std::call_once.
876 // It runs __once_call() or __once_functor().
877 extern "C" void __once_proxy(void);
878
  /// Invoke a callable and synchronize with other calls using the same flag
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      // Closure type that runs the function
      auto __callable = [&] {
	std::__invoke(std::forward<_Callable>(__f),
		      std::forward<_Args>(__args)...);
      };

      // Publish the closure so the extern "C" __once_proxy can reach it.
      once_flag::_Prepare_execution __exec(__callable);

      // XXX pthread_once does not reset the flag if an exception is thrown.
      if (int __e = __gthread_once(&__once._M_once, &__once_proxy))
	__throw_system_error(__e);
    }
896
897#else // _GLIBCXX_HAS_GTHREADS
898
  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // There are two different std::once_flag interfaces, abstracting four
    // different implementations.
    // The single-threaded interface uses the _M_activate() and _M_finish(bool)
    // functions, which start and finish an active execution respectively.
    // See [thread.once.callonce] in C++11 for the definition of
    // active/passive/returning/exceptional executions.
    enum _Bits : int { _Init = 0, _Active = 1, _Done = 2 };

    // Current state of the flag; no synchronization needed as this
    // interface is only used on single-threaded targets.
    int _M_once = _Bits::_Init;

    // Check to see if all executions will be passive now.
    bool
    _M_passive() const noexcept;

    // Attempts to begin an active execution.
    bool _M_activate();

    // Must be called to complete an active execution.
    // The argument is true if the active execution was a returning execution,
    // false if it was an exceptional execution.
    void _M_finish(bool __returning) noexcept;

    // RAII helper to call _M_finish.
    struct _Active_execution
    {
      explicit _Active_execution(once_flag& __flag) : _M_flag(__flag) { }

      // Reports an exceptional execution unless _M_returning was set.
      ~_Active_execution() { _M_flag._M_finish(_M_returning); }

      _Active_execution(const _Active_execution&) = delete;
      _Active_execution& operator=(const _Active_execution&) = delete;

      once_flag& _M_flag;        // the flag being completed
      bool _M_returning = false; // set to true after the callable returns
    };

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
950
951 // Inline definitions of std::once_flag members for single-threaded targets.
952
  // True once a returning execution has completed: all later calls with
  // this flag are passive executions (no-ops).
  inline bool
  once_flag::_M_passive() const noexcept
  { return _M_once == _Bits::_Done; }
956
  // Begin an active execution if possible. Returns true on success,
  // false if the flag is already done; throws if an active execution is
  // already in progress (re-entrant call_once would self-deadlock).
  inline bool
  once_flag::_M_activate()
  {
    if (_M_once == _Bits::_Init) [[__likely__]]
      {
	_M_once = _Bits::_Active;
	return true;
      }
    else if (_M_passive()) // Caller should have checked this already.
      return false;
    else
      __throw_system_error(EDEADLK);
  }
970
  // Complete an active execution: mark the flag done after a returning
  // execution, or reset it so a later call can retry after an exception.
  inline void
  once_flag::_M_finish(bool __returning) noexcept
  { _M_once = __returning ? _Bits::_Done : _Bits::_Init; }
974
975 /// Invoke a callable and synchronize with other calls using the same flag
976 template<typename _Callable, typename... _Args>
977 inline void
978 call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
979 {
980 if (__once._M_passive())
981 return;
982 else if (__once._M_activate())
983 {
984 once_flag::_Active_execution __exec(__once);
985
986 // _GLIBCXX_RESOLVE_LIB_DEFECTS
987 // 2442. call_once() shouldn't DECAY_COPY()
988 std::__invoke(std::forward<_Callable>(__f),
989 std::forward<_Args>(__args)...);
990
991 // __f(__args...) did not throw
992 __exec._M_returning = true;
993 }
994 }
995#endif // _GLIBCXX_HAS_GTHREADS
996
997 /// @} group mutexes
998_GLIBCXX_END_NAMESPACE_VERSION
999} // namespace
1000
1001#endif // C++11
1002
1003#endif // _GLIBCXX_MUTEX
constexpr __invoke_result< _Callable, _Args... >::type __invoke(_Callable &&__fn, _Args &&... __args) noexcept(__is_nothrow_invocable< _Callable, _Args... >::value)
Invoke a callable object.
Definition: invoke.h:90
constexpr tuple< _Elements &... > tie(_Elements &... __args) noexcept
tie
Definition: tuple:2152
constexpr _Tp * __addressof(_Tp &__r) noexcept
Same as C++11 std::addressof.
Definition: move.h:49
void lock(_L1 &__l1, _L2 &__l2, _L3 &... __l3)
Generic lock.
Definition: mutex:679
constexpr try_to_lock_t try_to_lock
Tag used to prevent a scoped lock from blocking if a mutex is locked.
Definition: std_mutex.h:228
int try_lock(_L1 &__l1, _L2 &__l2, _L3 &... __l3)
Generic try_lock.
Definition: mutex:620
constexpr defer_lock_t defer_lock
Tag used to prevent a scoped lock from acquiring ownership of a mutex.
Definition: std_mutex.h:225
void call_once(once_flag &__once, _Callable &&__f, _Args &&... __args)
Invoke a callable and synchronize with other calls using the same flag.
Definition: mutex:882
ISO C++ entities toplevel namespace is std.
thread::id get_id() noexcept
The unique identifier of the current thread.
Definition: std_thread.h:340
condition_variable
A scoped lock type for multiple lockable objects.
Definition: mutex:727
Flag type used by std::call_once.
Definition: mutex:785
friend void call_once(once_flag &__once, _Callable &&__f, _Args &&... __args)
Invoke a callable and synchronize with other calls using the same flag.
Definition: mutex:882
once_flag(const once_flag &)=delete
Deleted copy constructor.
once_flag & operator=(const once_flag &)=delete
Deleted assignment operator.
ratio_greater
Definition: ratio:417
Primary class template, tuple.
Definition: tuple:746
chrono::duration represents a distance between two points in time
Definition: chrono.h:435
chrono::time_point represents a point in time as measured by a clock
Definition: chrono.h:848
Monotonic clock.
Definition: chrono.h:1142
Assume the calling thread has already obtained mutex ownership and manage it.
Definition: std_mutex.h:222
A movable scoped lock type.
Definition: unique_lock.h:60