libstdc++
mutex
// <mutex> -*- C++ -*-

// Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_MUTEX
#define _GLIBCXX_MUTEX 1

#pragma GCC system_header

#ifndef __GXX_EXPERIMENTAL_CXX0X__
# include <bits/c++0x_warning.h>
#else

#include <tuple>
#include <chrono>
#include <exception>
#include <type_traits>
#include <functional>
#include <system_error>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/move.h> // for std::swap

#if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup mutexes Mutexes
   * @ingroup concurrency
   *
   * Classes for mutex support.
   * @{
   */

  /// mutex
  class mutex
  {
    typedef __gthread_mutex_t                   __native_type;
    __native_type  _M_mutex;

  public:
    typedef __native_type*                      native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    constexpr mutex() : _M_mutex(__GTHREAD_MUTEX_INIT) { }
#else
    mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~mutex() { __gthread_mutex_destroy(&_M_mutex); }
#endif

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
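
  /* Illustrative usage sketch (not part of this header): basic std::mutex use
     from user code.  lock() blocks until ownership is obtained, try_lock()
     returns false instead of blocking, and every successful lock must be
     paired with unlock() on the same thread.  Names below are hypothetical.

       #include <mutex>

       std::mutex m;
       long counter = 0;

       void increment()
       {
         m.lock();      // blocks until the mutex is acquired
         ++counter;     // critical section
         m.unlock();    // releases the mutex
       }
  */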

#ifndef __GTHREAD_RECURSIVE_MUTEX_INIT
  // FIXME: gthreads doesn't define __gthread_recursive_mutex_destroy
  // so we need to obtain a __gthread_mutex_t to destroy
  class __destroy_recursive_mutex
  {
    template<typename _Mx, typename _Rm>
      static void
      _S_destroy_win32(_Mx* __mx, _Rm const* __rmx)
      {
        __mx->counter = __rmx->counter;
        __mx->sema = __rmx->sema;
        __gthread_mutex_destroy(__mx);
      }

  public:
    // matches a gthr-win32.h recursive mutex
    template<typename _Rm>
      static typename enable_if<sizeof(&_Rm::sema), void>::type
      _S_destroy(_Rm* __mx)
      {
        __gthread_mutex_t __tmp;
        _S_destroy_win32(&__tmp, __mx);
      }

    // matches a recursive mutex with a member 'actual'
    template<typename _Rm>
      static typename enable_if<sizeof(&_Rm::actual), void>::type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(&__mx->actual); }

    // matches when there's only one mutex type
    template<typename _Rm>
      static
      typename enable_if<is_same<_Rm, __gthread_mutex_t>::value, void>::type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(__mx); }
  };
#endif

  /// recursive_mutex
  class recursive_mutex
  {
    typedef __gthread_recursive_mutex_t         __native_type;
    __native_type  _M_mutex;

  public:
    typedef __native_type*                      native_handle_type;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    recursive_mutex() : _M_mutex(__GTHREAD_RECURSIVE_MUTEX_INIT) { }
#else
    recursive_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~recursive_mutex()
    { __destroy_recursive_mutex::_S_destroy(&_M_mutex); }
#endif

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
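
  /* Illustrative usage sketch (not part of this header): unlike std::mutex, a
     recursive_mutex may be re-locked by the thread that already owns it, and
     is released once unlock() has been called the same number of times.
     Names below are hypothetical.

       std::recursive_mutex rm;

       void inner() { std::lock_guard<std::recursive_mutex> g(rm); }

       void outer()
       {
         std::lock_guard<std::recursive_mutex> g(rm);  // first lock
         inner();                  // second lock on the same thread: OK
       }
  */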

  /// timed_mutex
  class timed_mutex
  {
    typedef __gthread_mutex_t                   __native_type;

#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::monotonic_clock             __clock_t;
#else
    typedef chrono::high_resolution_clock       __clock_t;
#endif

    __native_type  _M_mutex;

  public:
    typedef __native_type*                      native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    timed_mutex() : _M_mutex(__GTHREAD_MUTEX_INIT) { }
#else
    timed_mutex()
    {
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~timed_mutex() { __gthread_mutex_destroy(&_M_mutex); }
#endif

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        chrono::time_point<_Clock, chrono::seconds> __s =
          chrono::time_point_cast<chrono::seconds>(__atime);

        chrono::nanoseconds __ns =
          chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts = {
          static_cast<std::time_t>(__s.time_since_epoch().count()),
          static_cast<long>(__ns.count())
        };

        return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
      }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    template<typename _Rep, typename _Period>
      typename enable_if<
        ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }

    template <typename _Rep, typename _Period>
      typename enable_if<
        !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        // __clock_t ticks are coarser than _Period, so duration_cast may
        // truncate; pre-incrementing adds one tick to wait at least __rtime.
        __clock_t::time_point __atime = __clock_t::now()
          + ++chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }
  };
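
  /* Illustrative usage sketch (not part of this header): timed_mutex adds
     try_lock_for()/try_lock_until(), which give up and return false once the
     relative timeout or absolute deadline has passed.  Names below are
     hypothetical.

       std::timed_mutex tm;

       bool do_work_with_timeout()
       {
         if (!tm.try_lock_for(std::chrono::milliseconds(100)))
           return false;          // could not acquire within 100ms
         // ... critical section ...
         tm.unlock();
         return true;
       }
  */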

  /// recursive_timed_mutex
  class recursive_timed_mutex
  {
    typedef __gthread_recursive_mutex_t         __native_type;

#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::monotonic_clock             __clock_t;
#else
    typedef chrono::high_resolution_clock       __clock_t;
#endif

    __native_type  _M_mutex;

  public:
    typedef __native_type*                      native_handle_type;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    recursive_timed_mutex() : _M_mutex(__GTHREAD_RECURSIVE_MUTEX_INIT) { }
#else
    recursive_timed_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~recursive_timed_mutex()
    { __destroy_recursive_mutex::_S_destroy(&_M_mutex); }
#endif

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        chrono::time_point<_Clock, chrono::seconds> __s =
          chrono::time_point_cast<chrono::seconds>(__atime);

        chrono::nanoseconds __ns =
          chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts = {
          static_cast<std::time_t>(__s.time_since_epoch().count()),
          static_cast<long>(__ns.count())
        };

        return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
      }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    template<typename _Rep, typename _Period>
      typename enable_if<
        ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }

    template <typename _Rep, typename _Period>
      typename enable_if<
        !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        // __clock_t ticks are coarser than _Period, so duration_cast may
        // truncate; pre-incrementing adds one tick to wait at least __rtime.
        __clock_t::time_point __atime = __clock_t::now()
          + ++chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }
  };

  /// Do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Assume the calling thread has already obtained mutex ownership
  /// and manage it.
  struct adopt_lock_t { };

  constexpr defer_lock_t        defer_lock { };
  constexpr try_to_lock_t       try_to_lock { };
  constexpr adopt_lock_t        adopt_lock { };

  /// @brief  Scoped lock idiom.
  // Acquire the mutex here with a constructor call, then release with
  // the destructor call in accordance with RAII style.
  template<typename _Mutex>
    class lock_guard
    {
    public:
      typedef _Mutex mutex_type;

      explicit lock_guard(mutex_type& __m) : _M_device(__m)
      { _M_device.lock(); }

      lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
      { } // calling thread owns mutex

      ~lock_guard()
      { _M_device.unlock(); }

      lock_guard(const lock_guard&) = delete;
      lock_guard& operator=(const lock_guard&) = delete;

    private:
      mutex_type&  _M_device;
    };
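
  /* Illustrative usage sketch (not part of this header): lock_guard acquires
     the mutex in its constructor and releases it in its destructor, so the
     mutex is released on every exit path, including exceptions.  Names below
     are hypothetical.

       std::mutex m;
       std::vector<int> data;

       void push(int v)
       {
         std::lock_guard<std::mutex> guard(m);  // locks m
         data.push_back(v);                     // may throw; m still released
       }                                        // ~lock_guard unlocks m
  */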

  /// unique_lock
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      unique_lock()
      : _M_device(0), _M_owns(false)
      { }

      explicit unique_lock(mutex_type& __m)
      : _M_device(&__m), _M_owns(false)
      {
        lock();
        _M_owns = true;
      }

      unique_lock(mutex_type& __m, defer_lock_t)
      : _M_device(&__m), _M_owns(false)
      { }

      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(&__m), _M_owns(_M_device->try_lock())
      { }

      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(&__m), _M_owns(true)
      {
        // XXX calling thread owns mutex
      }

      template<typename _Clock, typename _Duration>
        unique_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __atime)
        : _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
        { }

      template<typename _Rep, typename _Period>
        unique_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rtime)
        : _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
        { }

      ~unique_lock()
      {
        if (_M_owns)
          unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      unique_lock(unique_lock&& __u)
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
        __u._M_device = 0;
        __u._M_owns = false;
      }

      unique_lock& operator=(unique_lock&& __u)
      {
        if (_M_owns)
          unlock();

        unique_lock(std::move(__u)).swap(*this);

        __u._M_device = 0;
        __u._M_owns = false;

        return *this;
      }

      void
      lock()
      {
        if (!_M_device)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        else
          {
            _M_device->lock();
            _M_owns = true;
          }
      }

      bool
      try_lock()
      {
        if (!_M_device)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        else
          {
            _M_owns = _M_device->try_lock();
            return _M_owns;
          }
      }

      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
        {
          if (!_M_device)
            __throw_system_error(int(errc::operation_not_permitted));
          else if (_M_owns)
            __throw_system_error(int(errc::resource_deadlock_would_occur));
          else
            {
              _M_owns = _M_device->try_lock_until(__atime);
              return _M_owns;
            }
        }

      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
        {
          if (!_M_device)
            __throw_system_error(int(errc::operation_not_permitted));
          else if (_M_owns)
            __throw_system_error(int(errc::resource_deadlock_would_occur));
          else
            {
              _M_owns = _M_device->try_lock_for(__rtime);
              return _M_owns;
            }
        }

      void
      unlock()
      {
        if (!_M_owns)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_device)
          {
            _M_device->unlock();
            _M_owns = false;
          }
      }

      void
      swap(unique_lock& __u)
      {
        std::swap(_M_device, __u._M_device);
        std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release()
      {
        mutex_type* __ret = _M_device;
        _M_device = 0;
        _M_owns = false;
        return __ret;
      }

      bool
      owns_lock() const
      { return _M_owns; }

      explicit operator bool() const
      { return owns_lock(); }

      mutex_type*
      mutex() const
      { return _M_device; }

    private:
      mutex_type*       _M_device;
      bool              _M_owns; // XXX use atomic_bool
    };

  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y)
    { __x.swap(__y); }
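
  /* Illustrative usage sketch (not part of this header): unique_lock is a
     movable lock holder.  With defer_lock the mutex is not acquired at
     construction, so the lock can be taken later and is still released by
     the destructor if it is owned at that point.  Names below are
     hypothetical.

       std::mutex m;

       void example()
       {
         std::unique_lock<std::mutex> lk(m, std::defer_lock);
         // ... work that does not need the mutex ...
         lk.lock();                 // acquire on demand
         // ... critical section ...
       }                            // ~unique_lock unlocks if owns_lock()
  */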

  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
        static void
        __do_unlock(tuple<_Lock&...>& __locks)
        {
          std::get<_Idx>(__locks).unlock();
          __unlock_impl<_Idx - 1>::__do_unlock(__locks);
        }
    };

  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
        static void
        __do_unlock(tuple<_Lock&...>&)
        { }
    };

  template<typename _Lock>
    unique_lock<_Lock>
    __try_to_lock(_Lock& __l)
    { return unique_lock<_Lock>(__l, try_to_lock); }

  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
        static void
        __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
        {
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __try_lock_impl<_Idx + 1, _Idx + 2 < sizeof...(_Lock)>::
                __do_try_lock(__locks, __idx);
              if (__idx == -1)
                __lock.release();
            }
        }
    };

  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
        static void
        __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
        {
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __idx = -1;
              __lock.release();
            }
        }
    };

  /** @brief Generic try_lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      __try
      { __try_lock_impl<0>::__do_try_lock(__locks, __idx); }
      __catch(...)
      { }
      return __idx;
    }
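
  /* Illustrative usage sketch (not part of this header): std::try_lock
     attempts each argument in order and backs out completely on failure.
     Names below are hypothetical.

       std::mutex a, b;

       bool try_both()
       {
         int idx = std::try_lock(a, b);   // -1 on success, else failing index
         if (idx != -1)
           return false;                  // neither a nor b is held
         // ... both held here ...
         a.unlock();
         b.unlock();
         return true;
       }
  */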

  /** @brief Generic lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If the call exits via an exception any locks that were
   *  obtained will be released.
   */
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      while (true)
        {
          unique_lock<_L1> __first(__l1);
          int __idx;
          auto __locks = std::tie(__l2, __l3...);
          __try_lock_impl<0, sizeof...(_L3)>::__do_try_lock(__locks, __idx);
          if (__idx == -1)
            {
              __first.release();
              return;
            }
        }
    }
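
  /* Illustrative usage sketch (not part of this header): std::lock acquires
     several mutexes without deadlock regardless of the order in which other
     threads lock them; adopt_lock then hands the already-held mutexes to RAII
     guards.  Names below are hypothetical.

       std::mutex a, b;

       void transfer()
       {
         std::lock(a, b);                             // deadlock-free acquire
         std::lock_guard<std::mutex> ga(a, std::adopt_lock);
         std::lock_guard<std::mutex> gb(b, std::adopt_lock);
         // ... operate on both protected resources ...
       }                                              // guards unlock a and b
  */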

  /// once_flag
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type  _M_once;

  public:
    constexpr once_flag() : _M_once(__GTHREAD_ONCE_INIT) { }

    once_flag(const once_flag&) = delete;
    once_flag& operator=(const once_flag&) = delete;

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };

#ifdef _GLIBCXX_HAVE_TLS
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  template<typename _Callable>
    inline void
    __once_call_impl()
    {
      (*(_Callable*)__once_callable)();
    }
#else
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  extern "C" void __once_proxy();

  /// call_once
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      auto __bound_functor = std::bind<void>(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      __once_functor = std::bind<void>(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

      if (__e)
        __throw_system_error(__e);
    }
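
  /* Illustrative usage sketch (not part of this header): call_once runs the
     callable exactly once for a given once_flag, even when several threads
     race to initialise the same data.  Names below are hypothetical.

       std::once_flag init_flag;
       int* table = nullptr;

       void init_table() { table = new int[256](); }

       int lookup(int i)
       {
         std::call_once(init_flag, init_table);  // at most one thread runs init_table
         return table[i];
       }
  */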

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1

#endif // __GXX_EXPERIMENTAL_CXX0X__

#endif // _GLIBCXX_MUTEX