atomic_2.h

// -*- C++ -*- header.

// Copyright (C) 2008, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_2.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_2_H
#define _GLIBCXX_ATOMIC_2_H 1

#pragma GCC system_header

// _GLIBCXX_BEGIN_NAMESPACE(std)

// 2 == __atomic2 == Always lock-free
// Assumed:
// _GLIBCXX_ATOMIC_BUILTINS_1
// _GLIBCXX_ATOMIC_BUILTINS_2
// _GLIBCXX_ATOMIC_BUILTINS_4
// _GLIBCXX_ATOMIC_BUILTINS_8
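//
// Editorial note: every operation below lowers to one of the GCC
// __sync_* atomic builtins, which is why this variant can report
// is_lock_free() == true unconditionally.  As an illustrative sketch
// (not part of this header), a sequentially consistent increment and
// a strong compare-and-swap on a plain int look like:
//
//   int __v = 0;
//   __sync_fetch_and_add(&__v, 1);                    // full barrier
//   int __old = __sync_val_compare_and_swap(&__v, 1, 2);
//   // __old == 1 here exactly when the swap succeeded.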
namespace __atomic2
{
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Construction from ATOMIC_FLAG_INIT.
    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst)
    {
      // Redundant synchronize if built-in for lock is a full barrier.
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
      return __sync_lock_test_and_set(&_M_i, 1);
    }

    void
    clear(memory_order __m = memory_order_seq_cst)
    {
      __glibcxx_assert(__m != memory_order_consume);
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_lock_release(&_M_i);
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
    }
  };
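
  // Illustration only, not part of this header: atomic_flag is the
  // building block for a simple spin lock.  A minimal sketch, assuming
  // the C++0x <cstdatomic> header exposes this type as std::atomic_flag:
  //
  //   #include <cstdatomic>
  //
  //   std::atomic_flag __lock = ATOMIC_FLAG_INIT;
  //
  //   void
  //   spin_lock()
  //   {
  //     while (__lock.test_and_set(std::memory_order_acquire))
  //       { }   // busy-wait until the holder calls spin_unlock()
  //   }
  //
  //   void
  //   spin_unlock()
  //   { __lock.clear(std::memory_order_release); }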


  /// 29.4.2, address types
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address() = default;
    ~atomic_address() = default;
    atomic_address(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) volatile = delete;

    atomic_address(void* __v) { _M_i = __v; }

    bool
    is_lock_free() const
    { return true; }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst)
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);

      if (__m == memory_order_relaxed)
        _M_i = __v;
      else
        {
          // write_mem_barrier();
          _M_i = __v;
          if (__m == memory_order_seq_cst)
            __sync_synchronize();
        }
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_synchronize();
      void* __ret = _M_i;
      __sync_synchronize();
      return __ret;
    }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst)
    {
      // XXX built-in assumes memory_order_acquire.
      return __sync_lock_test_and_set(&_M_i, __v);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2)
    { return compare_exchange_strong(__v1, __v2, __m1, __m2); }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      void* __v1o = __v1;
      void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);

      // Assume extra stores (of same value) allowed in true case.
      __v1 = __v1n;
      return __v1o == __v1n;
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
    { return __sync_fetch_and_add(&_M_i, __d); }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
    { return __sync_fetch_and_sub(&_M_i, __d); }

    operator void*() const
    { return load(); }

    void*
    operator=(void* __v)
    {
      store(__v);
      return __v;
    }

    void*
    operator+=(ptrdiff_t __d)
    { return __sync_add_and_fetch(&_M_i, __d); }

    void*
    operator-=(ptrdiff_t __d)
    { return __sync_sub_and_fetch(&_M_i, __d); }
  };
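
  // Illustration only, not part of this header: a typical strong
  // compare-and-swap retry loop over atomic_address.  A hypothetical
  // sketch, assuming the type is reachable as std::atomic_address via
  // the C++0x <cstdatomic> header:
  //
  //   #include <cstdatomic>
  //
  //   int __buf[16];
  //   std::atomic_address __top(__buf);
  //
  //   // Claim the current slot and advance __top by one element.
  //   void*
  //   take_slot()
  //   {
  //     void* __old = __top.load();
  //     // On failure __old is refreshed with the observed value, so
  //     // the loop retries against up-to-date data.
  //     while (!__top.compare_exchange_strong(
  //              __old, static_cast<int*>(__old) + 1))
  //       { }
  //     return __old;
  //   }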

  // 29.3.1 atomic integral types
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t

  // Base type.
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or 8 bytes,
  // since that is what GCC built-in functions for atomic memory access work on.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp  __integral_type;

      __integral_type   _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __integral_type convertible to _M_i.
      __atomic_base(__integral_type __i) { _M_i = __i; }

      operator __integral_type() const
      { return load(); }

      __integral_type
      operator=(__integral_type __i)
      {
        store(__i);
        return __i;
      }

      __integral_type
      operator++(int)
      { return fetch_add(1); }

      __integral_type
      operator--(int)
      { return fetch_sub(1); }

      __integral_type
      operator++()
      { return __sync_add_and_fetch(&_M_i, 1); }

      __integral_type
      operator--()
      { return __sync_sub_and_fetch(&_M_i, 1); }

      __integral_type
      operator+=(__integral_type __i)
      { return __sync_add_and_fetch(&_M_i, __i); }

      __integral_type
      operator-=(__integral_type __i)
      { return __sync_sub_and_fetch(&_M_i, __i); }

      __integral_type
      operator&=(__integral_type __i)
      { return __sync_and_and_fetch(&_M_i, __i); }

      __integral_type
      operator|=(__integral_type __i)
      { return __sync_or_and_fetch(&_M_i, __i); }

      __integral_type
      operator^=(__integral_type __i)
      { return __sync_xor_and_fetch(&_M_i, __i); }

      bool
      is_lock_free() const
      { return true; }

      void
      store(__integral_type __i, memory_order __m = memory_order_seq_cst)
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_i = __i;
        else
          {
            // write_mem_barrier();
            _M_i = __i;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      __integral_type
      load(memory_order __m = memory_order_seq_cst) const
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __integral_type __ret = _M_i;
        __sync_synchronize();
        return __ret;
      }

      __integral_type
      exchange(__integral_type __i, memory_order __m = memory_order_seq_cst)
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_i, __i);
      }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m1, memory_order __m2)
      { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m = memory_order_seq_cst)
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m1, memory_order __m2)
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __integral_type __i1o = __i1;
        __integral_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

        // Assume extra stores (of same value) allowed in true case.
        __i1 = __i1n;
        return __i1o == __i1n;
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m = memory_order_seq_cst)
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __integral_type
      fetch_add(__integral_type __i,
                memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_add(&_M_i, __i); }

      __integral_type
      fetch_sub(__integral_type __i,
                memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_sub(&_M_i, __i); }

      __integral_type
      fetch_and(__integral_type __i,
                memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_and(&_M_i, __i); }

      __integral_type
      fetch_or(__integral_type __i,
               memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_or(&_M_i, __i); }

      __integral_type
      fetch_xor(__integral_type __i,
                memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_xor(&_M_i, __i); }
    };
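
  // Illustration only, not part of this header: __atomic_base backs the
  // atomic integral types, so a shared counter reduces to the __sync
  // builtins used above.  A minimal sketch, assuming <cstdatomic>
  // exposes std::atomic_int:
  //
  //   #include <cstdatomic>
  //
  //   std::atomic_int __count(0);
  //
  //   void
  //   worker()
  //   {
  //     ++__count;                            // __sync_add_and_fetch
  //     __count.fetch_add(2);                 // __sync_fetch_and_add
  //     int __seen = __count.load(std::memory_order_acquire);
  //     (void)__seen;                         // silence unused warning
  //   }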


  /// atomic_bool
  // NB: No operators or fetch-operations for this type.
  struct atomic_bool
  {
  private:
    __atomic_base<bool> _M_base;

  public:
    atomic_bool() = default;
    ~atomic_bool() = default;
    atomic_bool(const atomic_bool&) = delete;
    atomic_bool& operator=(const atomic_bool&) volatile = delete;

    atomic_bool(bool __i) : _M_base(__i) { }

    bool
    operator=(bool __i)
    { return _M_base.operator=(__i); }

    operator bool() const
    { return _M_base.load(); }

    bool
    is_lock_free() const
    { return _M_base.is_lock_free(); }

    void
    store(bool __i, memory_order __m = memory_order_seq_cst)
    { _M_base.store(__i, __m); }

    bool
    load(memory_order __m = memory_order_seq_cst) const
    { return _M_base.load(__m); }

    bool
    exchange(bool __i, memory_order __m = memory_order_seq_cst)
    { return _M_base.exchange(__i, __m); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
                          memory_order __m2)
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2,
                          memory_order __m = memory_order_seq_cst)
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
                            memory_order __m2)
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2,
                            memory_order __m = memory_order_seq_cst)
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
  };
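
  // Illustration only, not part of this header: atomic_bool serves as
  // the usual "data ready" flag for release/acquire publication.  A
  // minimal sketch, assuming std::atomic_bool from <cstdatomic>:
  //
  //   #include <cstdatomic>
  //
  //   int __payload;
  //   std::atomic_bool __ready(false);
  //
  //   void
  //   producer()
  //   {
  //     __payload = 42;                                  // write the data
  //     __ready.store(true, std::memory_order_release);  // then publish
  //   }
  //
  //   void
  //   consumer()
  //   {
  //     while (!__ready.load(std::memory_order_acquire))
  //       { }                     // acquire load synchronizes with the
  //     int __v = __payload;      // release store, so this read sees 42
  //     (void)__v;
  //   }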
} // namespace __atomic2

// _GLIBCXX_END_NAMESPACE

#endif