This is the mail archive of the
gcc-patches@gcc.gnu.org
mailing list for the GCC project.
[v3] libstdc++/54005
- From: Benjamin De Kosnik <bkoz at redhat dot com>
- To: libstdc++ at gcc dot gnu dot org, gcc-patches at gcc dot gnu dot org
- Date: Tue, 7 Aug 2012 16:10:01 -0700
- Subject: [v3] libstdc++/54005
Pretty minor change, as per the PR. Using the __atomic_always_lock_free
builtin seems more appropriate for templatized types.
I'll wait a bit before putting this on the 4.7 branch.
-benjamin
tested x86/linux
2012-08-07 Benjamin Kosnik <bkoz@redhat.com>
PR libstdc++/54005
* include/std/atomic: Use __atomic_always_lock_free.
* include/bits/atomic_base.h: Same.
diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h
index 9d5f4eb..598e1f1 100644
--- a/libstdc++-v3/include/bits/atomic_base.h
+++ b/libstdc++-v3/include/bits/atomic_base.h
@@ -422,11 +422,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
bool
is_lock_free() const noexcept
- { return __atomic_is_lock_free (sizeof (_M_i), &_M_i); }
+ { return __atomic_always_lock_free(sizeof(_M_i), &_M_i); }
bool
is_lock_free() const volatile noexcept
- { return __atomic_is_lock_free (sizeof (_M_i), &_M_i); }
+ { return __atomic_always_lock_free(sizeof(_M_i), &_M_i); }
void
store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
@@ -716,11 +716,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
bool
is_lock_free() const noexcept
- { return __atomic_is_lock_free(_M_type_size(1), &_M_p); }
+ { return __atomic_always_lock_free(_M_type_size(1), &_M_p); }
bool
is_lock_free() const volatile noexcept
- { return __atomic_is_lock_free(_M_type_size(1), &_M_p); }
+ { return __atomic_always_lock_free(_M_type_size(1), &_M_p); }
void
store(__pointer_type __p,
diff --git a/libstdc++-v3/include/std/atomic b/libstdc++-v3/include/std/atomic
index 6a08b28..b5ca606 100644
--- a/libstdc++-v3/include/std/atomic
+++ b/libstdc++-v3/include/std/atomic
@@ -184,11 +184,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
bool
is_lock_free() const noexcept
- { return __atomic_is_lock_free(sizeof(_M_i), &_M_i); }
+ { return __atomic_always_lock_free(sizeof(_M_i), &_M_i); }
bool
is_lock_free() const volatile noexcept
- { return __atomic_is_lock_free(sizeof(_M_i), &_M_i); }
+ { return __atomic_always_lock_free(sizeof(_M_i), &_M_i); }
void
store(_Tp __i, memory_order _m = memory_order_seq_cst) noexcept