


[patch] fix libstdc++/40297


This flips the sense of some debug mode assertions that are currently
backwards: written with '==', they trip on valid memory orders instead of
rejecting the invalid ones.

Tested x86_64/linux, with and without debug mode. OK for trunk and 4.4 branch?
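
To make the problem concrete, here is a minimal standalone sketch (not part
of the patch; it uses a plain assert and a local memory_order enum purely
for illustration). With the old '==' form no single memory order can satisfy
all three checks at once, so every store() aborts under debug mode; the
flipped '!=' form rejects only the orders that are invalid for a store:

#include <cassert>

enum memory_order
{
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
};

void
store_check_old(memory_order __m)
{
  // Old form: no single __m can equal acquire, acq_rel and consume at
  // once, so every store() aborts when the assertions are enabled.
  assert(__m == memory_order_acquire);
  assert(__m == memory_order_acq_rel);
  assert(__m == memory_order_consume);
}

void
store_check_new(memory_order __m)
{
  // New form: rejects exactly the orders that are not valid for a store.
  assert(__m != memory_order_acquire);
  assert(__m != memory_order_acq_rel);
  assert(__m != memory_order_consume);
}

int
main()
{
  store_check_new(memory_order_seq_cst);    // passes: seq_cst store is valid
  store_check_new(memory_order_release);    // passes: release store is valid
  // store_check_old(memory_order_seq_cst); // would abort despite being valid
  return 0;
}

The load() and compare_exchange assertions in the patch below follow the
same pattern.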


        PR libstdc++/40297:
        * include/bits/atomic_0.h: Reverse debug assertions.
        * include/bits/atomic_2.h: Likewise.
Index: include/bits/atomic_0.h
===================================================================
--- include/bits/atomic_0.h	(revision 148666)
+++ include/bits/atomic_0.h	(working copy)
@@ -119,17 +119,17 @@ namespace __atomic0
     void
     store(void* __v, memory_order __m = memory_order_seq_cst) volatile
     {
-      __glibcxx_assert(__m == memory_order_acquire);
-      __glibcxx_assert(__m == memory_order_acq_rel);
-      __glibcxx_assert(__m == memory_order_consume);
+      __glibcxx_assert(__m != memory_order_acquire);
+      __glibcxx_assert(__m != memory_order_acq_rel);
+      __glibcxx_assert(__m != memory_order_consume);
       _ATOMIC_STORE_(this, __v, __m);
     }
 
     void*
     load(memory_order __m = memory_order_seq_cst) const volatile
     {
-      __glibcxx_assert(__m == memory_order_release);
-      __glibcxx_assert(__m == memory_order_acq_rel);
+      __glibcxx_assert(__m != memory_order_release);
+      __glibcxx_assert(__m != memory_order_acq_rel);
       return _ATOMIC_LOAD_(this, __m);
     }
 
@@ -141,8 +141,8 @@ namespace __atomic0
     compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
 			  memory_order __m2) volatile
     {
-      __glibcxx_assert(__m2 == memory_order_release);
-      __glibcxx_assert(__m2 == memory_order_acq_rel);
+      __glibcxx_assert(__m2 != memory_order_release);
+      __glibcxx_assert(__m2 != memory_order_acq_rel);
       __glibcxx_assert(__m2 <= __m1);
       return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
     }
@@ -159,8 +159,8 @@ namespace __atomic0
     compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
 			    memory_order __m2) volatile
     {
-      __glibcxx_assert(__m2 == memory_order_release);
-      __glibcxx_assert(__m2 == memory_order_acq_rel);
+      __glibcxx_assert(__m2 != memory_order_release);
+      __glibcxx_assert(__m2 != memory_order_acq_rel);
       __glibcxx_assert(__m2 <= __m1);
       return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
     }
@@ -310,17 +310,17 @@ namespace __atomic0
       store(__integral_type __i,
 	    memory_order __m = memory_order_seq_cst) volatile
       {
-	__glibcxx_assert(__m == memory_order_acquire);
-	__glibcxx_assert(__m == memory_order_acq_rel);
-	__glibcxx_assert(__m == memory_order_consume);
+	__glibcxx_assert(__m != memory_order_acquire);
+	__glibcxx_assert(__m != memory_order_acq_rel);
+	__glibcxx_assert(__m != memory_order_consume);
 	_ATOMIC_STORE_(this, __i, __m);
       }
 
       __integral_type
       load(memory_order __m = memory_order_seq_cst) const volatile
       {
-	__glibcxx_assert(__m == memory_order_release);
-	__glibcxx_assert(__m == memory_order_acq_rel);
+	__glibcxx_assert(__m != memory_order_release);
+	__glibcxx_assert(__m != memory_order_acq_rel);
 	return _ATOMIC_LOAD_(this, __m);
       }
 
@@ -333,8 +333,8 @@ namespace __atomic0
       compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
 			    memory_order __m1, memory_order __m2) volatile
       {
-	__glibcxx_assert(__m2 == memory_order_release);
-	__glibcxx_assert(__m2 == memory_order_acq_rel);
+	__glibcxx_assert(__m2 != memory_order_release);
+	__glibcxx_assert(__m2 != memory_order_acq_rel);
 	__glibcxx_assert(__m2 <= __m1);
 	return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
       }
@@ -351,8 +351,8 @@ namespace __atomic0
       compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
 			      memory_order __m1, memory_order __m2) volatile
       {
-	__glibcxx_assert(__m2 == memory_order_release);
-	__glibcxx_assert(__m2 == memory_order_acq_rel);
+	__glibcxx_assert(__m2 != memory_order_release);
+	__glibcxx_assert(__m2 != memory_order_acq_rel);
 	__glibcxx_assert(__m2 <= __m1);
 	return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
       }
Index: include/bits/atomic_2.h
===================================================================
--- include/bits/atomic_2.h	(revision 148666)
+++ include/bits/atomic_2.h	(working copy)
@@ -65,6 +65,10 @@ namespace __atomic2
     void
     clear(memory_order __m = memory_order_seq_cst) volatile
     {
+      __glibcxx_assert(__m != memory_order_consume);
+      __glibcxx_assert(__m != memory_order_acquire);
+      __glibcxx_assert(__m != memory_order_acq_rel);
+
       __sync_lock_release(&_M_i);
       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
 	__sync_synchronize();
@@ -93,9 +97,9 @@ namespace __atomic2
     void
     store(void* __v, memory_order __m = memory_order_seq_cst) volatile
     {
-      __glibcxx_assert(__m == memory_order_acquire);
-      __glibcxx_assert(__m == memory_order_acq_rel);
-      __glibcxx_assert(__m == memory_order_consume);
+      __glibcxx_assert(__m != memory_order_acquire);
+      __glibcxx_assert(__m != memory_order_acq_rel);
+      __glibcxx_assert(__m != memory_order_consume);
 
       if (__m == memory_order_relaxed)
 	_M_i = __v;
@@ -111,8 +115,8 @@ namespace __atomic2
     void*
     load(memory_order __m = memory_order_seq_cst) const volatile
     {
-      __glibcxx_assert(__m == memory_order_release);
-      __glibcxx_assert(__m == memory_order_acq_rel);
+      __glibcxx_assert(__m != memory_order_release);
+      __glibcxx_assert(__m != memory_order_acq_rel);
 
       __sync_synchronize();
       void* __ret = _M_i;
@@ -144,8 +148,8 @@ namespace __atomic2
     compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
 			    memory_order __m2) volatile
     {
-      __glibcxx_assert(__m2 == memory_order_release);
-      __glibcxx_assert(__m2 == memory_order_acq_rel);
+      __glibcxx_assert(__m2 != memory_order_release);
+      __glibcxx_assert(__m2 != memory_order_acq_rel);
       __glibcxx_assert(__m2 <= __m1);
 
       void* __v1o = __v1;
@@ -284,9 +288,9 @@ namespace __atomic2
       store(__integral_type __i,
 	    memory_order __m = memory_order_seq_cst) volatile
       {
-	__glibcxx_assert(__m == memory_order_acquire);
-	__glibcxx_assert(__m == memory_order_acq_rel);
-	__glibcxx_assert(__m == memory_order_consume);
+	__glibcxx_assert(__m != memory_order_acquire);
+	__glibcxx_assert(__m != memory_order_acq_rel);
+	__glibcxx_assert(__m != memory_order_consume);
 
 	if (__m == memory_order_relaxed)
 	  _M_i = __i;
@@ -302,8 +306,8 @@ namespace __atomic2
       __integral_type
       load(memory_order __m = memory_order_seq_cst) const volatile
       {
-	__glibcxx_assert(__m == memory_order_release);
-	__glibcxx_assert(__m == memory_order_acq_rel);
+	__glibcxx_assert(__m != memory_order_release);
+	__glibcxx_assert(__m != memory_order_acq_rel);
 
 	__sync_synchronize();
 	__integral_type __ret = _M_i;
@@ -336,8 +340,8 @@ namespace __atomic2
       compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
 			      memory_order __m1, memory_order __m2) volatile
       {
-	__glibcxx_assert(__m2 == memory_order_release);
-	__glibcxx_assert(__m2 == memory_order_acq_rel);
+	__glibcxx_assert(__m2 != memory_order_release);
+	__glibcxx_assert(__m2 != memory_order_acq_rel);
 	__glibcxx_assert(__m2 <= __m1);
 
 	__integral_type __i1o = __i1;
