This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



[PATCH] Fix PR11784 - atomic __exchange_and_add() broken for ColdFire targets


This patch replaces all m68k-specific variants of __exchange_and_add() with
a single version that takes a spin lock using either TAS (for 680x0, CPU32
and the 5407) or BSET.  Both are single read-modify-write instructions, so
the lock update behaves atomically on non-SMP systems.
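
For reference, here is a simplified sketch of the locking scheme (not the
exact code in the patch below; the lock variable, the unlock at the end and
the function name are only illustrative, the real lock lives in
__Atomicity_lock<0>::_S_atomicity_lock):

/* Hedged sketch of the spin-lock scheme; on plain ColdFire (no TAS)
   the same loop is written with "bset.b #7,(lock)" instead.  */
static volatile unsigned char __lock;	/* bit 7 == lock held */

static inline int
__exchange_and_add_sketch (volatile int *__mem, int __val)
{
  int __result;

  /* TAS sets bit 7 of the byte and records its previous value in the
     condition codes in one uninterruptible read-modify-write cycle;
     loop until the bit was previously clear.  */
  __asm__ __volatile__ ("1: tas %0\n\tjbne 1b"
			: "+m" (__lock) : : "cc", "memory");

  __result = *__mem;		/* plain RMW inside the critical section */
  *__mem = __result + __val;

  __lock = 0;			/* release: a plain store suffices on non-SMP */
  return __result;
}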

Previously, ColdFire targets used the variant for vxWorks/__embedded__.
That code path disabled interrupts to update the counter atomically, but
user applications linking against libstdc++ would segfault, because
"move #0,%sr" is a privileged instruction.
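
To illustrate, assuming the reference-counted (COW) basic_string of the
3.3 series, even a trivial single-threaded program reaches the atomicity
primitives (__atomic_add/__exchange_and_add) through the string's shared
reference count, so on ColdFire it would hit the privileged instruction
before this patch:

// Hedged example: exercises __exchange_and_add indirectly via
// std::string reference counting on a 3.3-era libstdc++.
#include <cstdio>
#include <string>

int
main ()
{
  std::string a ("hello");
  std::string b (a);	// the copy shares the buffer and bumps the refcount
  std::printf ("%s %s\n", a.c_str (), b.c_str ());
  return 0;		// destructors drop the refcount again
}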

There was also a generic, thread-unsafe C version used as a last resort.
It is now gone, since all m68k targets have the BSET instruction.

-- 
  // Bernardo Innocenti - Develer S.r.l., R&D dept.
\X/  http://www.develer.com/


2003-08-03  Bernardo Innocenti  <bernie@develer.com>

	PR 11784
	* libstdc++-v3/config/cpu/m68k/atomicity.h (__exchange_and_add):
	Replace all variants with a single BSET/TAS-based version.


--- gcc-3.3.1-20030720/libstdc++-v3/config/cpu/m68k/atomicity.h.orig	2003-07-24 18:58:58.000000000 +0200
+++ gcc-3.3.1-20030720/libstdc++-v3/config/cpu/m68k/atomicity.h	2003-07-24 23:23:35.000000000 +0200
@@ -74,8 +74,7 @@
   return __result;
 }
 
-#elif !defined(__mcf5200__) && !defined(__mcf5300__)
-// 68000, 68010, cpu32 and 5400 support test-and-set.
+#else
 
 template <int __inst>
 struct __Atomicity_lock
@@ -94,10 +93,28 @@
 {
   _Atomic_word __result;
 
+// bset with no immediate addressing
+#if defined(__mcf5200__) || defined(__mcf5300__)
+  __asm__ __volatile__("1: bset.b #7,%0@\n\tjbne 1b"
+		      : /* no outputs */
+		      : "a"(&__Atomicity_lock<0>::_S_atomicity_lock)
+		      : "cc", "memory");
+
+// bset with immediate addressing
+#elif defined(__mc68000__)
+  __asm__ __volatile__("1: bset.b #7,%0\n\tjbne 1b"
+		      : "=m"(__Atomicity_lock<0>::_S_atomicity_lock)
+		      : "m"(__Atomicity_lock<0>::_S_atomicity_lock)
+		      : "cc");
+
+#else // 680x0, cpu32, 5400 support test-and-set.
   __asm__ __volatile__("1: tas %0\n\tjbne 1b"
 		       : "=m"(__Atomicity_lock<0>::_S_atomicity_lock)
-		       : "m"(__Atomicity_lock<0>::_S_atomicity_lock));
+		       : "m"(__Atomicity_lock<0>::_S_atomicity_lock)
+		       : "cc");
 
+#endif
+  
   __result = *__mem;
   *__mem = __result + __val;
 
@@ -106,45 +123,7 @@
   return __result;
 }
 
-#elif defined(__vxWorks__) || defined(__embedded__)
-// The best we can hope for is to disable interrupts, which we
-// can only do from supervisor mode.
-
-static inline _Atomic_word 
-__attribute__ ((__unused__))
-__exchange_and_add (volatile _Atomic_word *__mem, int __val)
-{
-  _Atomic_word __result;
-  short __level, __tmpsr;
-  __asm__ __volatile__ ("move%.w %%sr,%0\n\tor%.l %0,%1\n\tmove%.w %1,%%sr"
-		  	: "=d"(__level), "=d"(__tmpsr) : "1"(0x700));
-
-  __result = *__mem;
-  *__mem = __result + __val;
-
-  __asm__ __volatile__ ("move%.w %0,%%sr" : : "d"(__level));
-
-  return __result;
-}
-
-#else
-// These variants do not support any atomic operations at all.
-
-#warning "__exchange_and_add is not atomic for this target"
-
-static inline _Atomic_word
-__attribute__ ((__unused__))
-__exchange_and_add (volatile _Atomic_word *__mem, int __val)
-{
-  _Atomic_word __result;
-
-  __result = *__mem;
-  *__mem = __result + __val;
-
-  return __result;
-}
-
-#endif /* CAS / IRQ / TAS */
+#endif /* TAS / BSET */
 
 static inline void
 __attribute__ ((__unused__))
@@ -155,4 +134,4 @@
   (void) __exchange_and_add (__mem, __val);
 }
 
-#endif /* atomicity.h */
+#endif /* !_BITS_ATOMICITY_H */


