This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

Atomic operations on the ARM


It turns out that the atomic operations for the ARM in 
config/cpu/arm/atomicity.h are completely useless.  Firstly in ARM mode 
the operations aren't atomic (there is a race condition which could lead 
to a corrupt value) and secondly the Thumb operations simply cause the 
executing program to go AWOL (the problem I think is really in GAS, but 
given that they also suffer from the same bug that the ARM-mode operations 
suffer from, there's little point in worrying overly about that).

Fixed by removing them altogether.  To do this properly on the ARM 
requires a mutex, either as a bit in the _Atomic_word type, or separately. 
 But to do that would probably break the existing semantics -- Well it 
could be done by overriding the _Atomic_word type with a C++ class that 
handled initialization and comparison operations, but I don't really want 
to go down that road at this time.  For Thumb code, I'd like to be able to 
make these operations out-of-line, but there's no obvious place for them 
at this time.

Oh, another question, why does the generic implementation not use a 
standard mutex from gthr.h?

R.

2002-10-03  Richard Earnshaw  <rearnsha@arm.com>

	PR libstdc++/3584
	* config/cpu/arm/atomicity.h (__exchange_and_add): Don't try to use
	ASM sequences that don't give us atomic addition operations.  Instead
	just add a comment explaining why it doesn't work.
	(__atomic_add): Just use __exchange_and_add.
	(__test_and_set, __always_swap): Delete.


Index: config/cpu/arm/atomicity.h
===================================================================
RCS file: /cvs/gcc/gcc/libstdc++-v3/config/cpu/arm/atomicity.h,v
retrieving revision 1.3
diff -p -r1.3 atomicity.h
*** config/cpu/arm/atomicity.h	24 Jun 2002 05:47:15 -0000	1.3
--- config/cpu/arm/atomicity.h	3 Oct 2002 15:53:15 -0000
***************
*** 32,82 ****
  
  typedef int _Atomic_word;
  
  static inline _Atomic_word
  __attribute__ ((__unused__))
  __exchange_and_add (volatile _Atomic_word* __mem, int __val)
  {
!   _Atomic_word __tmp, __tmp2, __result;
! #ifdef __thumb__
!   /* Since this function is inlined, we can't be sure of the alignment.  */
!   __asm__ __volatile__ (
! 	"ldr     %0, 4f \n\t"
! 	"bx      %0 \n\t"
! 	".align 0 \n"
! 	"4:\t"
! 	".word   0f \n\t"
! 	".code 32 \n"
! 	"0:\t"
! 	"ldr     %0, [%3] \n\t"
! 	"add     %1, %0, %4 \n\t"
! 	"swp     %2, %1, [%3] \n\t"
!         "cmp     %0, %2 \n\t"
!         "swpne   %1, %2, [%3] \n\t"
!         "bne     0b \n\t"
! 	"ldr     %1, 1f \n\t"
! 	"bx      %1 \n"
! 	"1:\t"
! 	".word   2f \n\t"
! 	".code 16 \n"
! 	"2:\n"
! 	: "=&l"(__result), "=&r"(__tmp), "=&r"(__tmp2) 
! 	: "r" (__mem), "r"(__val) 
! 	: "cc", "memory");
! #else
!   __asm__ __volatile__ (
! 	"\n"
! 	"0:\t"
! 	"ldr     %0, [%3] \n\t"
! 	"add     %1, %0, %4 \n\t"
! 	"swp     %2, %1, [%3] \n\t"
! 	"cmp     %0, %2 \n\t"
! 	"swpne   %1, %2, [%3] \n\t"
! 	"bne     0b \n\t"
! 	""
! 	: "=&r"(__result), "=&r"(__tmp), "=&r"(__tmp2) 
! 	: "r" (__mem), "r"(__val) 
! 	: "cc", "memory");
! #endif
    return __result;
  }
  
--- 32,54 ----
  
  typedef int _Atomic_word;
  
+ /* It isn't possible to write an atomic add instruction using the ARM
+    SWP instruction without using either a global guard variable or a
+    guard bit somewhere in the Atomic word.  However, even with a guard
+    bit we need to understand the thread model (if any) in order to
+    make co-operatively threaded applications work correctly.
+ 
+    The previous Thumb-based implementations were also completely
+    broken, since they failed to switch back into Thumb mode (Gas bug,
+    I think).  */
+ 
  static inline _Atomic_word
  __attribute__ ((__unused__))
  __exchange_and_add (volatile _Atomic_word* __mem, int __val)
  {
!   _Atomic_word __result = *__mem;
! 
!   *__mem = __result + __val;
    return __result;
  }
  
*************** static inline void
*** 84,221 ****
  __attribute__ ((__unused__))
  __atomic_add (volatile _Atomic_word *__mem, int __val)
  {
!   _Atomic_word __tmp, __tmp2, __tmp3;
! #ifdef __thumb__
!   /* Since this function is inlined, we can't be sure of the alignment.  */
!   __asm__ __volatile__ (
! 	"ldr     %0, 4f \n\t"
! 	"bx      %0 \n\t"
! 	".align 0\n"
! 	"4:\t"
! 	".word   0f \n\t"
! 	".code 32 \n"
! 	"0:\t"
! 	"ldr     %0, [%3] \n\t"
! 	"add     %1, %0, %4 \n\t"
!         "swp     %2, %1, [%3] \n\t"
!         "cmp     %0, %2 \n\t"
!         "swpne   %1, %2,[%3] \n\t"
!         "bne     0b \n\t"
! 	"ldr     %1, 1f \n\t"
! 	"bx      %1 \n"
! 	"1:\t"
! 	".word   2f \n\t"
! 	".code 16 \n"
! 	"2:\n"
! 	: "=&l"(__tmp), "=&r"(__tmp2), "=&r"(__tmp3) 
! 	: "r" (__mem), "r"(__val) 
! 	: "cc", "memory");
! #else
!   __asm__ __volatile__ (
! 	"\n"
! 	"0:\t"
! 	"ldr     %0, [%3] \n\t"
! 	"add     %1, %0, %4 \n\t"
! 	"swp     %2, %1, [%3] \n\t"
! 	"cmp     %0, %2 \n\t"
! 	"swpne   %1, %2, [%3] \n\t"
! 	"bne     0b \n\t"
! 	""
! 	: "=&r"(__tmp), "=&r"(__tmp2), "=&r"(__tmp3) 
! 	: "r" (__mem), "r"(__val) 
! 	: "cc", "memory");
! #endif
! }
! 
! static inline long
! __attribute__ ((__unused__))
! __always_swap (volatile long *__p, long __newval)
! {
!   long __result;
! #ifdef __thumb__
!   long __tmp;
!   /* Since this function is inlined, we can't be sure of the alignment.  */
!   __asm__ __volatile__ (
! 	"ldr     %0, 4f \n\t"
! 	"bx      %0 \n\t"
! 	".align 0 \n"
! 	"4:\t"
! 	".word   0f \n\t"
! 	".code 32\n"
! 	"0:\t"
! 	"swp     %0, %3, [%2] \n\t"
! 	"ldr     %1, 1f \n\t"
! 	"bx      %1 \n"
! 	"1:\t"
! 	".word   2f \n\t"
! 	".code 16 \n"
! 	"2:\n"
! 	: "=&l"(__result), "=&r"(__tmp)
! 	: "r"(__p), "r"(__newval)
! 	: "memory");
! #else
!   __asm__ __volatile__ (
! 	"\n\t"
! 	"swp     %0, %2, [%1] \n\t"
! 	""
! 	: "=&r"(__result)
! 	: "r"(__p), "r"(__newval)
! 	: "memory");
! #endif
!   return __result;
! }
! 
! static inline int
! __attribute__ ((__unused__))
! __test_and_set (volatile long *__p, long __newval)
! {
!   int __result;
!   long __tmp;
! #ifdef __thumb__
!   /* Since this function is inlined, we can't be sure of the alignment.  */
!   __asm__ __volatile__ (
! 	"ldr     %0, 4f \n\t"
! 	"bx      %0 \n\t"
! 	".align 0 \n"
! 	"4:\t"
! 	".word   0f \n\t"
! 	".code 32 \n"
! 	"0:\t"
! 	"ldr     %0, [%2] \n\t"
!         "cmp     %0, #0 \n\t"
!         "bne     1f \n\t"
!         "swp     %1, %3, [%2] \n\t"
!         "cmp     %0, %1 \n\t"
!         "swpne   %0, %1, [%2]\n\t"
!         "bne     0b \n"
! 	"1:\t"
! 	"ldr     %1, 2f \n\t"
! 	"bx      %1 \n"
! 	"2:\t"
! 	".word   3f \n\t"
! 	".code 16 \n"
! 	"3:"
! 	: "=&l"(__result), "=r" (__tmp) 
! 	: "r"(__p), "r"(__newval) 
! 	: "cc", "memory");
! #else
!   __asm__ __volatile__ (
! 	"\n"
! 	"0:\t"
! 	"ldr     %0, [%2] \n\t"
! 	"cmp     %0, #0 \n\t"
! 	"bne     1f \n\t"
! 	"swp     %1, %3, [%2] \n\t"
! 	"cmp     %0, %1 \n\t"
! 	"swpne   %0, %1, [%2] \n\t"
! 	"bne     0b \n"
! 	"1:\n\t"
! 	""
! 	: "=&r"(__result), "=r" (__tmp) 
! 	: "r"(__p), "r"(__newval) 
! 	: "cc", "memory");
! #endif
!   return __result;
  }
  
  #endif /* atomicity.h */
--- 56,62 ----
  __attribute__ ((__unused__))
  __atomic_add (volatile _Atomic_word *__mem, int __val)
  {
!   __exchange_and_add (__mem, __val);
  }
  
  #endif /* atomicity.h */

Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]