Re: [PATCH, i386]: Implement atomic_fetch_sub


On 08/03/2012 02:52 PM, Andrew MacLeod wrote:
> I'll take a look at it next week unless someone gets to it first.
>
> Andrew

OK, so maybe I sort of forgot about implementing the generic transformation until now.


This patch bootstraps and passes all the testsuite cases. I factored out a little code to make sure we only generate the add if there is a direct opcode for it.
I also implemented the reverse, since it was trivial: if an atomic_add doesn't generate anything, it tries an atomic_sub to see if that's possible.


If that all fails, we still fall back to a compare-and-swap loop as before.
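
To make the transformation concrete, here is a rough source-level sketch of the equivalence the expansion relies on (illustration only, not part of the patch; the function name is made up):

  /* Sketch: an atomic fetch-and-subtract expressed as a fetch-and-add of
     the negated operand.  __atomic_fetch_add returns the old value, which
     is exactly what __atomic_fetch_sub would return, so the two are
     interchangeable at this level.  On i386 the add form typically maps
     to a single lock xadd.  */
  int
  fetch_sub_via_add (int *mem, int val)
  {
    return __atomic_fetch_add (mem, -val, __ATOMIC_SEQ_CST);
  }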

Bootstraps with no new regressions, and verified to fix the PR with no side effects.

OK for mainline?

Andrew
2012-08-23  Andrew MacLeod  <amacleod@redhat.com>

gcc
	PR target/54087
	* optabs.c (expand_atomic_fetch_op_no_fallback): New.  Factored code
	from expand_atomic_fetch_op.
	(expand_atomic_fetch_op): Try atomic_{add|sub} operations in terms of
	the other one if the direct opcode fails.

testsuite
	* gcc.dg/pr54087.c: New testcase for atomic_sub -> atomic_add when
	atomic_sub fails.



Index: optabs.c
===================================================================
*** optabs.c	(revision 190595)
--- optabs.c	(working copy)
*************** maybe_emit_op (const struct atomic_op_fu
*** 7745,7754 ****
     CODE is the operation being performed (OP)
     MEMMODEL is the memory model variant to use.
     AFTER is true to return the result of the operation (OP_fetch).
!    AFTER is false to return the value before the operation (fetch_OP).  */
! rtx
! expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
! 			enum memmodel model, bool after)
  {
    enum machine_mode mode = GET_MODE (mem);
    struct atomic_op_functions optab;
--- 7745,7759 ----
     CODE is the operation being performed (OP)
     MEMMODEL is the memory model variant to use.
     AFTER is true to return the result of the operation (OP_fetch).
!    AFTER is false to return the value before the operation (fetch_OP).  
! 
!    This function will *only* generate instructions if there is a direct
!    optab. No compare and swap loops or libcalls will be generated. */
! 
! static rtx
! expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
! 				    enum rtx_code code, enum memmodel model,
! 				    bool after)
  {
    enum machine_mode mode = GET_MODE (mem);
    struct atomic_op_functions optab;
*************** expand_atomic_fetch_op (rtx target, rtx 
*** 7821,7833 ****
--- 7826,7891 ----
  	}
      }
  
+   /* No direct opcode can be generated.  */
+   return NULL_RTX;
+ }
+ 
+ 
+ 
+ /* This function expands an atomic fetch_OP or OP_fetch operation:
+    TARGET is an optional place to stick the return value.  const0_rtx indicates
+    the result is unused. 
+    atomically fetch MEM, perform the operation with VAL and return it to MEM.
+    CODE is the operation being performed (OP)
+    MEMMODEL is the memory model variant to use.
+    AFTER is true to return the result of the operation (OP_fetch).
+    AFTER is false to return the value before the operation (fetch_OP).  */
+ rtx
+ expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
+ 			enum memmodel model, bool after)
+ {
+   enum machine_mode mode = GET_MODE (mem);
+   rtx result;
+   bool unused_result = (target == const0_rtx);
+ 
+   result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
+ 					       after);
+   
+   if (result)
+     return result;
+ 
+   /* Add/sub can be implemented by doing the reverse operation with -(val).  */
+   if (code == PLUS || code == MINUS)
+     {
+       rtx tmp;
+       enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
+ 
+       start_sequence ();
+       tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
+       result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
+ 						   model, after);
+       if (result)
+ 	{
+ 	  /* PLUS worked so emit the insns and return.  */
+ 	  tmp = get_insns ();
+ 	  end_sequence ();
+ 	  emit_insn (tmp);
+           return result;
+ 	}
+ 
+       /* PLUS did not work, so throw away the negation code and continue.  */
+       end_sequence ();
+     }
+ 
    /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
    if (!can_compare_and_swap_p (mode, false))
      {
        rtx libfunc;
        bool fixup = false;
        enum rtx_code orig_code = code;
+       struct atomic_op_functions optab;
  
+       get_atomic_op_for_code (&optab, code);
        libfunc = optab_libfunc (after ? optab.fetch_after
  			       : optab.fetch_before, mode);
        if (libfunc == NULL


Index: testsuite/gcc.dg/pr54087.c
===================================================================
*** testsuite/gcc.dg/pr54087.c	(revision 0)
--- testsuite/gcc.dg/pr54087.c	(revision 0)
***************
*** 0 ****
--- 1,18 ----
+ /* PR54087.  Verify __atomic_sub (val) uses __atomic_add (-val) if there is no
+              atomic_sub.  */
+ /* { dg-require-effective-target sync_int_long } */
+ /* { dg-do compile { target { i?86-*-* x86_64-*-* } } } */
+ /* { dg-final { scan-assembler-times "xadd" 2 } } */
+ 
+ 
+ int a;
+ 
+ int f1(int p)
+ {
+   return __atomic_sub_fetch(&a, p, __ATOMIC_SEQ_CST) == 0;
+ }
+ 
+ int f2(int p)
+ {
+   return __atomic_fetch_sub(&a, p, __ATOMIC_SEQ_CST) - p == 0;
+ }

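
For reference (not part of the patch), the rewrite the new test depends on can also be written out by hand; f1_by_hand below is a hypothetical hand-expanded equivalent of f1 above, using the same global a:

  /* Sketch of what f1 amounts to after the transformation:
     new = old + (-p), and __atomic_fetch_add returns old, so the
     sub_fetch result can be reconstructed from a fetch_add.  Both
     forms can therefore be emitted as xadd, which is what the
     scan-assembler-times check above counts.  */
  int f1_by_hand (int p)
  {
    return (__atomic_fetch_add (&a, -p, __ATOMIC_SEQ_CST) - p) == 0;
  }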