[PATCH, middle-end]: Introduce memory_blockage named insn pattern

Uros Bizjak <ubizjak@gmail.com>
Tue Sep 5 10:26:00 GMT 2017


Hello!

This patch allows the middle-end to emit a memory_blockage pattern
instead of the default asm volatile as a memory blockage. The patch is
needed so that targets (e.g. x86) can define and emit a more optimal
memory blockage pseudo insn. It also renames the scheduler memory
barrier to a "memory blockage" pseudo insn; "memory barrier" should be
reserved for a real machine instruction.
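
For illustration, a target definition of this pattern could look
along these lines (a hypothetical sketch, not part of this patch;
UNSPEC_MEMORY_BLOCKAGE is a placeholder target-local constant).  The
expander constructs a volatile BLKmode scratch MEM that the insn both
reads and writes, as required by the documentation added below, while
the insn itself emits no machine code:

;; Hypothetical sketch of a target memory_blockage pattern.
(define_expand "memory_blockage"
  [(set (match_dup 0)
        (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BLOCKAGE))]
  ""
{
  /* Volatile BLKmode memory, read and written by the blockage insn.  */
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

(define_insn "*memory_blockage"
  [(set (match_operand:BLK 0)
        (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BLOCKAGE))]
  ""
  ""
  [(set_attr "length" "0")])

Once a target provides the named pattern, the generated insn-flags.h
defines HAVE_memory_blockage, so the new expand_memory_blockage below
uses gen_memory_blockage () instead of the asm volatile fallback.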

2017-09-05  Uros Bizjak  <ubizjak@gmail.com>

    * optabs.c (expand_memory_blockage): New function.
    (expand_asm_memory_barrier): Rename ...
    (expand_asm_memory_blockage): ... to this.
    (expand_mem_thread_fence): Call expand_memory_blockage
    instead of expand_asm_memory_barrier.
    (expand_mem_signal_fence): Ditto.
    (expand_atomic_load): Ditto.
    (expand_atomic_store): Ditto.
    * doc/md.texi (Standard Pattern Names For Generation):
    Document memory_blockage instruction pattern.

Bootstrapped on x86_64-linux-gnu; regression testing (together with an
additional x86 patch that fixes a recent optimization regression with
FP atomic loads) is in progress.

OK for mainline?

Uros.
-------------- next part --------------
diff --git a/gcc/doc/md.texi b/gcc/doc/md.texi
index 14aab9474bc2..df4dc8ccd0e1 100644
--- a/gcc/doc/md.texi
+++ b/gcc/doc/md.texi
@@ -6734,6 +6734,13 @@ scheduler and other passes from moving instructions and using register
 equivalences across the boundary defined by the blockage insn.
 This needs to be an UNSPEC_VOLATILE pattern or a volatile ASM.
 
+@cindex @code{memory_blockage} instruction pattern
+@item @samp{memory_blockage}
+This pattern defines a pseudo insn that prevents the instruction
+scheduler and other passes from moving instructions accessing memory
+across the boundary defined by the blockage insn.  This instruction
+needs to read and write volatile BLKmode memory.
+
 @cindex @code{memory_barrier} instruction pattern
 @item @samp{memory_barrier}
 If the target memory model is not fully synchronous, then this pattern
diff --git a/gcc/optabs.c b/gcc/optabs.c
index b65707080eee..c3b1bc848bf7 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -6276,10 +6276,10 @@ expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
   return true;
 }
 
-/* Generate asm volatile("" : : : "memory") as the memory barrier.  */
+/* Generate asm volatile("" : : : "memory") as the memory blockage.  */
 
 static void
-expand_asm_memory_barrier (void)
+expand_asm_memory_blockage (void)
 {
   rtx asm_op, clob;
 
@@ -6295,6 +6295,18 @@ expand_asm_memory_barrier (void)
   emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
 }
 
+/* Do not schedule instructions accessing memory across this point.  */
+
+static void
+expand_memory_blockage (void)
+{
+#ifdef HAVE_memory_blockage
+  emit_insn (gen_memory_blockage ());
+#else
+  expand_asm_memory_blockage ();
+#endif
+}
+
 /* This routine will either emit the mem_thread_fence pattern or issue a 
    sync_synchronize to generate a fence for memory model MEMMODEL.  */
 
@@ -6306,14 +6318,14 @@ expand_mem_thread_fence (enum memmodel model)
   if (targetm.have_mem_thread_fence ())
     {
       emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
-      expand_asm_memory_barrier ();
+      expand_memory_blockage ();
     }
   else if (targetm.have_memory_barrier ())
     emit_insn (targetm.gen_memory_barrier ());
   else if (synchronize_libfunc != NULL_RTX)
     emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
   else
-    expand_asm_memory_barrier ();
+    expand_memory_blockage ();
 }
 
 /* Emit a signal fence with given memory model.  */
@@ -6324,7 +6336,7 @@ expand_mem_signal_fence (enum memmodel model)
   /* No machine barrier is required to implement a signal fence, but
      a compiler memory barrier must be issued, except for relaxed MM.  */
   if (!is_mm_relaxed (model))
-    expand_asm_memory_barrier ();
+    expand_memory_blockage ();
 }
 
 /* This function expands the atomic load operation:
@@ -6346,7 +6358,7 @@ expand_atomic_load (rtx target, rtx mem, enum memmodel model)
       struct expand_operand ops[3];
       rtx_insn *last = get_last_insn ();
       if (is_mm_seq_cst (model))
-	expand_asm_memory_barrier ();
+	expand_memory_blockage ();
 
       create_output_operand (&ops[0], target, mode);
       create_fixed_operand (&ops[1], mem);
@@ -6354,7 +6366,7 @@ expand_atomic_load (rtx target, rtx mem, enum memmodel model)
       if (maybe_expand_insn (icode, 3, ops))
 	{
 	  if (!is_mm_relaxed (model))
-	    expand_asm_memory_barrier ();
+	    expand_memory_blockage ();
 	  return ops[0].value;
 	}
       delete_insns_since (last);
@@ -6404,14 +6416,14 @@ expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
     {
       rtx_insn *last = get_last_insn ();
       if (!is_mm_relaxed (model))
-	expand_asm_memory_barrier ();
+	expand_memory_blockage ();
       create_fixed_operand (&ops[0], mem);
       create_input_operand (&ops[1], val, mode);
       create_integer_operand (&ops[2], model);
       if (maybe_expand_insn (icode, 3, ops))
 	{
 	  if (is_mm_seq_cst (model))
-	    expand_asm_memory_barrier ();
+	    expand_memory_blockage ();
 	  return const0_rtx;
 	}
       delete_insns_since (last);

