
[new-ra] spill code emitting (5)


Hi,

this changes the emission of spill code a bit: it uses the validate_change
support from the first recog.c patches to try to substitute accesses to
stack memory directly into the instructions, instead of always emitting
separate load/store insns around them.
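
In other words (a simplified sketch, not the exact patch code), for a
spilled web's def we now first try:

    /* Substitute the stack slot directly into the insn and let recog
       decide whether the result is still recognizable.  */
    if (validate_change (insn, DF_REF_LOC (info.defs[n]), slot, 0))
      df_insn_modify (df, bb, insn);	/* Insn now accesses stack memory.  */
    else
      ra_emit_move_insn (slot, source);	/* Fall back to a separate store.  */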

I couldn't convince myself to completely remove Denis' code, which does
something slightly different, in that it mostly tries this only for copy
instructions.  Needs some investigation.

Sometimes the label references could become confused, so we now rebuild
the jump labels too.
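
In the patch this amounts to a single call added at the end of
reg_alloc() in ra.c:

    rebuild_jump_labels (get_insns ());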

Booted/regtested on i686-linux, all langs except Ada+treelang.  Fixes 7
Fortran regressions.


Ciao,
Michael.
-- 
2003-06-30  Michael Matz  <matz@suse.de>
	* ra-rewrite.c (uninit_webs_which_changed): New.
	(insert_stores): Use validate_change.  Don't respill already spilled
	refs.  Possibly mark the web as uninit but changed.  Clear the already
	spilled mark.
	(emit_loads): Try to do the load in-place.
	(detect_web_parts_to_rebuild): Handle uninited webs which changed.
	(actual_spill): Allocate/free uninit_webs_which_changed.
	* ra.c (reg_alloc): Rebuild jump labels.
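
The interplay between emit_loads() and insert_stores() above is driven by
the new DF_REF_ALREADY_SPILLED flag; in rough outline (a sketch, not the
literal patch code):

    /* emit_loads(): for a read-modify-write insn, try to replace both
       the use and the def by the stack slot in one change group, so
       that either both locations are rewritten or neither is.  */
    validate_change (insn, DF_REF_LOC (use), slot, 1);
    validate_change (insn, DF_REF_LOC (def), slot, 1);
    if (apply_change_group ())
      /* The def is rewritten already; tell insert_stores() not to
         validate and store it a second time.  */
      DF_REF_FLAGS (def) |= DF_REF_ALREADY_SPILLED;

insert_stores() checks the flag before trying its own validate_change()
and clears it again once the insn has been processed.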

diff -urpN work-gcc.orig/gcc/ra-rewrite.c work-gcc/gcc/ra-rewrite.c
--- work-gcc.orig/gcc/ra-rewrite.c	2003-06-16 17:51:58.000000000 +0200
+++ work-gcc/gcc/ra-rewrite.c	2003-06-16 17:53:25.000000000 +0200
@@ -87,6 +87,10 @@ static int coalesce_spill_slot PARAMS ((
    Very similar to ra_modified_insns.  */
 bitmap last_changed_insns;

+/* A collection of web IDs for uninitialized webs which changed their
+   layout while spilling.  See insert_stores().  */
+static bitmap uninit_webs_which_changed;
+
 /* For tracking some statistics, we count the number (and cost)
    of deleted move insns.  */
 static unsigned int deleted_move_insns;
@@ -781,21 +785,53 @@ insert_stores (new_deaths)
 	      if ((!last_slot || !rtx_equal_p (slot, last_slot))
 		  && ! slot_member_p (slots, slot))
 		{
+		  unsigned int done = 0;
 		  rtx insns, ni;
+		  struct ra_ref *rdef = DF2RA (df2ra, info.defs[n]);
 		  rtx spill;
 		  int has_use;
 		  last_slot = slot;
 		  remember_slot (&slots, slot);
+#ifdef DENIS
 		  if ((web->pattern || copy_insn_p (insn, NULL, NULL))
 		      && ra_validate_change (insn, DF_REF_LOC (info.defs[n]),
 					     slot, 0))
+#else
+		  if ((DF_REF_FLAGS (info.defs[n]) & DF_REF_ALREADY_SPILLED)
+		      || (! (rdef && RA_REF_ADDRESS_P (rdef))
+			  && validate_change (insn, DF_REF_LOC (info.defs[n]),
+					      slot, 0)))
 		    {
 		      df_insn_modify (df, bb, insn);
-		      bitmap_set_bit (ra_modified_insns, uid);
 		      bitmap_set_bit (last_changed_insns, uid);
+		      if (!flag_ra_test)
+			bitmap_set_bit (ra_modified_insns, uid);
+#endif
 		      if (!bitmap_bit_p (useless_defs,
 					 DF_REF_ID (info.defs[n])))
 			ra_emit_move_insn (source, slot);
+		      /* If we have an rmw webpart whose read part was
+			 uninitialized, we didn't connect them in
+			 connect_rmw_web_parts().  But changing this
+			 (write part) web in place means that the read part
+			 will also go away in the next round.  That means we
+			 have to mark that read part web, too, as having its
+			 layout changed.  */
+		      if (DF_REF_FLAGS (info.defs[n]) & DF_REF_READ_WRITE)
+			{
+			  unsigned int n2;
+			  for (n2 = 0; n2 < info.num_uses; n2++)
+			    if ((DF_REF_REAL_REG (info.defs[n])
+				 == DF_REF_REAL_REG (info.uses[n2]))
+				&& (alias (find_web_for_subweb (def2web[DF_REF_ID
+							 (info.defs[n])]))
+				    != alias (find_web_for_subweb (use2web[DF_REF_ID
+							    (info.uses[n2])]))))
+			      bitmap_set_bit (uninit_webs_which_changed,
+					      find_web_for_subweb
+					      (use2web[DF_REF_ID
+					       (info.uses[n2])])->id);
+			}
 		    }
 		  else
 		    ra_emit_move_insn (slot, source);
@@ -850,6 +886,7 @@ insert_stores (new_deaths)
 		  /* Otherwise ignore insns from adjust_address() above.  */
 		  end_sequence ();
 		}
+	      DF_REF_FLAGS (info.defs[n]) &= ~DF_REF_ALREADY_SPILLED;
 	    }
 	}
       /* If we look at a load generated by the allocator, forget
@@ -1033,7 +1070,8 @@ emit_loads (ri, nl_first_reload, last_bl
       rtx ni, slot, reg;
       enum machine_mode innermode;
       rtx before = NULL_RTX, after = NULL_RTX;
-      basic_block bb;
+      int done;
+      basic_block bb = bb;	/* Silence an uninitialized-use warning.  */
       /* When spilltemps were spilled for the last insns, their
 	 loads already are emitted, which is noted by setting
 	 needed_loads[] for it to 0.  */
@@ -1070,6 +1108,8 @@ emit_loads (ri, nl_first_reload, last_bl
       if (GET_CODE (reg) == SUBREG)
 	slot = simplify_gen_subreg (GET_MODE (reg), slot, innermode,
 				    SUBREG_BYTE (reg));
+      done = 0;
+#ifdef DENIS
       if (web->one_load && web->last_use_insn
  	  && copy_insn_p (web->last_use_insn, NULL,NULL)
 	  && ra_validate_change (web->last_use_insn,
@@ -1079,8 +1119,63 @@ emit_loads (ri, nl_first_reload, last_bl
 	  df_insn_modify (df, bb, web->last_use_insn);
 	  bitmap_set_bit (ra_modified_insns, INSN_UID (web->last_use_insn));
 	  bitmap_set_bit (last_changed_insns, INSN_UID (web->last_use_insn));
+	  done = 1;
 	}
-      else
+#else
+      if (web->one_load && web->last_use_insn)
+	{
+	  unsigned int n;
+	  struct ra_insn_info info = insn_df[INSN_UID (web->last_use_insn)];
+	  struct ra_ref *ruse;
+	  /* Search for a DEF which defines exactly the same (sub)web as
+	     the USE (i.e. not just an overlapping part of it).  */
+	  for (n = 0; n < info.num_defs; n++)
+	    if (web == def2web[DF_REF_ID (info.defs[n])])
+	      break;
+	  ruse = DF2RA (df2ra, web->last_use);
+	  if (n != info.num_defs)
+	    {
+	      struct ra_ref *rdef = DF2RA (df2ra, info.defs[n]);
+	      /* Don't put stack pseudos into addresses.  */
+	      if (! ((ruse && RA_REF_ADDRESS_P (ruse))
+		     || (rdef && RA_REF_ADDRESS_P (rdef))))
+		{
+		  if (GET_MODE_SIZE (GET_MODE (DF_REF_REG (info.defs[n])))
+		      != GET_MODE_SIZE (GET_MODE (DF_REF_REG (web->last_use))))
+		    abort ();
+		  /* For an rmw web we want to try to change the use and the
+		     def in place to the mem-ref.  If that doesn't work, only
+		     try to handle the use.  */
+		  validate_change (web->last_use_insn,
+				   DF_REF_LOC (web->last_use), slot, 1);
+		  validate_change (web->last_use_insn,
+				   DF_REF_LOC (info.defs[n]), slot, 1);
+		  if (apply_change_group ())
+		    {
+		      DF_REF_FLAGS (info.defs[n]) |= DF_REF_ALREADY_SPILLED;
+		      done = 1;
+		    }
+		}
+	    }
+	  /* Either there was no rmw web, or spilling the def as well
+	     didn't work, so handle just the use here.  */
+	  if (!done && !(ruse && RA_REF_ADDRESS_P (ruse))
+	      && validate_change (web->last_use_insn,
+				  DF_REF_LOC (web->last_use), slot, 0))
+	    done = 1;
+	  if (done)
+	    {
+	      bb = BLOCK_FOR_INSN (web->last_use_insn);
+	      df_insn_modify (df, bb, web->last_use_insn);
+	      bitmap_set_bit (last_changed_insns,
+			      INSN_UID (web->last_use_insn));
+	      if (!flag_ra_test)
+		bitmap_set_bit (ra_modified_insns,
+				INSN_UID (web->last_use_insn));
+	    }
+	}
+#endif
+      if (!done)
 	ra_emit_move_insn (reg, slot);
       ni = get_insns ();
       end_sequence ();
@@ -1732,6 +1827,24 @@ detect_web_parts_to_rebuild ()
 	  }
       });

+  /* We generally want to handle all webs whose layout changed, plus the webs
+     which conflicted with them (for those we only need to recheck their
+     conflicts, but do not need to rebuild them).  Normally only the SPILLED
+     webs have changed layout, but under some circumstances (see
+     insert_stores()) this also happens to uninitialized rmw webs.  */
+  if (uninit_webs_which_changed)
+    for (i = 0; i < num_webs - num_subwebs; i++)
+      if (alias (ID2WEB (i))->type != SPILLED
+	  && bitmap_bit_p (uninit_webs_which_changed, i))
+        {
+	  struct web *web = ID2WEB (i);
+	  /* Check if it's indeed an uninitialized web.  */
+	  if (web->num_defs || web->conflict_list)
+	    abort ();
+	  remove_web_from_list (web);
+	  put_web (web, SPILLED);
+        }
+
   /* We need to recheck all uses of all webs involved in spilling (and the
      uses added by spill insns, but those are not analyzed yet).
     Those are the spilled webs themselves, webs coalesced to spilled ones,
@@ -2046,6 +2159,7 @@ actual_spill (spill_p)
   if (last_changed_insns)
     BITMAP_XFREE (last_changed_insns);
   last_changed_insns = BITMAP_XMALLOC ();
+  uninit_webs_which_changed = BITMAP_XMALLOC ();
   reset_changed_flag ();
   spill_coalprop ();
   choose_spill_colors ();
@@ -2072,6 +2186,7 @@ actual_spill (spill_p)
   detect_web_parts_to_rebuild ();
   BITMAP_XFREE (webs_changed_layout);
   BITMAP_XFREE (new_deaths);
+  BITMAP_XFREE (uninit_webs_which_changed);
   return rebuildit;
 }

diff -urpN work-gcc.orig/gcc/ra.c work-gcc/gcc/ra.c
--- work-gcc.orig/gcc/ra.c	2003-06-16 17:51:58.000000000 +0200
+++ work-gcc/gcc/ra.c	2003-06-16 17:53:25.000000000 +0200
@@ -1202,6 +1202,7 @@ reg_alloc ()
 	}
     }
   BITMAP_XFREE (use_insns);
+  rebuild_jump_labels (get_insns ());
   /* We might have deleted/moved dead stores, which could trap (mem accesses
      with flag_non_call_exceptions).  This might have made some edges dead.
      Get rid of them now.  No need to rebuild life info with that call,

