This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[trunk<-vta] Re: [vta, 4.4] make df-scan df_collection_rec arrays overflow-safe


On Jan  9, 2008, Alexandre Oliva <aoliva@redhat.com> wrote:

> df-scan.c uses alloca()ed fixed-size arrays for df_collection_rec
> members.  The limits ought to be large enough for normal cases, and
> the short-lived nature of the arrays makes them appropriate for fast
> stack allocation.

[paragraph below slightly updated from the original]
> A consequence of substituting stuff into debug insns is that they grow
> unbounded for some testcases.  In the case I chose for debugging,
> there was a debug insn with 1388 df_use_vec members.  It took a long
> time just to debug_rtx it.  Ouch!  ;-)

> I decided that, instead of trying to fix what triggered the error, I'd
> first make sure the error didn't occur any more.  Call that papering
> over the actual problem if you like, but to me it's proactive problem
> aversion ;-) df-scan shouldn't just hope that no instruction or
> addressing mode is ever complex enough to exceed the fixed limits;
> it should cope when one is.

> So I came up with a trade-off that should benefit nearly everyone.
> First, I arranged for the array to start out still on the stack, but
> to grow as needed onto the heap.  Using GC memory for growth would
> work, but it was easy enough to add explicit deallocation where
> needed.

> And then, given that the array could now grow unbounded, the initial
> sizes didn't have to be so outrageously conservative, so I've reduced
> stack usage while at that.  No biggie, but still...

> Of course the trade-off is that additions to the array now incur an
> additional test-and-branch in the normal case.

Ok for trunk?

for  gcc/ChangeLog
from  Alexandre Oliva  <aoliva@redhat.com>

	* df-scan.c (struct df_collection_rec): Add alloc_ members.
	(vec_SIZE_def, vec_SIZE_use, vec_SIZE_eq_use, vec_SIZE_mw): New.
	(safe_alloca_vec, safe_grow_vec, safe_free_vec): New.
	(df_insn_rescan_1): Move allocation down.  Use new macros.
	(df_bb_refs_record): Likewise.
	(df_notes_rescan): Use new macros.
	(df_ref_create_structure, df_ref_record): Likewise.
	(df_record_entry_block_defs): Likewise.
	(df_record_exit_block_uses): Likewise.
	(df_bb_verify): Likewise.
	
Index: gcc/df-scan.c
===================================================================
--- gcc/df-scan.c.orig	2008-01-07 18:18:44.000000000 -0200
+++ gcc/df-scan.c	2008-01-07 18:25:40.000000000 -0200
@@ -87,13 +87,71 @@
   df_ref * def_vec;
   df_ref * use_vec;
   unsigned int next_def;
+  unsigned int alloc_def;
   unsigned int next_use;
+  unsigned int alloc_use;
   df_ref * eq_use_vec;
   struct df_mw_hardreg **mw_vec;
   unsigned int next_eq_use;
+  unsigned int alloc_eq_use;
   unsigned int next_mw;
+  unsigned int alloc_mw;
 };
 
+/* The following macros define the default sizes for the vectors above.  */
+
+#define vec_SIZE_def 256
+#define vec_SIZE_use 256
+#define vec_SIZE_eq_use 256
+#define vec_SIZE_mw 64
+
+/* Initialize the vector to a stack-allocated region.  PTR is a struct
+   df_collection_rec*, and FLD is either def, use, eq_use or mw.  */
+
+#define safe_alloca_vec(ptr, fld, type)					\
+  ((ptr)->fld##_vec = ((df_##type *)					\
+		       alloca (sizeof (*(ptr)->fld##_vec)		\
+			       * ((ptr)->alloc_##fld = vec_SIZE_##fld))))
+
+/* Safely grow the vector, moving to a non-alloca area if we outgrow
+   the default size, and growing exponentially as needed.  The result
+   is an lvalue for the newly-added vector element.  */
+
+#define safe_grow_vec(ptr, fld, type)					\
+  (*(((ptr)->next_##fld < (ptr)->alloc_##fld				\
+      ? 0								\
+      : (ptr)->next_##fld <= vec_SIZE_##fld				\
+      ? ((ptr)->fld##_vec = ((df_##type *)				\
+			     memcpy (xmalloc				\
+				     (sizeof (*(ptr)->fld##_vec)	\
+				      * ((ptr)->alloc_##fld		\
+					 = 2 * vec_SIZE_##fld)),	\
+				     (ptr)->fld##_vec,			\
+				     sizeof (*(ptr)->fld##_vec)		\
+				     * (ptr)->next_##fld)))		\
+      : ((ptr)->fld##_vec = ((df_##type *)				\
+			     xrealloc ((ptr)->fld##_vec,		\
+				       (sizeof (*(ptr)->fld##_vec)	\
+					* (((ptr)->alloc_##fld		\
+					    *= 2))))))),		\
+     &(ptr)->fld##_vec[(ptr)->next_##fld++]))
+
+/* Release the vector storage, if we've used non-alloca memory.  */
+
+#define safe_free_vec(ptr, fld)						\
+  (((ptr)->alloc_##fld > vec_SIZE_##fld					\
+    ? free ((ptr)->fld##_vec), 0					\
+    : 0),								\
+   (ptr)->next_##fld = (ptr)->alloc_##fld = 0,				\
+   (ptr)->fld##_vec = 0)
+
+/* This alias enables us to use mwhreg for the type in the macros
+   above.  */
+typedef struct df_mw_hardreg *df_mwhreg;
+
+
+
+
 static df_ref df_null_ref_rec[1];
 static struct df_mw_hardreg * df_null_mw_rec[1];
 
@@ -1199,20 +1257,15 @@
       pool_free (problem_data->mw_reg_pool, *mw);
 }
 
-
 /* Rescan INSN.  Return TRUE if the rescanning produced any changes.  */
 
-bool 
+bool
 df_insn_rescan (rtx insn)
 {
   unsigned int uid = INSN_UID (insn);
   struct df_insn_info *insn_info = NULL;
   basic_block bb = BLOCK_FOR_INSN (insn);
   struct df_collection_rec collection_rec;
-  collection_rec.def_vec = XALLOCAVEC (df_ref, 1000);
-  collection_rec.use_vec = XALLOCAVEC (df_ref, 1000);
-  collection_rec.eq_use_vec = XALLOCAVEC (df_ref, 1000);
-  collection_rec.mw_vec = XALLOCAVEC (struct df_mw_hardreg *, 100);
 
   if ((!df) || (!INSN_P (insn)))
     return false;
@@ -1256,6 +1309,12 @@
   bitmap_clear_bit (df->insns_to_delete, uid);
   bitmap_clear_bit (df->insns_to_rescan, uid);
   bitmap_clear_bit (df->insns_to_notes_rescan, uid);
+
+  safe_alloca_vec (&collection_rec, def, ref);
+  safe_alloca_vec (&collection_rec, use, ref);
+  safe_alloca_vec (&collection_rec, eq_use, ref);
+  safe_alloca_vec (&collection_rec, mw, mwhreg);
+
   if (insn_info)
     {
       int luid;
@@ -1264,8 +1323,13 @@
       if (the_same)
 	{
 	  df_free_collection_rec (&collection_rec);
+	  safe_free_vec (&collection_rec, def);
+	  safe_free_vec (&collection_rec, use);
+	  safe_free_vec (&collection_rec, eq_use);
+	  safe_free_vec (&collection_rec, mw);
 	  if (dump_file)
 	    fprintf (dump_file, "verify found no changes in insn with uid = %d.\n", uid);
+
 	  return false;
 	}
       if (dump_file)
@@ -1288,6 +1352,12 @@
 
   df_refs_add_to_chains (&collection_rec, bb, insn);
   df_set_bb_dirty (bb);
+
+  safe_free_vec (&collection_rec, def);
+  safe_free_vec (&collection_rec, use);
+  safe_free_vec (&collection_rec, eq_use);
+  safe_free_vec (&collection_rec, mw);
+
   return true;
 }
 
@@ -2133,9 +2259,10 @@
       unsigned int num_deleted;
 
       memset (&collection_rec, 0, sizeof (struct df_collection_rec));
-      collection_rec.eq_use_vec = XALLOCAVEC (df_ref, 1000);
-      collection_rec.mw_vec = XALLOCAVEC (struct df_mw_hardreg *, 1000);
 
+      safe_alloca_vec (&collection_rec, eq_use, ref);
+      safe_alloca_vec (&collection_rec, mw, mwhreg);
+
       num_deleted = df_mw_hardreg_chain_delete_eq_uses (insn_info);
       df_ref_chain_delete (insn_info->eq_uses);
       insn_info->eq_uses = NULL;
@@ -2199,6 +2326,9 @@
       collection_rec.mw_vec = NULL;
       collection_rec.next_mw = 0;
       df_refs_add_to_chains (&collection_rec, bb, insn);
+
+      safe_free_vec (&collection_rec, eq_use);
+      safe_free_vec (&collection_rec, mw);
     }
   else
     df_insn_rescan (insn);
@@ -2767,11 +2897,11 @@
   if (collection_rec)
     {
       if (DF_REF_REG_DEF_P (this_ref))
-	collection_rec->def_vec[collection_rec->next_def++] = this_ref;
+	safe_grow_vec (collection_rec, def, ref) = this_ref;
       else if (DF_REF_FLAGS (this_ref) & DF_REF_IN_NOTE)
-	collection_rec->eq_use_vec[collection_rec->next_eq_use++] = this_ref;
+	safe_grow_vec (collection_rec, eq_use, ref) = this_ref;
       else
-	collection_rec->use_vec[collection_rec->next_use++] = this_ref;
+	safe_grow_vec (collection_rec, use, ref) = this_ref;
     }
 
   return this_ref;
@@ -2837,7 +2967,7 @@
 	  hardreg->start_regno = regno;
 	  hardreg->end_regno = endregno - 1;
 	  hardreg->mw_order = df->ref_order++;
-	  collection_rec->mw_vec[collection_rec->next_mw++] = hardreg;
+	  safe_grow_vec (collection_rec, mw, mwhreg) = hardreg;
 	}
 
       for (i = regno; i < endregno; i++)
@@ -3590,14 +3728,15 @@
   int luid = 0;
   struct df_scan_bb_info *bb_info;
   struct df_collection_rec collection_rec;
-  collection_rec.def_vec = XALLOCAVEC (df_ref, 1000);
-  collection_rec.use_vec = XALLOCAVEC (df_ref, 1000);
-  collection_rec.eq_use_vec = XALLOCAVEC (df_ref, 1000);
-  collection_rec.mw_vec = XALLOCAVEC (struct df_mw_hardreg *, 100);
 
   if (!df)
     return;
 
+  safe_alloca_vec (&collection_rec, def, ref);
+  safe_alloca_vec (&collection_rec, use, ref);
+  safe_alloca_vec (&collection_rec, eq_use, ref);
+  safe_alloca_vec (&collection_rec, mw, mwhreg);
+
   bb_info = df_scan_get_bb_info (bb_index);
 
   /* Need to make sure that there is a record in the basic block info. */  
@@ -3634,6 +3773,11 @@
   /* Now that the block has been processed, set the block as dirty so
      LR and LIVE will get it processed.  */
   df_set_bb_dirty (bb);
+
+  safe_free_vec (&collection_rec, def);
+  safe_free_vec (&collection_rec, use);
+  safe_free_vec (&collection_rec, eq_use);
+  safe_free_vec (&collection_rec, mw);
 }
 
 
@@ -3889,12 +4033,14 @@
 {
   struct df_collection_rec collection_rec;
   memset (&collection_rec, 0, sizeof (struct df_collection_rec));
-  collection_rec.def_vec = XALLOCAVEC (df_ref, FIRST_PSEUDO_REGISTER);
+  safe_alloca_vec (&collection_rec, def, ref);
 
   df_entry_block_defs_collect (&collection_rec, entry_block_defs);
 
   /* Process bb_refs chain */
   df_refs_add_to_chains (&collection_rec, BASIC_BLOCK (ENTRY_BLOCK), NULL);
+
+  safe_free_vec (&collection_rec, def);
 }
 
 
@@ -4060,12 +4206,14 @@
 {
   struct df_collection_rec collection_rec;
   memset (&collection_rec, 0, sizeof (struct df_collection_rec));
-  collection_rec.use_vec = XALLOCAVEC (df_ref, FIRST_PSEUDO_REGISTER);
+  safe_alloca_vec (&collection_rec, use, ref);
 
   df_exit_block_uses_collect (&collection_rec, exit_block_uses);
 
   /* Process bb_refs chain */
   df_refs_add_to_chains (&collection_rec, BASIC_BLOCK (EXIT_BLOCK), NULL);
+
+  safe_free_vec (&collection_rec, use);
 }
 
 
@@ -4424,10 +4572,10 @@
   struct df_collection_rec collection_rec;
   
   memset (&collection_rec, 0, sizeof (struct df_collection_rec));
-  collection_rec.def_vec = XALLOCAVEC (df_ref, 1000);
-  collection_rec.use_vec = XALLOCAVEC (df_ref, 1000);
-  collection_rec.eq_use_vec = XALLOCAVEC (df_ref, 1000);
-  collection_rec.mw_vec = XALLOCAVEC (struct df_mw_hardreg *, 100);
+  safe_alloca_vec (&collection_rec, def, ref);
+  safe_alloca_vec (&collection_rec, use, ref);
+  safe_alloca_vec (&collection_rec, eq_use, ref);
+  safe_alloca_vec (&collection_rec, mw, mwhreg);
 
   gcc_assert (bb_info);
 
@@ -4446,6 +4594,11 @@
   df_refs_verify (collection_rec.use_vec, df_get_artificial_uses (bb->index), true);
   df_free_collection_rec (&collection_rec);
   
+  safe_free_vec (&collection_rec, def);
+  safe_free_vec (&collection_rec, use);
+  safe_free_vec (&collection_rec, eq_use);
+  safe_free_vec (&collection_rec, mw);
+
   return true;
 }
 
-- 
Alexandre Oliva, freedom fighter    http://FSFLA.org/~lxoliva/
You must be the change you wish to see in the world. -- Gandhi
Be Free! -- http://FSFLA.org/   FSF Latin America board member
Free Software Evangelist      Red Hat Brazil Compiler Engineer

Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]