[PATCH 15/35] Change to use type-based pool allocator in dse.c.

mliska <mliska@suse.cz>
Wed May 27 14:21:00 GMT 2015


gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* dse.c (get_group_info): Use new type-based pool allocator.
	(dse_step0): Likewise.
	(free_store_info): Likewise.
	(delete_dead_store_insn): Likewise.
	(free_read_records): Likewise.
	(record_store): Likewise.
	(replace_read): Likewise.
	(check_mem_read_rtx): Likewise.
	(scan_insn): Likewise.
	(dse_step1): Likewise.
	(dse_step7): Likewise.
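
Every converted type follows the same pattern: the old global alloc_pool and
its pool_alloc/pool_free call sites are replaced either by a static
pool_allocator<T> used directly (allocate/remove), or by giving the type
operator new/delete overloads that delegate to a static, type-based pool so
call sites can use plain new/delete.  A minimal sketch of the latter pattern
follows; the struct and pool names are illustrative only and do not appear in
the patch:

  struct example_node
  {
    example_node *next;

    /* Forward allocation requests to the type's pool.  */
    inline void *operator new (size_t)
    {
      return pool.allocate ();
    }

    /* Return the object to the pool instead of the heap.  */
    inline void operator delete (void *ptr)
    {
      pool.remove ((example_node *) ptr);
    }

    /* Memory allocation pool, one instance per type.  */
    static pool_allocator<example_node> pool;
  };

  /* Sized like the old create_alloc_pool call it replaces.  */
  pool_allocator<example_node> example_node::pool ("example_node_pool", 100);

At the end of the pass each pool is torn down with T::pool.release (), as
dse_step7 does below.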
---
 gcc/dse.c | 201 ++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 129 insertions(+), 72 deletions(-)

diff --git a/gcc/dse.c b/gcc/dse.c
index b3b38d5..5ade9dd 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -249,7 +249,7 @@ static struct obstack dse_obstack;
 /* Scratch bitmap for cselib's cselib_expand_value_rtx.  */
 static bitmap scratch = NULL;
 
-struct insn_info;
+struct insn_info_type;
 
 /* This structure holds information about a candidate store.  */
 struct store_info
@@ -316,7 +316,7 @@ struct store_info
   /* Set if this store stores the same constant value as REDUNDANT_REASON
      insn stored.  These aren't eliminated early, because doing that
      might prevent the earlier larger store to be eliminated.  */
-  struct insn_info *redundant_reason;
+  struct insn_info_type *redundant_reason;
 };
 
 /* Return a bitmask with the first N low bits set.  */
@@ -329,12 +329,15 @@ lowpart_bitmask (int n)
 }
 
 typedef struct store_info *store_info_t;
-static alloc_pool cse_store_info_pool;
-static alloc_pool rtx_store_info_pool;
+static pool_allocator<store_info> cse_store_info_pool ("cse_store_info_pool",
+						       100);
+
+static pool_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool",
+						       100);
 
 /* This structure holds information about a load.  These are only
    built for rtx bases.  */
-struct read_info
+struct read_info_type
 {
   /* The id of the mem group of the base address.  */
   int group_id;
@@ -351,15 +354,30 @@ struct read_info
   rtx mem;
 
   /* The next read_info for this insn.  */
-  struct read_info *next;
+  struct read_info_type *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((read_info_type *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<read_info_type> pool;
 };
-typedef struct read_info *read_info_t;
-static alloc_pool read_info_pool;
+typedef struct read_info_type *read_info_t;
 
+pool_allocator<read_info_type> read_info_type::pool ("read_info_pool", 100);
 
 /* One of these records is created for each insn.  */
 
-struct insn_info
+struct insn_info_type
 {
   /* Set true if the insn contains a store but the insn itself cannot
      be deleted.  This is set if the insn is a parallel and there is
@@ -433,27 +451,41 @@ struct insn_info
   regset fixed_regs_live;
 
   /* The prev insn in the basic block.  */
-  struct insn_info * prev_insn;
+  struct insn_info_type * prev_insn;
 
   /* The linked list of insns that are in consideration for removal in
      the forwards pass through the basic block.  This pointer may be
      trash as it is not cleared when a wild read occurs.  The only
      time it is guaranteed to be correct is when the traversal starts
      at active_local_stores.  */
-  struct insn_info * next_local_store;
+  struct insn_info_type * next_local_store;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((insn_info_type *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<insn_info_type> pool;
 };
+typedef struct insn_info_type *insn_info_t;
 
-typedef struct insn_info *insn_info_t;
-static alloc_pool insn_info_pool;
+pool_allocator<insn_info_type> insn_info_type::pool ("insn_info_pool", 100);
 
 /* The linked list of stores that are under consideration in this
    basic block.  */
 static insn_info_t active_local_stores;
 static int active_local_stores_len;
 
-struct dse_bb_info
+struct dse_bb_info_type
 {
-
   /* Pointer to the insn info for the last insn in the block.  These
      are linked so this is how all of the insns are reached.  During
      scanning this is the current insn being scanned.  */
@@ -507,10 +539,25 @@ struct dse_bb_info
      to assure that shift and/or add sequences that are inserted do not
      accidentally clobber live hard regs.  */
   bitmap regs_live;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((dse_bb_info_type *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<dse_bb_info_type> pool;
 };
 
-typedef struct dse_bb_info *bb_info_t;
-static alloc_pool bb_info_pool;
+typedef struct dse_bb_info_type *bb_info_t;
+pool_allocator<dse_bb_info_type> dse_bb_info_type::pool ("bb_info_pool", 100);
 
 /* Table to hold all bb_infos.  */
 static bb_info_t *bb_table;
@@ -578,10 +625,26 @@ struct group_info
      care about.  */
   int *offset_map_n, *offset_map_p;
   int offset_map_size_n, offset_map_size_p;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((group_info *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<group_info> pool;
 };
 typedef struct group_info *group_info_t;
 typedef const struct group_info *const_group_info_t;
-static alloc_pool rtx_group_info_pool;
+
+pool_allocator<group_info> group_info::pool ("rtx_group_info_pool", 100);
 
 /* Index into the rtx_group_vec.  */
 static int rtx_group_next_id;
@@ -602,10 +665,27 @@ struct deferred_change
   rtx reg;
 
   struct deferred_change *next;
+
+  /* Pool allocation new operator.  */
+  inline void *operator new (size_t)
+  {
+    return pool.allocate ();
+  }
+
+  /* Delete operator utilizing pool allocation.  */
+  inline void operator delete (void *ptr)
+  {
+    pool.remove ((deferred_change *) ptr);
+  }
+
+  /* Memory allocation pool.  */
+  static pool_allocator<deferred_change> pool;
 };
 
 typedef struct deferred_change *deferred_change_t;
-static alloc_pool deferred_change_pool;
+
+pool_allocator<deferred_change> deferred_change::pool
+  ("deferred_change_pool", 10);
 
 static deferred_change_t deferred_change_list = NULL;
 
@@ -712,8 +792,7 @@ get_group_info (rtx base)
     {
       if (!clear_alias_group)
 	{
-	  clear_alias_group = gi =
-	    (group_info_t) pool_alloc (rtx_group_info_pool);
+	  clear_alias_group = gi = new group_info;
 	  memset (gi, 0, sizeof (struct group_info));
 	  gi->id = rtx_group_next_id++;
 	  gi->store1_n = BITMAP_ALLOC (&dse_bitmap_obstack);
@@ -735,7 +814,7 @@ get_group_info (rtx base)
 
   if (gi == NULL)
     {
-      *slot = gi = (group_info_t) pool_alloc (rtx_group_info_pool);
+      *slot = gi = new group_info;
       gi->rtx_base = base;
       gi->id = rtx_group_next_id++;
       gi->base_mem = gen_rtx_MEM (BLKmode, base);
@@ -776,24 +855,6 @@ dse_step0 (void)
   scratch = BITMAP_ALLOC (&reg_obstack);
   kill_on_calls = BITMAP_ALLOC (&dse_bitmap_obstack);
 
-  rtx_store_info_pool
-    = create_alloc_pool ("rtx_store_info_pool",
-			 sizeof (struct store_info), 100);
-  read_info_pool
-    = create_alloc_pool ("read_info_pool",
-			 sizeof (struct read_info), 100);
-  insn_info_pool
-    = create_alloc_pool ("insn_info_pool",
-			 sizeof (struct insn_info), 100);
-  bb_info_pool
-    = create_alloc_pool ("bb_info_pool",
-			 sizeof (struct dse_bb_info), 100);
-  rtx_group_info_pool
-    = create_alloc_pool ("rtx_group_info_pool",
-			 sizeof (struct group_info), 100);
-  deferred_change_pool
-    = create_alloc_pool ("deferred_change_pool",
-			 sizeof (struct deferred_change), 10);
 
   rtx_group_table = new hash_table<invariant_group_base_hasher> (11);
 
@@ -829,9 +890,9 @@ free_store_info (insn_info_t insn_info)
       if (store_info->is_large)
 	BITMAP_FREE (store_info->positions_needed.large.bmap);
       if (store_info->cse_base)
-	pool_free (cse_store_info_pool, store_info);
+	cse_store_info_pool.remove (store_info);
       else
-	pool_free (rtx_store_info_pool, store_info);
+	rtx_store_info_pool.remove (store_info);
       store_info = next;
     }
 
@@ -948,7 +1009,7 @@ check_for_inc_dec_1 (insn_info_t insn_info)
 bool
 check_for_inc_dec (rtx_insn *insn)
 {
-  struct insn_info insn_info;
+  insn_info_type insn_info;
   rtx note;
 
   insn_info.insn = insn;
@@ -989,7 +1050,7 @@ delete_dead_store_insn (insn_info_t insn_info)
   while (read_info)
     {
       read_info_t next = read_info->next;
-      pool_free (read_info_pool, read_info);
+      delete read_info;
       read_info = next;
     }
   insn_info->read_rec = NULL;
@@ -1113,7 +1174,7 @@ free_read_records (bb_info_t bb_info)
       read_info_t next = (*ptr)->next;
       if ((*ptr)->alias_set == 0)
         {
-          pool_free (read_info_pool, *ptr);
+	  delete *ptr;
           *ptr = next;
         }
       else
@@ -1167,7 +1228,7 @@ const_or_frame_p (rtx x)
 	return true;
       return false;
     }
-  
+
   return false;
 }
 
@@ -1488,7 +1549,7 @@ record_store (rtx body, bb_info_t bb_info)
       if (clear_alias_group->offset_map_size_p < spill_alias_set)
 	clear_alias_group->offset_map_size_p = spill_alias_set;
 
-      store_info = (store_info_t) pool_alloc (rtx_store_info_pool);
+      store_info = rtx_store_info_pool.allocate ();
 
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file, " processing spill store %d(%s)\n",
@@ -1503,7 +1564,7 @@ record_store (rtx body, bb_info_t bb_info)
 	= rtx_group_vec[group_id];
       tree expr = MEM_EXPR (mem);
 
-      store_info = (store_info_t) pool_alloc (rtx_store_info_pool);
+      store_info = rtx_store_info_pool.allocate ();
       set_usage_bits (group, offset, width, expr);
 
       if (dump_file && (dump_flags & TDF_DETAILS))
@@ -1516,7 +1577,7 @@ record_store (rtx body, bb_info_t bb_info)
 	insn_info->stack_pointer_based = true;
       insn_info->contains_cselib_groups = true;
 
-      store_info = (store_info_t) pool_alloc (cse_store_info_pool);
+      store_info = cse_store_info_pool.allocate ();
       group_id = -1;
 
       if (dump_file && (dump_flags & TDF_DETAILS))
@@ -2060,8 +2121,7 @@ replace_read (store_info_t store_info, insn_info_t store_insn,
 
   if (validate_change (read_insn->insn, loc, read_reg, 0))
     {
-      deferred_change_t deferred_change =
-	(deferred_change_t) pool_alloc (deferred_change_pool);
+      deferred_change_t change = new deferred_change;
 
       /* Insert this right before the store insn where it will be safe
 	 from later insns that might change it before the read.  */
@@ -2091,15 +2151,15 @@ replace_read (store_info_t store_info, insn_info_t store_insn,
 	 block we can put them back.  */
 
       *loc = read_info->mem;
-      deferred_change->next = deferred_change_list;
-      deferred_change_list = deferred_change;
-      deferred_change->loc = loc;
-      deferred_change->reg = read_reg;
+      change->next = deferred_change_list;
+      deferred_change_list = change;
+      change->loc = loc;
+      change->reg = read_reg;
 
       /* Get rid of the read_info, from the point of view of the
 	 rest of dse, play like this read never happened.  */
       read_insn->read_rec = read_info->next;
-      pool_free (read_info_pool, read_info);
+      delete read_info;
       if (dump_file && (dump_flags & TDF_DETAILS))
 	{
 	  fprintf (dump_file, " -- replaced the loaded MEM with ");
@@ -2165,7 +2225,7 @@ check_mem_read_rtx (rtx *loc, bb_info_t bb_info)
   else
     width = GET_MODE_SIZE (GET_MODE (mem));
 
-  read_info = (read_info_t) pool_alloc (read_info_pool);
+  read_info = new read_info_type;
   read_info->group_id = group_id;
   read_info->mem = mem;
   read_info->alias_set = spill_alias_set;
@@ -2481,9 +2541,9 @@ static void
 scan_insn (bb_info_t bb_info, rtx_insn *insn)
 {
   rtx body;
-  insn_info_t insn_info = (insn_info_t) pool_alloc (insn_info_pool);
+  insn_info_type *insn_info = new insn_info_type;
   int mems_found = 0;
-  memset (insn_info, 0, sizeof (struct insn_info));
+  memset (insn_info, 0, sizeof (struct insn_info_type));
 
   if (dump_file && (dump_flags & TDF_DETAILS))
     fprintf (dump_file, "\n**scanning insn=%d\n",
@@ -2740,9 +2800,9 @@ dse_step1 (void)
   FOR_ALL_BB_FN (bb, cfun)
     {
       insn_info_t ptr;
-      bb_info_t bb_info = (bb_info_t) pool_alloc (bb_info_pool);
+      bb_info_t bb_info = new dse_bb_info_type;
 
-      memset (bb_info, 0, sizeof (struct dse_bb_info));
+      memset (bb_info, 0, sizeof (dse_bb_info_type));
       bitmap_set_bit (all_blocks, bb->index);
       bb_info->regs_live = regs_live;
 
@@ -2756,9 +2816,6 @@ dse_step1 (void)
 	{
 	  rtx_insn *insn;
 
-	  cse_store_info_pool
-	    = create_alloc_pool ("cse_store_info_pool",
-				 sizeof (struct store_info), 100);
 	  active_local_stores = NULL;
 	  active_local_stores_len = 0;
 	  cselib_clear_table ();
@@ -2820,7 +2877,7 @@ dse_step1 (void)
 	      /* There is no reason to validate this change.  That was
 		 done earlier.  */
 	      *deferred_change_list->loc = deferred_change_list->reg;
-	      pool_free (deferred_change_pool, deferred_change_list);
+	      delete deferred_change_list;
 	      deferred_change_list = next;
 	    }
 
@@ -2866,7 +2923,7 @@ dse_step1 (void)
 	      ptr = ptr->prev_insn;
 	    }
 
-	  free_alloc_pool (cse_store_info_pool);
+	  cse_store_info_pool.release ();
 	}
       bb_info->regs_live = NULL;
     }
@@ -3704,12 +3761,12 @@ dse_step7 (void)
   BITMAP_FREE (all_blocks);
   BITMAP_FREE (scratch);
 
-  free_alloc_pool (rtx_store_info_pool);
-  free_alloc_pool (read_info_pool);
-  free_alloc_pool (insn_info_pool);
-  free_alloc_pool (bb_info_pool);
-  free_alloc_pool (rtx_group_info_pool);
-  free_alloc_pool (deferred_change_pool);
+  rtx_store_info_pool.release ();
+  read_info_type::pool.release ();
+  insn_info_type::pool.release ();
+  dse_bb_info_type::pool.release ();
+  group_info::pool.release ();
+  deferred_change::pool.release ();
 }
 
 
-- 
2.1.4



