This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]

Simplify handling of ggc_push/pop_context


ggc_push/pop_context are a fairly seldom-used feature of the garbage
collector where you can ask it to leave alone everything currently
allocated, because you've got live roots it doesn't know about.
They're used by nested function handling and CSE.

Their implementation is rather complicated, with knock-on effects all
over the garbage collector.  This patch simplifies them considerably.
Performance should be improved both in the normal case and the case
where contexts are pushed.  Memory consumption is reduced for the
normal case.  In the pushed-contexts case, objects at different
context depths can no longer share pages, so that will take more
space, but not much.

The algorithm is detailed in commentary in the code.  I am a tiny bit
worried about one aspect: ggc_alloc assumes that if there's a page
with free entries at the appropriate level of the context stack, it
will be first on that page queue.  I believe this to be true but I
can't prove it.

Bootstrapped i686-linux with normal checking flags.  The C test suite
gets additional -O3 failures with GGC_ALWAYS_COLLECT defined, but I
believe these are unrelated.

-- 
zw   You have to care about the 'fair trial' part; you have to care about
     the 'due process of law' part, and you have to care about these things
     *more* than you care about putting child molesters away.
     	-- Graydon Saunders

	* ggc-page.c (struct page_entry): Remove save_in_use_p.
	(G.context_depth): Replaced by collection_depth and allocation_depth.
	(ggc_recalculate_in_use_p): Delete.
	(ggc_alloc, alloc_page, ggc_push_context, ggc_pop_context,
	clear_marks, sweep_pages, poison_pages, ggc_collect):
	Implement new context stack scheme.

===================================================================
Index: ggc-page.c
--- ggc-page.c	2001/02/20 05:49:05	1.39
+++ ggc-page.c	2001/05/23 17:30:20
@@ -74,7 +74,8 @@ Boston, MA 02111-1307, USA.  */
 
    Each page-entry also has a context depth, which is used to track
    pushing and popping of allocation contexts.  Only objects allocated
-   in the current (highest-numbered) context may be collected.  
+   in the current (highest-numbered) context may be collected.  No page
+   contains objects from more than one context.
 
    Page entries are arranged in an array of singly-linked lists.  The
    array is indexed by the allocation size, in bits, of the pages on
@@ -225,10 +226,6 @@ typedef struct page_entry 
   struct page_group *group;
 #endif
 
-  /* Saved in-use bit vector for pages that aren't in the topmost
-     context during collection.  */
-  unsigned long *save_in_use_p;
-
   /* Context depth of this page.  */
   unsigned short context_depth;
 
@@ -316,8 +313,10 @@ static struct globals
   /* Total amount of memory mapped.  */
   size_t bytes_mapped;
 
-  /* The current depth in the context stack.  */
-  unsigned short context_depth;
+  /* The current depth in the context stack.  There are two different
+     markers.  See comments above ggc_push_context for details. */
+  unsigned short collection_depth;
+  unsigned short allocation_depth;
 
   /* A file descriptor open to /dev/zero for reading.  */
 #if defined (HAVE_MMAP_DEV_ZERO)
@@ -372,7 +371,6 @@ static void free_page PARAMS ((struct pa
 static void release_pages PARAMS ((void));
 static void clear_marks PARAMS ((void));
 static void sweep_pages PARAMS ((void));
-static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
 
 #ifdef GGC_POISON
 static void poison_pages PARAMS ((void));
@@ -714,7 +712,7 @@ alloc_page (order)
 
   entry->bytes = entry_size;
   entry->page = page;
-  entry->context_depth = G.context_depth;
+  entry->context_depth = G.allocation_depth;
   entry->order = order;
   entry->num_free_objects = num_objects;
   entry->next_bit_hint = 1;
@@ -860,6 +858,13 @@ ggc_alloc (size)
   struct page_entry *entry;
   void *result;
 
+  /* If the collection depth has been raised, start allocating in the
+     new topmost context.  (If it has been lowered, we must wait for
+     the next garbage collection to re-validate lower pages' in-use
+     vectors before it is safe to use them again.)  */
+  if (G.collection_depth > G.allocation_depth)
+    G.allocation_depth = G.collection_depth;
+
   if (size <= 256)
     order = size_lookup[size];
   else
@@ -874,8 +879,10 @@ ggc_alloc (size)
   entry = G.pages[order];
 
   /* If there is no page for this object size, or all pages in this
-     context are full, allocate a new page.  */
-  if (entry == NULL || entry->num_free_objects == 0)
+     context are full or not at the current allocation depth, get a
+     new page.  */
+  if (entry == NULL || entry->num_free_objects == 0
+      || entry->context_depth != G.allocation_depth)
     {
       struct page_entry *new_entry;
       new_entry = alloc_page (order);
@@ -1098,89 +1105,57 @@ init_ggc ()
     }
 }
 
-/* Increment the `GC context'.  Objects allocated in an outer context
-   are never freed, eliminating the need to register their roots.  */
+/* Enter a new 'GC context'.  This tells the garbage collector that
+   the registered GC roots do not necessarily cover all objects
+   allocated at the time ggc_push_context is called.  This continues
+   to be true until ggc_pop_context is called.  If a collection occurs
+   while a context has been pushed, objects allocated between paired
+   calls to ggc_push/pop_context must still be reachable from the
+   known set of roots.  ggc_push_context can be called repeatedly;
+   only objects in the topmost context are collectable.
+
+   Here's how it works:
+
+   No page contains objects in more than one context.  The _context
+   depth_ of a page indicates how far down in the context stack it is.
+   Allocation creates new pages whose context depth is equal to the
+   "allocation depth."
+
+   When ggc_collect is called, the mark phase proceeds as normal, but
+   the sweep phase does not touch any pages whose depth is below the
+   "collection depth."  
+
+   ggc_push_context and ggc_pop_context raise or lower the collection
+   depth.  If we go to allocate memory and the collection depth is
+   above the allocation depth, the allocation depth rises to match.
+   At the end of each collection the allocation depth is unconditionally
+   set to the collection depth.
+
+   These rules mean a push_context ... pop_context pair has no
+   lingering effect if no memory was allocated in between them, but if
+   memory was allocated, then allocation continues at the higher
+   context depth until the next garbage collection.  That is necessary
+   because pages with lower depths won't have valid in-use vectors until
+   the next collection happens.  */
 
 void
 ggc_push_context ()
 {
-  ++G.context_depth;
+  ++G.collection_depth;
 
   /* Die on wrap.  */
-  if (G.context_depth == 0)
-    abort ();
-}
-
-/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
-   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
-
-static void
-ggc_recalculate_in_use_p (p)
-     page_entry *p;
-{
-  unsigned int i;
-  size_t num_objects;
-
-  /* Because the past-the-end bit in in_use_p is always set, we 
-     pretend there is one additional object.  */
-  num_objects = OBJECTS_PER_PAGE (p->order) + 1;
-
-  /* Reset the free object count.  */
-  p->num_free_objects = num_objects;
-
-  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
-  for (i = 0; 
-       i < CEIL (BITMAP_SIZE (num_objects),
-		 sizeof (*p->in_use_p));
-       ++i)
-    {
-      unsigned long j;
-
-      /* Something is in use if it is marked, or if it was in use in a
-	 context further down the context stack.  */
-      p->in_use_p[i] |= p->save_in_use_p[i];
-
-      /* Decrement the free object count for every object allocated.  */
-      for (j = p->in_use_p[i]; j; j >>= 1)
-	p->num_free_objects -= (j & 1);
-    }
-
-  if (p->num_free_objects >= num_objects)
+  if (G.collection_depth == 0)
     abort ();
 }
 
-/* Decrement the `GC context'.  All objects allocated since the 
-   previous ggc_push_context are migrated to the outer context.  */
+/* Decrement the `GC context'.  All objects allocated since the
+   previous ggc_push_context will be migrated to the current context
+   in the next collection. */
 
 void
 ggc_pop_context ()
 {
-  unsigned order, depth;
-
-  depth = --G.context_depth;
-
-  /* Any remaining pages in the popped context are lowered to the new
-     current context; i.e. objects allocated in the popped context and
-     left over are imported into the previous context.  */
-  for (order = 2; order < NUM_ORDERS; order++)
-    {
-      page_entry *p;
-
-      for (p = G.pages[order]; p != NULL; p = p->next)
-	{
-	  if (p->context_depth > depth)
-	    p->context_depth = depth;
-
-	  /* If this page is now in the topmost context, and we'd
-	     saved its allocation state, restore it.  */
-	  else if (p->context_depth == depth && p->save_in_use_p)
-	    {
-	      ggc_recalculate_in_use_p (p);
-	      free (p->save_in_use_p);
-	      p->save_in_use_p = 0;
-	    }
-	}
-    }
+  --G.collection_depth;
 }
 
 /* Unmark all objects.  */
@@ -1203,19 +1178,9 @@ clear_marks ()
 	  if ((size_t) p->page & (G.pagesize - 1))
 	    abort ();
 #endif
-
-	  /* Pages that aren't in the topmost context are not collected;
-	     nevertheless, we need their in-use bit vectors to store GC
-	     marks.  So, back them up first.  */
-	  if (p->context_depth < G.context_depth)
-	    {
-	      if (! p->save_in_use_p)
-		p->save_in_use_p = xmalloc (bitmap_size);
-	      memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
-	    }
 
-	  /* Reset reset the number of free objects and clear the
-             in-use bits.  These will be adjusted by mark_obj.  */
+	  /* Reset the number of free objects and clear the in-use
+             bits.  These will be adjusted by ggc_set_mark.  */
 	  p->num_free_objects = num_objects;
 	  memset (p->in_use_p, 0, bitmap_size);
 
@@ -1263,11 +1228,17 @@ sweep_pages ()
 
 	  G.allocated += OBJECT_SIZE (order) * live_objects;
 
-	  /* Only objects on pages in the topmost context should get
-	     collected.  */
-	  if (p->context_depth < G.context_depth)
-	    ;
+	  /* Pages above the current collection depth are lowered
+	     to that depth, then swept normally.  */
 
+	  if (p->context_depth > G.collection_depth)
+	    p->context_depth = G.collection_depth;
+
+	  /* Pages below the current collection depth are left strictly
+	     alone.  */
+	  if (p->context_depth < G.collection_depth)
+	    /* nothing */;
+
 	  /* Remove the page if it's empty.  */
 	  else if (live_objects == 0)
 	    {
@@ -1324,12 +1295,6 @@ sweep_pages ()
 	  p = next;
 	} 
       while (! done);
-
-      /* Now, restore the in_use_p vectors for any pages from contexts
-         other than the current one.  */
-      for (p = G.pages[order]; p; p = p->next)
-	if (p->context_depth != G.context_depth)
-	  ggc_recalculate_in_use_p (p);
     }
 }
 
@@ -1351,7 +1316,7 @@ poison_pages ()
 	{
 	  size_t i;
 
-	  if (p->context_depth != G.context_depth)
+	  if (p->context_depth < G.collection_depth)
 	    /* Since we don't do any collection for pages in pushed
 	       contexts, there's no need to do any poisoning.  And
 	       besides, the IN_USE_P array isn't valid until we pop
@@ -1404,6 +1369,10 @@ ggc_collect ()
 #endif
 
   sweep_pages ();
+
+  /* Now that all pages above the collection depth have been lowered to
+     that depth, it's safe to lower the allocation depth.  */
+  G.allocation_depth = G.collection_depth;
 
   G.allocated_last_gc = G.allocated;
   if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]