PATCH: new GC implementation

Richard Henderson rth@cygnus.com
Thu Sep 30 23:58:00 GMT 1999


On Wed, Sep 22, 1999 at 01:07:56AM -0700, Alex Samuel wrote:
> This patch provides gcc with a new garbage collector implementation.
> It is based on a `bag of pages' model, in which objects are allocated
> from larger pages which are in turn obtained from mmap.
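
For anyone who didn't follow the earlier thread, the model amounts to
roughly the sketch below.  This is an illustration only: the names
(sketch_*, SKETCH_PAGE_SIZE), the single-word bitmap, and the use of
malloc rather than mmap are all invented for the example.

#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PAGE_SIZE 4096   /* stand-in for the system page size */

/* One page hands out objects of a single power-of-two size (1 << order).  */
struct sketch_page
{
  unsigned order;          /* lg of the object size */
  unsigned num_objects;    /* objects that fit on the page */
  unsigned long in_use;    /* toy bitmap: bit N set => object N allocated */
  char *data;              /* the page itself */
};

/* Round SIZE up to a power of two and return its lg (minimum 4 bytes).  */
static unsigned
sketch_order (size_t size)
{
  unsigned order = 2;
  while (((size_t) 1 << order) < size)
    order++;
  return order;
}

/* Hand out one free object of SIZE bytes from PAGE, or NULL if none left.  */
static void *
sketch_alloc (struct sketch_page *page, size_t size)
{
  unsigned i;
  if (sketch_order (size) != page->order)
    return NULL;
  for (i = 0; i < page->num_objects; i++)
    if (!((page->in_use >> i) & 1))
      {
        page->in_use |= 1UL << i;
        return page->data + ((size_t) i << page->order);
      }
  return NULL;
}

int
main (void)
{
  struct sketch_page page;
  char *obj;

  page.order = sketch_order (97);                     /* 97 -> 128 bytes */
  page.num_objects = SKETCH_PAGE_SIZE >> page.order;  /* 32 per page */
  page.in_use = 0;
  page.data = malloc (SKETCH_PAGE_SIZE);
  if (page.data == NULL)
    return 1;

  obj = sketch_alloc (&page, 97);
  printf ("order %u, %u objects/page, first object at offset %ld\n",
          page.order, page.num_objects, (long) (obj - page.data));
  free (page.data);
  return 0;
}

The patch scales this idea up: pages come from mmap (or /dev/zero),
there is a list of pages per order plus a two-level lookup table
mapping addresses back to their page, and a collection consists of
clearing the bitmaps, re-marking from the roots, and sweeping empty
pages back to a free-page cache.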

I went over the code last night and made things work on my Alphas,
and I ran the code past Alex this morning.  I'm committing the
following.

Note that there is still no configury to turn this on -- at 
present you just edit the Makefile.  My understanding is that Alex
will be submitting all those bits shortly.



r~



        * ggc-page.c: New file.
        * Makefile.in (ggc-page.o): New.

Index: Makefile.in
===================================================================
RCS file: /egcs/carton/cvsfiles/egcs/gcc/Makefile.in,v
retrieving revision 1.311
diff -c -p -d -r1.311 Makefile.in
*** Makefile.in	1999/09/22 15:24:30	1.311
--- Makefile.in	1999/09/23 19:58:49
*************** ggc-common.o: ggc-common.c $(CONFIG_H) $
*** 1431,1436 ****
--- 1431,1439 ----
  	flags.h ggc.h varray.h hash.h
  
  ggc-simple.o: ggc-simple.c $(CONFIG_H) $(RTL_BASE_H) $(TREE_H) flags.h \
+ 	ggc.h varray.h hash.h
+ 
+ ggc-page.o: ggc-page.c $(CONFIG_H) $(RTL_BASE_H) $(TREE_H) flags.h \
  	ggc.h varray.h hash.h
  
  ggc-none.o: ggc-none.c $(CONFIG_H) $(RTL_BASE_H) ggc.h
Index: ggc-page.c
===================================================================
RCS file: ggc-page.c
diff -N ggc-page.c
*** /dev/null	Sat Dec  5 20:30:03 1998
--- ggc-page.c	Thu Sep 23 12:58:49 1999
***************
*** 0 ****
--- 1,1083 ----
+ /* "Bag-of-pages" garbage collector for the GNU compiler.
+    Copyright (C) 1999 Free Software Foundation, Inc.
+ 
+    This file is part of GNU CC.
+ 
+    GNU CC is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2, or (at your option)
+    any later version.
+ 
+    GNU CC is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+ 
+    You should have received a copy of the GNU General Public License
+    along with GNU CC; see the file COPYING.  If not, write to
+    the Free Software Foundation, 59 Temple Place - Suite 330,
+    Boston, MA 02111-1307, USA.  */
+ 
+ #include <unistd.h>
+ #include <stdio.h>
+ #include <sys/mman.h>
+ #include <fcntl.h>
+ 
+ #include "config.h"
+ #include "ggc.h"
+ #include "rtl.h"
+ #include "system.h"
+ #include "tree.h"
+ #include "varray.h"
+ #include "flags.h"
+ 
+ 
+ /* Strategy: 
+ 
+    This garbage-collecting allocator allocates objects on one of a set
+    of pages.  Each page can allocate objects of a single size only;
+    available sizes are powers of two starting at four bytes.  The size
+    of an allocation request is rounded up to the next power of two
+    (`order'), and satisfied from the appropriate page.
+ 
+    Each page is recorded in a page-entry, which also maintains an
+    in-use bitmap of object positions on the page.  This allows the
+    allocation state of a particular object to be flipped without
+    touching the page itself.
+ 
+    Each page-entry also has a context depth, which is used to track
+    pushing and popping of allocation contexts.  Only objects allocated
+    in the current (highest-numbered) context may be collected.  
+ 
+    Page entries are arranged in an array of singly-linked lists.  The
+    array is indexed by the allocation size, in bits, of the pages on
+    it; i.e. all pages on a list allocate objects of the same size.
+    Pages are ordered on the list such that all non-full pages precede
+    all full pages, with non-full pages arranged in order of decreasing
+    context depth.
+ 
+    Empty pages (of all orders) are kept on a single page cache list,
+    and are considered first when new pages are required; they are
+    deallocated at the start of the next collection if they haven't
+    been recycled by then.  */
+ 
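+ /* As a concrete illustration (the numbers are examples only): on a
+    host with 4 kB pages, a request for 97 bytes is rounded up to 128
+    bytes (order 7), so each page of that order holds 32 objects; the
+    page's in-use bitmap carries one extra one-past-the-end bit that
+    the allocator uses as a sentinel.  */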
+ 
+ /* Define GGC_POISON to poison memory marked unused by the collector.  */
+ #undef GGC_POISON
+ 
+ /* Define GGC_ALWAYS_COLLECT to perform collection every time
+    ggc_collect is invoked.  Otherwise, collection is performed only
+    when a significant amount of memory has been allocated since the
+    last collection.  */
+ #undef GGC_ALWAYS_COLLECT
+ 
+ /* If ENABLE_CHECKING is defined, enable GGC_POISON and
+    GGC_ALWAYS_COLLECT automatically.  */
+ #ifdef ENABLE_CHECKING
+ #define GGC_POISON
+ #define GGC_ALWAYS_COLLECT
+ #endif
+ 
+ /* Define GGC_DEBUG_LEVEL to print debugging information.
+      0: No debugging output.
+      1: GC statistics only.
+      2: Page-entry allocations/deallocations as well.
+      3: Object allocations as well.
+      4: Object marks as well.   */
+ #define GGC_DEBUG_LEVEL (0)
+ 
+ #ifndef HOST_BITS_PER_PTR
+ #define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
+ #endif
+ 
+ /* Time spent in ggc_collect is accumulated here.  */
+ extern int gc_time;
+ 
+ /* The GC-allocated empty string ("").  */
+ char *empty_string;
+ 
+ /* A two-level tree is used to look up the page-entry for a given
+    pointer.  Two chunks of the pointer's bits are extracted to index
+    the first and second levels of the tree, as follows:
+ 
+ 				   HOST_PAGE_SIZE_BITS
+ 			   32		|      |
+        msb +----------------+----+------+------+ lsb
+ 			    |    |      |
+ 			 PAGE_L1_BITS   |
+ 				 |      |
+ 			       PAGE_L2_BITS
+ 
+    The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
+    pages are aligned on system page boundaries.  The next most
+    significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
+    index values in the lookup table, respectively.  
+ 
+    The topmost leftover bits, if any, are ignored.  For 32-bit
+    architectures and the settings below, there are no leftover bits.
+    For architectures with wider pointers, the lookup table is a list
+    of page tables, which must be scanned to find the correct one.  */
+ 
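+ /* For instance, with 4 kB pages (lg_pagesize == 12) and the settings
+    below, a 32-bit address splits into 8 level-1 bits, 12 level-2
+    bits, and 12 ignored page-offset bits.  */
+ 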
+ #define PAGE_L1_BITS	(8)
+ #define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
+ #define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
+ #define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)
+ 
+ #define LOOKUP_L1(p) \
+   (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
+ 
+ #define LOOKUP_L2(p) \
+   (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
+ 
+ 
+ /* A page_entry records the status of an allocation page.  This
+    structure is dynamically sized to fit the bitmap in_use_p.  */
+ typedef struct page_entry 
+ {
+   /* The next page-entry with objects of the same size, or NULL if
+      this is the last page-entry.  */
+   struct page_entry *next;
+ 
+   /* The number of bytes allocated.  (This will always be a multiple
+      of the host system page size.)  */
+   size_t bytes;
+ 
+   /* The address at which the memory is allocated.  */
+   char *page;
+ 
+   /* Saved in-use bit vector for pages that aren't in the topmost
+      context during collection.  */
+   unsigned long *save_in_use_p;
+ 
+   /* Context depth of this page.  */
+   unsigned char context_depth;
+ 
+   /* The lg of the size of objects allocated from this page.  */
+   unsigned char order;
+ 
+   /* The number of free objects remaining on this page.  */
+   unsigned short num_free_objects;
+ 
+   /* A likely candidate for the bit position of a free object for the
+      next allocation from this page.  */
+   unsigned short next_bit_hint;
+ 
+   /* Saved number of free objects for pages that aren't in the topmost
+      context during collection.  */
+   unsigned short save_num_free_objects;
+ 
+   /* A bit vector indicating whether or not objects are in use.  The
+      Nth bit is one if the Nth object on this page is allocated.  This
+      array is dynamically sized.  */
+   unsigned long in_use_p[1];
+ } page_entry;
+ 
+ 
+ #if HOST_BITS_PER_PTR <= 32
+ 
+ /* On 32-bit hosts, we use a two-level page table, as pictured above.  */
+ typedef page_entry **page_table[PAGE_L1_SIZE];
+ 
+ #else
+ 
+ /* On 64-bit hosts, we use a two-level page table plus a linked list
+    that disambiguates the top 32 bits.  There will almost always be
+    exactly one entry in the list.  */
+ typedef struct page_table_chain
+ {
+   struct page_table_chain *next;
+   size_t high_bits;
+   page_entry **table[PAGE_L1_SIZE];
+ } *page_table;
+ 
+ #endif
+ 
+ /* The rest of the global variables.  */
+ static struct globals
+ {
+   /* The Nth element in this array is a page with objects of size 2^N.
+      If there are any pages with free objects, they will be at the
+      head of the list.  NULL if there are no page-entries for this
+      object size.  */
+   page_entry *pages[HOST_BITS_PER_PTR];
+ 
+   /* The Nth element in this array is the last page with objects of
+      size 2^N.  NULL if there are no page-entries for this object
+      size.  */
+   page_entry *page_tails[HOST_BITS_PER_PTR];
+ 
+   /* Lookup table for associating allocation pages with object addresses.  */
+   page_table lookup;
+ 
+   /* The system's page size.  */
+   size_t pagesize;
+   size_t lg_pagesize;
+ 
+   /* Bytes currently allocated.  */
+   size_t allocated;
+ 
+   /* Bytes allocated at the end of the last collection.  */
+   size_t allocated_last_gc;
+ 
+   /* The current depth in the context stack.  */
+   unsigned char context_depth;
+ 
+   /* A file descriptor open to /dev/zero for reading.  */
+ #ifndef MAP_ANONYMOUS
+   int dev_zero_fd;
+ #endif
+ 
+   /* A cache of free system pages.  */
+   page_entry *free_pages;
+ 
+   /* The stream used for debugging output.  */
+   FILE *debug_file;
+ } G;
+ 
+ 
+ /* Compute DIVIDEND / DIVISOR, rounded up.  */
+ #define DIV_ROUND_UP(Dividend, Divisor) \
+   (((Dividend) + (Divisor) - 1) / (Divisor))
+ 
+ /* The number of objects per allocation page, for objects of size
+    2^ORDER.  */
+ #define OBJECTS_PER_PAGE(Order) \
+   ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t)1 << (Order)))
+ 
+ /* The size in bytes required to maintain a bitmap for the objects
+    on a page-entry.  */
+ #define BITMAP_SIZE(Num_objects) \
+   (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
+ 
+ /* Skip garbage collection if the current allocation is not at least
+    this factor times the allocation at the end of the last collection.
+    In other words, total allocation must expand by (this factor minus
+    one) before collection is performed.  */
+ #define GGC_MIN_EXPAND_FOR_GC (1.3)
+ 
+ 
+ static page_entry *lookup_page_table_entry PROTO ((void *));
+ static void set_page_table_entry PROTO ((void *, page_entry *));
+ static char *alloc_anon PROTO ((char *, size_t));
+ static struct page_entry * alloc_page PROTO ((unsigned));
+ static void free_page PROTO ((struct page_entry *));
+ static void release_pages PROTO ((void));
+ static void *alloc_obj PROTO ((size_t, int));
+ static int mark_obj PROTO ((void *));
+ static void clear_marks PROTO ((void));
+ static void sweep_pages PROTO ((void));
+ 
+ #ifdef GGC_POISON
+ static void poison PROTO ((void *, size_t));
+ static void poison_pages PROTO ((void));
+ #endif
+ 
+ void debug_print_page_list PROTO ((int));
+ 
+ /* Traverse the page table and find the entry for a page. 
+    Die (probably) if the object wasn't allocated via GC.  */
+ 
+ static inline page_entry *
+ lookup_page_table_entry (p)
+      void *p;
+ {
+   page_entry ***base;
+   size_t L1, L2;
+ 
+ #if HOST_BITS_PER_PTR <= 32
+   base = &G.lookup[0];
+ #else
+   page_table table = G.lookup;
+   size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
+   while (table->high_bits != high_bits)
+     table = table->next;
+   base = &table->table[0];
+ #endif
+ 
+   /* Extract the level 1 and 2 indices.  */
+   L1 = LOOKUP_L1 (p);
+   L2 = LOOKUP_L2 (p);
+ 
+   return base[L1][L2];
+ }
+ 
+ 
+ /* Set the page table entry for a page.  */
+ static void
+ set_page_table_entry (p, entry)
+      void *p;
+      page_entry *entry;
+ {
+   page_entry ***base;
+   size_t L1, L2;
+ 
+ #if HOST_BITS_PER_PTR <= 32
+   base = &G.lookup[0];
+ #else
+   page_table table;
+   size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
+   for (table = G.lookup; table; table = table->next)
+     if (table->high_bits == high_bits)
+       goto found;
+ 
+   /* Not found -- allocate a new table.  */
+   table = (page_table) xcalloc (1, sizeof(*table));
+   table->next = G.lookup;
+   table->high_bits = high_bits;
+   G.lookup = table;
+ found:
+   base = &table->table[0];
+ #endif
+ 
+   /* Extract the level 1 and 2 indices.  */
+   L1 = LOOKUP_L1 (p);
+   L2 = LOOKUP_L2 (p);
+ 
+   if (base[L1] == NULL)
+     base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
+ 
+   base[L1][L2] = entry;
+ }
+ 
+ 
+ /* Prints the page-entry list for objects of size 2^ORDER, for debugging.  */
+ void
+ debug_print_page_list (order)
+      int order;
+ {
+   page_entry *p;
+   printf ("Head=%p, Tail=%p:\n", G.pages[order], G.page_tails[order]);
+   p = G.pages[order];
+   while (p != NULL)
+     {
+       printf ("%p(%1d|%3d) -> ", p, p->context_depth, p->num_free_objects);
+       p = p->next;
+     }
+   printf ("NULL\n");
+   fflush (stdout);
+ }
+ 
+ #ifdef GGC_POISON
+ /* `Poisons' the region of memory starting at START and extending for
+    LEN bytes.  */
+ static inline void
+ poison (start, len)
+      void *start;
+      size_t len;
+ {
+   memset (start, 0xa5, len);
+ }
+ #endif
+ 
+ /* Allocate SIZE bytes of anonymous memory, preferably near PREF
+    (if non-null).  */
+ static inline char *
+ alloc_anon (pref, size)
+      char *pref;
+      size_t size;
+ {
+   char *page;
+ 
+ #ifdef MAP_ANONYMOUS
+   page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
+ 			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ #else
+   page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
+ 			MAP_PRIVATE, G.dev_zero_fd, 0);
+ #endif
+   if (page == (char *) MAP_FAILED)
+     {
+       fputs ("Virtual memory exhausted!\n", stderr);
+       exit(1);
+     }
+ 
+   return page;
+ }
+ 
+ /* Allocate a new page for allocating objects of size 2^ORDER,
+    and return an entry for it.  The entry is not added to the
+    appropriate G.pages list; the caller must do that.  */
+ static inline struct page_entry *
+ alloc_page (order)
+      unsigned order;
+ {
+   struct page_entry *entry, *p, **pp;
+   char *page;
+   size_t num_objects;
+   size_t bitmap_size;
+   size_t page_entry_size;
+   size_t entry_size;
+ 
+   num_objects = OBJECTS_PER_PAGE (order);
+   bitmap_size = BITMAP_SIZE (num_objects + 1);
+   page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
+   entry_size = num_objects * (1 << order);
+ 
+   entry = NULL;
+   page = NULL;
+ 
+   /* Check the list of free pages for one we can use.  */
+   for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
+     if (p->bytes == entry_size)
+       break;
+ 
+   if (p != NULL)
+     {
+       /* Recycle the allocated memory from this page ... */
+       *pp = p->next;
+       page = p->page;
+       /* ... and, if possible, the page entry itself.  */
+       if (p->order == order)
+ 	{
+ 	  entry = p;
+ 	  memset (entry, 0, page_entry_size);
+ 	}
+       else
+ 	free (p);
+     }
+   else
+     {
+       /* Actually allocate the memory, using mmap.  */
+       page = alloc_anon (NULL, entry_size);
+     }
+ 
+   if (entry == NULL)
+     entry = (struct page_entry *) xcalloc (1, page_entry_size);
+ 
+   entry->bytes = entry_size;
+   entry->page = page;
+   entry->context_depth = G.context_depth;
+   entry->order = order;
+   entry->num_free_objects = num_objects;
+   entry->next_bit_hint = 1;
+ 
+   /* Set the one-past-the-end in-use bit.  This acts as a sentinel as
+      we increment the hint.  */
+   entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
+     = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
+ 
+   set_page_table_entry (page, entry);
+ 
+   if (GGC_DEBUG_LEVEL >= 2)
+     fprintf (G.debug_file, 
+ 	     "Allocating page at %p, object size=%d, data %p-%p\n", entry,
+ 	     1 << order, page, page + entry_size - 1);
+ 
+   return entry;
+ }
+ 
+ 
+ /* For a page that is no longer needed, put it on the free page list.  */
+ static inline void
+ free_page (entry)
+      page_entry *entry;
+ {
+   if (GGC_DEBUG_LEVEL >= 2)
+     fprintf (G.debug_file, 
+ 	     "Deallocating page at %p, data %p-%p\n", entry,
+ 	     entry->page, entry->page + entry->bytes - 1);
+ 
+   set_page_table_entry (entry->page, NULL);
+ 
+   entry->next = G.free_pages;
+   G.free_pages = entry;
+ }
+ 
+ 
+ /* Release the page cache to the system.  */
+ static inline void
+ release_pages ()
+ {
+   page_entry *p, *next;
+   char *start;
+   size_t len;
+ 
+   p = G.free_pages;
+   if (p == NULL)
+     return;
+ 
+   next = p->next;
+   start = p->page;
+   len = p->bytes;
+   free (p);
+   p = next;
+ 
+   while (p)
+     {
+       next = p->next;
+       /* Gather up adjacent pages so they are unmapped together.  */
+       if (p->page == start + len)
+ 	len += p->bytes;
+       else
+ 	{
+ 	  munmap (start, len);
+ 	  start = p->page;
+ 	  len = p->bytes;
+ 	}
+       free (p);
+       p = next;
+     }
+ 
+   munmap (start, len);
+   G.free_pages = NULL;
+ }
+ 
+ 
+ /* This table provides a fast way to determine ceil(log_2(size)) for
+    allocation requests.  The minimum allocation size is four bytes.  */
+ static unsigned char const size_lookup[257] = 
+ { 
+   2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 
+   4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
+   5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 
+   6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 
+   6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 
+   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 
+   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 
+   7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+   8
+ };
+ 
+ /* Allocate a chunk of memory of SIZE bytes.  If ZERO is non-zero, the
+    memory is zeroed; otherwise, its contents are undefined.  */
+ static void *
+ alloc_obj (size, zero)
+      size_t size;
+      int zero;
+ {
+   unsigned order, word, bit, object_offset;
+   struct page_entry *entry;
+   void *result;
+ 
+   if (size <= 256)
+     order = size_lookup[size];
+   else
+     {
+       order = 9;
+       while (size > ((size_t) 1 << order))
+ 	order++;
+     }
+ 
+   /* If there are non-full pages for this size allocation, they are at
+      the head of the list.  */
+   entry = G.pages[order];
+ 
+   /* If there is no page for this object size, or all pages in this
+      context are full, allocate a new page.  */
+   if (entry == NULL 
+       || entry->num_free_objects == 0 
+       || entry->context_depth != G.context_depth)
+     {
+       struct page_entry *new_entry;
+       new_entry = alloc_page (order);
+       
+       /* If this is the only entry, it's also the tail.  */
+       if (entry == NULL)
+ 	G.page_tails[order] = new_entry;
+      
+       /* Put new pages at the head of the page list.  */
+       new_entry->next = entry;
+       entry = new_entry;
+       G.pages[order] = new_entry;
+ 
+       /* For a new page, we know the word and bit positions (in the
+ 	 in_use bitmap) of the first available object -- they're zero.  */
+       new_entry->next_bit_hint = 1;
+       word = 0;
+       bit = 0;
+       object_offset = 0;
+     }
+   else
+     {
+       /* First try to use the hint left from the previous allocation
+ 	 to locate a clear bit in the in-use bitmap.  We've made sure
+ 	 that the one-past-the-end bit is always set, so if the hint
+ 	 has run over, this test will fail.  */
+       unsigned hint = entry->next_bit_hint;
+       word = hint / HOST_BITS_PER_LONG;
+       bit = hint % HOST_BITS_PER_LONG;
+       
+       /* If the hint didn't work, scan the bitmap from the beginning.  */
+       if ((entry->in_use_p[word] >> bit) & 1)
+ 	{
+ 	  word = bit = 0;
+ 	  while (~entry->in_use_p[word] == 0)
+ 	    ++word;
+ 	  while ((entry->in_use_p[word] >> bit) & 1)
+ 	    ++bit;
+ 	  hint = word * HOST_BITS_PER_LONG + bit;
+ 	}
+ 
+       /* Next time, try the next bit.  */
+       entry->next_bit_hint = hint + 1;
+ 
+       object_offset = hint << order;
+     }
+ 
+   /* Set the in-use bit.  */
+   entry->in_use_p[word] |= ((unsigned long) 1 << bit);
+ 
+   /* Keep a running total of the number of free objects.  If this page
+      fills up, we may have to move it to the end of the list if the
+      next page isn't full.  If the next page is full, all subsequent
+      pages are full, so there's no need to move it.  */
+   if (--entry->num_free_objects == 0
+       && entry->next != NULL
+       && entry->next->num_free_objects > 0)
+     {
+       G.pages[order] = entry->next;
+       entry->next = NULL;
+       G.page_tails[order]->next = entry;
+       G.page_tails[order] = entry;
+     }
+ 
+   /* Calculate the object's address.  */
+   result = entry->page + object_offset;
+ 
+ #ifdef GGC_POISON
+   /* `Poison' the entire allocated object before zeroing the requested area,
+      so that bytes beyond the end, if any, will not necessarily be zero.  */
+   poison (result, 1 << order);
+ #endif
+   if (zero)
+     memset (result, 0, size);
+ 
+   /* Keep track of how many bytes are being allocated.  This
+      information is used in deciding when to collect.  */
+   G.allocated += (size_t) 1 << order;
+ 
+   if (GGC_DEBUG_LEVEL >= 3)
+     fprintf (G.debug_file, 
+ 	     "Allocating object, requested size=%d, actual=%d at %p on %p\n",
+ 	     (int) size, 1 << order, result, entry);
+ 
+   return result;
+ }
+ 
+ 
+ /* If P is not marked, marks it and returns 0.  Otherwise returns 1.
+    P must have been allocated by the GC allocator; it mustn't point to
+    static objects, stack variables, or memory allocated with malloc.  */
+ static int
+ mark_obj (p)
+      void *p;
+ {
+   page_entry *entry;
+   unsigned bit, word;
+   unsigned long mask;
+ 
+   /* Look up the page on which the object is allocated.  If the object
+      wasn't allocated by the collector, we'll probably die.  */
+   entry = lookup_page_table_entry (p);
+ #ifdef ENABLE_CHECKING
+   if (entry == NULL)
+     abort ();
+ #endif
+ 
+   /* Calculate the index of the object on the page; this is its bit
+      position in the in_use_p bitmap.  */
+   bit = (((char *) p) - entry->page) >> entry->order;
+   word = bit / HOST_BITS_PER_LONG;
+   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
+   
+   /* If the bit was previously set, skip it. */
+   if (entry->in_use_p[word] & mask)
+     return 1;
+ 
+   /* Otherwise set it, and decrement the free object count.  */
+   entry->in_use_p[word] |= mask;
+   entry->num_free_objects -= 1;
+ 
+   G.allocated += (size_t) 1 << entry->order;
+ 
+   if (GGC_DEBUG_LEVEL >= 4)
+     fprintf (G.debug_file, "Marking %p\n", p);
+ 
+   return 0;
+ }
+ 
+ 
+ /* Initialize the ggc-page allocator.  */
+ void
+ init_ggc ()
+ {
+   G.pagesize = getpagesize();
+   G.lg_pagesize = exact_log2 (G.pagesize);
+ 
+ #ifndef MAP_ANONYMOUS
+   G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
+   if (G.dev_zero_fd == -1)
+     abort ();
+ #endif
+ 
+ #if 0
+   G.debug_file = fopen ("ggc-mmap.debug", "w");
+ #else
+   G.debug_file = stdout;
+ #endif
+ 
+   /* Set the initial allocation to 4 MB, so no collection will be
+      performed until the heap is somewhat larger than 4 MB.  */
+   G.allocated_last_gc = 4 * 1024 * 1024;
+ 
+   empty_string = ggc_alloc_string ("", 0);
+   ggc_add_string_root (&empty_string, 1);
+ }
+ 
+ 
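+ /* Begin a new GGC context.  Objects allocated in outer contexts are
+    not collected while the new context is active.  */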
+ void
+ ggc_push_context ()
+ {
+   ++G.context_depth;
+ 
+   /* Die on wrap.  */
+   if (G.context_depth == 0)
+     abort ();
+ }
+ 
+ 
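+ /* Finish a GGC context.  Objects still allocated in the popped
+    context are merged into the surrounding context.  */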
+ void
+ ggc_pop_context ()
+ {
+   unsigned order, depth;
+ 
+   depth = --G.context_depth;
+ 
+   /* Any remaining pages in the popped context are lowered to the new
+      current context; i.e. objects allocated in the popped context and
+      left over are imported into the previous context.  */
+   for (order = 2; order < HOST_BITS_PER_PTR; order++)
+     {
+       size_t num_objects = OBJECTS_PER_PAGE (order);
+       size_t bitmap_size = BITMAP_SIZE (num_objects);
+ 
+       page_entry *p;
+ 
+       for (p = G.pages[order]; p != NULL; p = p->next)
+ 	{
+ 	  if (p->context_depth > depth)
+ 	    {
+ 	      p->context_depth = depth;
+ 	    }
+ 
+ 	  /* If this page is now in the topmost context, and we'd
+ 	     saved its allocation state, restore it.  */
+ 	  else if (p->context_depth == depth && p->save_in_use_p)
+ 	    {
+ 	      memcpy (p->in_use_p, p->save_in_use_p, bitmap_size);
+ 	      free (p->save_in_use_p);
+ 	      p->save_in_use_p = 0;
+ 	      p->num_free_objects = p->save_num_free_objects;
+ 	    }
+ 	}
+     }
+ }
+ 
+ 
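+ /* Allocate a zeroed rtx with room for NSLOTS operands.  */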
+ struct rtx_def *
+ ggc_alloc_rtx (nslots)
+      int nslots;
+ {
+   return (struct rtx_def *) 
+     alloc_obj (sizeof (struct rtx_def) + (nslots - 1) * sizeof (rtunion), 1);
+ }
+ 
+ 
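+ /* Allocate a zeroed rtvec with NELT elements.  */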
+ struct rtvec_def *
+ ggc_alloc_rtvec (nelt)
+      int nelt;
+ {
+   return (struct rtvec_def *)
+     alloc_obj (sizeof (struct rtvec_def) + (nelt - 1) * sizeof (rtunion), 1);
+ }
+ 
+ 
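+ /* Allocate a zeroed tree node of LENGTH bytes.  */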
+ union tree_node *
+ ggc_alloc_tree (length)
+      int length;
+ {
+   return (union tree_node *) alloc_obj (length, 1);
+ }
+ 
+ 
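+ /* Allocate a NUL-terminated string of LENGTH bytes, copied from
+    CONTENTS if that is non-null.  If LENGTH is negative, compute it
+    with strlen.  */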
+ char *
+ ggc_alloc_string (contents, length)
+      const char *contents;
+      int length;
+ {
+   char *string;
+ 
+   if (length < 0)
+     {
+       if (contents == NULL)
+ 	return NULL;
+       length = strlen (contents);
+     }
+ 
+   string = (char *) alloc_obj (length + 1, 0);
+   if (contents != NULL)
+     memcpy (string, contents, length);
+   string[length] = 0;
+ 
+   return string;
+ }
+ 
+ 
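+ /* Allocate SIZE bytes of garbage-collected memory.  The contents
+    are not zeroed.  */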
+ void *
+ ggc_alloc (size)
+      size_t size;
+ {
+   return alloc_obj (size, 0);
+ }
+ 
+ 
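+ /* Unmark all objects: clear the in-use bits on every page, first
+    saving the allocation state of pages that are not in the topmost
+    context, since those are not collected.  */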
+ static inline void
+ clear_marks ()
+ {
+   unsigned order;
+ 
+   for (order = 2; order < HOST_BITS_PER_PTR; order++)
+     {
+       size_t num_objects = OBJECTS_PER_PAGE (order);
+       size_t bitmap_size = BITMAP_SIZE (num_objects);
+       page_entry *p;
+ 
+       for (p = G.pages[order]; p != NULL; p = p->next)
+ 	{
+ #ifdef ENABLE_CHECKING
+ 	  /* The data should be page-aligned.  */
+ 	  if ((size_t) p->page & (G.pagesize - 1))
+ 	    abort ();
+ #endif
+ 
+ 	  /* Pages that aren't in the topmost context are not collected;
+ 	     nevertheless, we need their in-use bit vectors to store GC
+ 	     marks.  So, back them up first.  */
+ 	  if (p->context_depth < G.context_depth
+ 	      && ! p->save_in_use_p)
+ 	    {
+ 	      p->save_in_use_p = (unsigned long *) xmalloc (bitmap_size);
+ 	      memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
+ 	      p->save_num_free_objects = p->num_free_objects;
+ 	    }
+ 
+ 	  /* Reset the number of free objects and clear the
+              in-use bits.  These will be adjusted by mark_obj.  */
+ 	  p->num_free_objects = num_objects;
+ 	  memset (p->in_use_p, 0, bitmap_size);
+ 
+ 	  /* Make sure the one-past-the-end bit is always set.  */
+ 	  p->in_use_p[num_objects / HOST_BITS_PER_LONG] 
+ 	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
+ 	}
+     }
+ }
+ 
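+ /* Free pages on which nothing was marked, and reorder the page lists
+    so that non-full pages of the topmost context come first.  */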
+ static inline void
+ sweep_pages ()
+ {
+   unsigned order;
+ 
+   for (order = 2; order < HOST_BITS_PER_PTR; order++)
+     {
+       /* The last page-entry to consider, regardless of entries
+ 	 placed at the end of the list during this sweep.  */
+       page_entry * const last = G.page_tails[order];
+ 
+       size_t num_objects = OBJECTS_PER_PAGE (order);
+       page_entry *p, *previous;
+       int done;
+ 	
+       p = G.pages[order];
+       if (p == NULL)
+ 	continue;
+ 
+       previous = NULL;
+       do
+ 	{
+ 	  page_entry *next = p->next;
+ 
+ 	  /* Loop until all entries have been examined.  */
+ 	  done = (p == last);
+ 
+ 	  /* Only objects on pages in the topmost context should get
+ 	     collected.  */
+ 	  if (p->context_depth < G.context_depth)
+ 	    ;
+ 
+ 	  /* Remove the page if it's empty.  */
+ 	  else if (p->num_free_objects == num_objects)
+ 	    {
+ 	      if (! previous)
+ 		G.pages[order] = next;
+ 	      else
+ 		previous->next = next;
+ 
+ 	      /* Are we removing the last element?  */
+ 	      if (p == G.page_tails[order])
+ 		G.page_tails[order] = previous;
+ 	      free_page (p);
+ 	      p = previous;
+ 	    }
+ 
+ 	  /* If the page is full, move it to the end.  */
+ 	  else if (p->num_free_objects == 0)
+ 	    {
+ 	      /* Don't move it if it's already at the end.  */
+ 	      if (p != G.page_tails[order])
+ 		{
+ 		  /* Move p to the end of the list.  */
+ 		  p->next = NULL;
+ 		  G.page_tails[order]->next = p;
+ 
+ 		  /* Update the tail pointer...  */
+ 		  G.page_tails[order] = p;
+ 
+ 		  /* ... and the head pointer, if necessary.  */
+ 		  if (! previous)
+ 		    G.pages[order] = next;
+ 		  else
+ 		    previous->next = next;
+ 		  p = previous;
+ 		}
+ 	    }
+ 
+ 	  /* If we've fallen through to here, it's a page in the
+ 	     topmost context that is neither full nor empty.  Such a
+ 	     page must precede pages at lesser context depth in the
+ 	     list, so move it to the head.  */
+ 	  else if (p != G.pages[order])
+ 	    {
+ 	      previous->next = p->next;
+ 	      p->next = G.pages[order];
+ 	      G.pages[order] = p;
+ 	      /* Are we moving the last element?  */
+ 	      if (G.page_tails[order] == p)
+ 	        G.page_tails[order] = previous;
+ 	      p = previous;
+ 	    }
+ 
+ 	  previous = p;
+ 	  p = next;
+ 	} 
+       while (! done);
+     }
+ }
+ 
+ #ifdef GGC_POISON
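+ /* Overwrite the free (unmarked) objects on every page with the
+    poison pattern.  */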
+ static inline void
+ poison_pages ()
+ {
+   unsigned order;
+ 
+   for (order = 2; order < HOST_BITS_PER_PTR; order++)
+     {
+       size_t num_objects = OBJECTS_PER_PAGE (order);
+       size_t size = (size_t) 1 << order;
+       page_entry *p;
+ 
+       for (p = G.pages[order]; p != NULL; p = p->next)
+ 	{
+ 	  size_t i;
+ 	  for (i = 0; i < num_objects; i++)
+ 	    {
+ 	      size_t word, bit;
+ 	      word = i / HOST_BITS_PER_LONG;
+ 	      bit = i % HOST_BITS_PER_LONG;
+ 	      if (((p->in_use_p[word] >> bit) & 1) == 0)
+ 		poison (p->page + i * size, size);
+ 	    }
+ 	}
+     }
+ }
+ #endif
+ 
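+ /* Top level mark-and-sweep entry point: release cached pages, clear
+    the mark bits, mark everything reachable from the roots, and sweep.  */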
+ void
+ ggc_collect ()
+ {
+   int time;
+ 
+   /* Avoid frequent unnecessary work by skipping collection if the
+      total allocations haven't expanded much since the last
+      collection.  */
+ #ifndef GGC_ALWAYS_COLLECT
+   if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
+     return;
+ #endif
+ 
+   time = get_run_time ();
+   if (!quiet_flag)
+     fprintf (stderr, " {GC %luk -> ", (unsigned long)G.allocated / 1024);
+ 
+   /* Zero the total allocated bytes.  We'll reaccumulate this while
+      marking.  */
+   G.allocated = 0;
+ 
+   /* Release the pages we freed the last time we collected, but didn't 
+      reuse in the interim.  */
+   release_pages ();
+ 
+   clear_marks ();
+   ggc_mark_roots ();
+   sweep_pages ();
+   
+ #ifdef GGC_POISON
+   poison_pages ();
+ #endif
+ 
+   G.allocated_last_gc = G.allocated;
+ 
+   time = get_run_time () - time;
+   gc_time += time;
+ 
+   time = (time + 500) / 1000;
+   if (!quiet_flag)
+     fprintf (stderr, "%luk in %d.%03d}", 
+ 	     (unsigned long) G.allocated / 1024, time / 1000, time % 1000);
+ }
+ 
+ 
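+ /* Mark entry points for the various object types.  The ggc_set_mark_*
+    routines return nonzero if the object was already marked.  */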
+ int
+ ggc_set_mark_rtx (r)
+      rtx r;
+ {
+   return mark_obj (r);
+ }
+ 
+ int
+ ggc_set_mark_rtvec (v)
+      rtvec v;
+ {
+   return mark_obj (v);
+ }
+ 
+ int
+ ggc_set_mark_tree (t)
+      tree t;
+ {
+   return mark_obj (t);
+ }
+ 
+ void
+ ggc_mark_string (s)
+      char *s;
+ {
+   if (s)
+     mark_obj (s);
+ }
+ 
+ void 
+ ggc_mark (p)
+      void *p;
+ {
+   if (p)
+     mark_obj (p);
+ }


