This is the mail archive of the gcc@gcc.gnu.org mailing list for the GCC project.

[PATCH] make zone collector work without mmap


This patch augments the zone collector with the page-group
functionality from ggc-page.c.  If I understood correctly, the
zone collector is going to become the default one day, so it
needs to work without mmap.  Even if it eventually ends up
using mprotect, having this fallback would still help, because
most malloc implementations actually work fine with mprotect
even though POSIX does not guarantee it.
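
For anyone not familiar with the page-group scheme borrowed from
ggc-page.c, here is a minimal standalone sketch of the idea (not part
of the patch; the 4K page size, the 16-page group size and all the
sketch_* names are made up for illustration): grab one big block from
malloc, round up to the first page boundary inside it, and track the
aligned pages with a one-bit-per-page mask, so the whole block can go
back to malloc once the mask drops to zero.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE   ((size_t) 4096)   /* assumed page size */
#define GROUP_PAGES ((size_t) 16)     /* assumed pages per group */

/* A stripped-down page group: the raw malloc'ed block, the first
   page-aligned address inside it, and a one-bit-per-page mask.  */
struct sketch_group
{
  char *allocation;
  char *first_page;
  unsigned int in_use;
};

/* Index of PAGE within its group, like page_group_index in the patch.  */
static size_t
sketch_page_index (struct sketch_group *g, char *page)
{
  return (size_t) (page - g->first_page) / PAGE_SIZE;
}

int
main (void)
{
  struct sketch_group g;
  size_t alloc_size = GROUP_PAGES * PAGE_SIZE;
  char *p;

  /* Over-allocate by one page so GROUP_PAGES aligned pages always fit;
     the real patch instead accepts ending up with one usable page less.  */
  g.allocation = malloc (alloc_size + PAGE_SIZE);
  if (g.allocation == NULL)
    return 1;
  g.first_page = (char *) (((size_t) g.allocation + PAGE_SIZE - 1)
                           & -PAGE_SIZE);
  g.in_use = 0;

  /* Take page 3 out of the group, then give it back.  */
  p = g.first_page + 3 * PAGE_SIZE;
  g.in_use |= 1 << sketch_page_index (&g, p);
  printf ("in_use after alloc: %#x\n", g.in_use);
  g.in_use &= ~(1 << sketch_page_index (&g, p));
  printf ("in_use after free:  %#x (free the block when this hits 0)\n",
          g.in_use);

  free (g.allocation);
  return 0;
}

Once every page of a group has been handed back, release_pages can
return the underlying allocation to malloc in one shot, which is what
the in_use mask buys over doing a separate malloc per page.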

Bootstrapped/regtested on i686-pc-linux-gnu with USING_MMAP
#undef'ed to exercise the new functionality.  OK for
mainline?

Paolo

2004-05-07  Paolo Bonzini  <bonzini@gnu.org>

	* ggc-zone.c (struct page_entry) [!USING_MMAP]: Add
	group field.
	(struct page_group): New struct.
	(struct alloc_zone) [!USING_MMAP]: Add page_groups field.
	(page_group_index, set_page_group_in_use,
	clear_page_group_in_use): New functions.
	(alloc_small_page, release_pages, free_page) [!USING_MMAP]:
	Provide fallback using page groups.

--- old-ggc/ggc-zone.c	2004-05-01 15:23:45.000000000 +0200
+++ ggc-zone.c	2004-05-01 15:58:52.000000000 +0200
@@ -78,7 +78,7 @@ Software Foundation, 59 Temple Place - S
 #endif
 
 #ifndef USING_MMAP
-#error "Zone collector requires mmap"
+#define USING_MALLOC_PAGE_GROUPS
 #endif
 
 #if (GCC_VERSION < 3001)
@@ -223,6 +223,11 @@ typedef struct page_entry
      this is the last page-entry.  */
   struct page_entry *next;
 
+#ifdef USING_MALLOC_PAGE_GROUPS
+  /* Back pointer to the page group this page came from.  */
+  struct page_group *group;
+#endif
+
   /* The number of bytes allocated.  (This will always be a multiple
      of the host system page size.)  */
   size_t bytes;
@@ -243,6 +248,24 @@ typedef struct page_entry
   struct alloc_zone *zone;
 } page_entry;
 
+#ifdef USING_MALLOC_PAGE_GROUPS
+/* A page_group describes a large allocation from malloc, from which
+   we parcel out aligned pages.  */
+typedef struct page_group
+{
+  /* A linked list of all extant page groups.  */
+  struct page_group *next;
+
+  /* The address we received from malloc.  */
+  char *allocation;
+
+  /* The size of the block.  */
+  size_t alloc_size;
+
+  /* A bitmask of pages in use.  */
+  unsigned int in_use;
+} page_group;
+#endif
 
 /* The global variables.  */
 static struct globals
@@ -297,6 +321,10 @@ struct alloc_zone
   /* A cache of free system pages.  */
   page_entry *free_pages;
 
+#ifdef USING_MALLOC_PAGE_GROUPS
+  page_group *page_groups;
+#endif
+
   /* Next zone in the linked list of zones.  */
   struct alloc_zone *next_zone;
 
@@ -334,6 +362,11 @@ static void sweep_pages (struct alloc_zo
 static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short MEM_STAT_DECL);
 static bool ggc_collect_1 (struct alloc_zone *, bool);
 static void check_cookies (void);
+#ifdef USING_MALLOC_PAGE_GROUPS
+static inline size_t page_group_index (char *, char *);
+static inline void set_page_group_in_use (page_group *, char *);
+static inline void clear_page_group_in_use (page_group *, char *);
+#endif
 
 
 /* Returns nonzero if P was allocated in GC'able memory.  */
@@ -387,6 +420,32 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED,
 }
 #endif
 
+
+#ifdef USING_MALLOC_PAGE_GROUPS
+/* Compute the index for this page into the page group.  */
+
+static inline size_t
+page_group_index (char *allocation, char *page)
+{
+  return (size_t) (page - allocation) >> G.lg_pagesize;
+}
+
+/* Set and clear the in_use bit for this page in the page group.  */
+
+static inline void
+set_page_group_in_use (page_group *group, char *page)
+{
+  group->in_use |= 1 << page_group_index (group->allocation, page);
+}
+
+static inline void
+clear_page_group_in_use (page_group *group, char *page)
+{
+  group->in_use &= ~(1 << page_group_index (group->allocation, page));
+}
+#endif
+
+
 /* Allocate a new page for allocating objects of size 2^ORDER,
    and return an entry for it.  */
 
@@ -395,6 +454,9 @@ alloc_small_page (struct alloc_zone *zon
 {
   struct page_entry *entry;
   char *page;
+#ifdef USING_MALLOC_PAGE_GROUPS
+  page_group *group;
+#endif
 
   page = NULL;
 
@@ -405,8 +467,9 @@ alloc_small_page (struct alloc_zone *zon
       /* Recycle the allocated memory from this page ...  */
       zone->free_pages = entry->next;
       page = entry->page;
-
-
+#ifdef USING_MALLOC_PAGE_GROUPS
+      group = entry->group;
+#endif
     }
 #ifdef USING_MMAP
   else
@@ -433,6 +496,68 @@ alloc_small_page (struct alloc_zone *zon
       zone->free_pages = f;
     }
 #endif
+#ifdef USING_MALLOC_PAGE_GROUPS
+  else
+    {
+      /* Allocate a large block of memory and serve out the aligned
+         pages therein.  This results in much less memory wastage
+         than the traditional implementation of valloc.  */
+
+      char *allocation, *a, *enda;
+      size_t alloc_size, head_slop, tail_slop;
+
+      alloc_size = GGC_QUIRE_SIZE * G.pagesize;
+      allocation = xmalloc (alloc_size);
+
+      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
+      head_slop = page - allocation;
+      tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
+      enda = allocation + alloc_size - tail_slop;
+
+      /* We allocated N pages, which are likely not aligned, leaving
+         us with N-1 usable pages.  We plan to place the page_group
+         structure somewhere in the slop.  */
+      if (head_slop >= sizeof (page_group))
+        group = (page_group *)page - 1;
+      else
+        {
+          /* We magically got an aligned allocation.  Too bad, we have
+             to waste a page anyway.  */
+          if (tail_slop < sizeof (page_group))
+            {
+              enda -= G.pagesize;
+              tail_slop += G.pagesize;
+            }
+          group = (page_group *)enda;
+          tail_slop -= sizeof (page_group);
+        }
+
+      /* Remember that we allocated this memory.  */
+      group->next = zone->page_groups;
+      group->allocation = allocation;
+      group->alloc_size = alloc_size;
+      group->in_use = 0;
+      zone->page_groups = group;
+      zone->bytes_mapped += alloc_size;
+
+      /* Put the rest on the free list.  */
+      {
+        struct page_entry *e, *f = zone->free_pages;
+        for (a = enda - G.pagesize; a != page; a -= G.pagesize)
+          {
+	    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
+            e->bytes = G.pagesize;
+            e->page = a;
+            e->group = group;
+            e->next = f;
+            e->large_p = false;
+            f = e;
+          }
+        zone->free_pages = f;
+      }
+    }
+#endif
+
   if (entry == NULL)
     entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
 
@@ -444,6 +571,11 @@ alloc_small_page (struct alloc_zone *zon
   entry->zone = zone;
   zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
 
+#ifdef USING_MALLOC_PAGE_GROUPS
+  entry->group = group;
+  set_page_group_in_use (group, page);
+#endif
+
   if (GGC_DEBUG_LEVEL >= 2)
     fprintf (G.debug_file,
 	     "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
@@ -500,6 +632,10 @@ free_page (page_entry *entry)
     }
   else
     {
+#ifdef USING_MALLOC_PAGE_GROUPS
+      clear_page_group_in_use (entry->group, entry->page);
+#endif
+
       /* Mark the page as inaccessible.  Discard the handle to
 	 avoid handle leak.  */
       VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
@@ -544,6 +680,33 @@ release_pages (struct alloc_zone *zone)
 
   zone->free_pages = NULL;
 #endif
+#ifdef USING_MALLOC_PAGE_GROUPS
+  page_entry **pp, *p;
+  page_group **gp, *g;
+
+  /* Remove all pages from free page groups from the list.  */
+  pp = &zone->free_pages;
+  while ((p = *pp) != NULL)
+    if (p->group->in_use == 0)
+      {
+        *pp = p->next;
+        free (p);
+      }
+    else
+      pp = &p->next;
+
+  /* Remove all free page groups, and release the storage.  */
+  gp = &zone->page_groups;
+  while ((g = *gp) != NULL)
+    if (g->in_use == 0)
+      {
+        *gp = g->next;
+        zone->bytes_mapped -= g->alloc_size;
+        free (g->allocation);
+      }
+    else
+      gp = &g->next;
+#endif
 }
 
 /* Place CHUNK of size SIZE on the free list for ZONE.  */

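For reference, the head/tail-slop arithmetic in the alloc_small_page
hunk above can be exercised with the tiny standalone program below
(again not part of the patch; the 4K page size and the 16-page quire
are assumptions, and PAGESIZE/QUIRE are just local names).  The patch
then stores the page_group descriptor in the head slop when that is
large enough, and otherwise in the tail slop, giving up one more page
if even the tail slop is too small.

#include <stdio.h>
#include <stdlib.h>

#define PAGESIZE ((size_t) 4096)   /* assumed page size */
#define QUIRE    ((size_t) 16)     /* assumed GGC_QUIRE_SIZE */

int
main (void)
{
  size_t alloc_size = QUIRE * PAGESIZE;
  char *allocation = malloc (alloc_size);
  char *page;
  size_t head_slop, tail_slop;

  if (allocation == NULL)
    return 1;

  /* Round up to the first page boundary inside the block, then measure
     the unusable slop before and after the aligned pages.  */
  page = (char *) (((size_t) allocation + PAGESIZE - 1) & -PAGESIZE);
  head_slop = page - allocation;
  tail_slop = ((size_t) allocation + alloc_size) & (PAGESIZE - 1);

  /* Unless malloc happens to return a page-aligned block, head_slop and
     tail_slop add up to exactly one page, leaving QUIRE - 1 usable pages.  */
  printf ("head_slop=%lu tail_slop=%lu usable_pages=%lu\n",
          (unsigned long) head_slop, (unsigned long) tail_slop,
          (unsigned long) ((alloc_size - head_slop - tail_slop) / PAGESIZE));

  free (allocation);
  return 0;
}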