This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



[PATCH 3/4] Add a fragmentation fallback in ggc-page v2


From: Andi Kleen <ak@linux.intel.com>

There were some concerns that the earlier munmap patch could lead
to address space being freed that ggc cannot allocate again
due to fragmentation. This patch adds a fragmentation fallback
to solve this: when a GGC_QUIRE_SIZE-sized allocation fails,
try again with a single-page allocation.
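
In outline, the fallback looks like this (a minimal, self-contained
sketch of the idea using plain mmap; QUIRE_PAGES and the other names
are illustrative stand-ins, not the actual ggc-page.c internals):

/* Standalone illustration of the fallback idea (not ggc-page.c code):
   ask mmap for a large contiguous chunk first, and if the address
   space is too fragmented for that to succeed, fall back to a single
   page instead of failing outright.  */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

#define QUIRE_PAGES 16  /* illustrative stand-in for GGC_QUIRE_SIZE */

int
main (void)
{
  size_t pagesize = (size_t) sysconf (_SC_PAGESIZE);
  size_t entries = QUIRE_PAGES;
  char *page = (char *) mmap (NULL, pagesize * QUIRE_PAGES,
                              PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (page == (char *) MAP_FAILED)
    {
      /* Fragmentation fallback: one page is enough to make progress.  */
      entries = 1;
      page = (char *) mmap (NULL, pagesize, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == (char *) MAP_FAILED)
        {
          perror ("virtual memory exhausted");
          exit (EXIT_FAILURE);
        }
    }

  printf ("mapped %zu page(s) at %p\n", entries, (void *) page);
  return 0;
}

The patch below does the same thing inside alloc_page, with alloc_anon
gaining a check argument so the first, QUIRE-sized attempt can report
failure instead of exiting.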

Passes bootstrap and testing on x86_64-linux with the fallback
forced artificially.

v2: Fix a missed-initialization bug introduced in a last-minute edit.

gcc/:
2011-10-20  Andi Kleen  <ak@linux.intel.com>

	* ggc-page.c (alloc_anon): Add check argument.
	(alloc_page): Add fallback to single-page allocation.
	Adjust alloc_anon calls to the new argument.
---
 gcc/ggc-page.c |   23 +++++++++++++++--------
 1 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c
index 99bf2df..0bf0907 100644
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -482,7 +482,7 @@ static int ggc_allocated_p (const void *);
 static page_entry *lookup_page_table_entry (const void *);
 static void set_page_table_entry (void *, page_entry *);
 #ifdef USING_MMAP
-static char *alloc_anon (char *, size_t);
+static char *alloc_anon (char *, size_t, bool check);
 #endif
 #ifdef USING_MALLOC_PAGE_GROUPS
 static size_t page_group_index (char *, char *);
@@ -661,7 +661,7 @@ debug_print_page_list (int order)
    compile error unless exactly one of the HAVE_* is defined.  */
 
 static inline char *
-alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
+alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
 {
 #ifdef HAVE_MMAP_ANON
   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
@@ -674,6 +674,8 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
 
   if (page == (char *) MAP_FAILED)
     {
+      if (!check)
+        return NULL;
       perror ("virtual memory exhausted");
       exit (FATAL_EXIT_CODE);
     }
@@ -776,13 +778,18 @@ alloc_page (unsigned order)
 	 extras on the freelist.  (Can only do this optimization with
 	 mmap for backing store.)  */
       struct page_entry *e, *f = G.free_pages;
-      int i;
+      int i, entries = GGC_QUIRE_SIZE;
 
-      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
+      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
+      if (page == NULL)
+	{
+	  page = alloc_anon (NULL, G.pagesize, true);
+	  entries = 1;
+	}
 
       /* This loop counts down so that the chain will be in ascending
 	 memory order.  */
-      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
+      for (i = entries - 1; i >= 1; i--)
 	{
 	  e = XCNEWVAR (struct page_entry, page_entry_size);
 	  e->order = order;
@@ -795,7 +802,7 @@ alloc_page (unsigned order)
       G.free_pages = f;
     }
   else
-    page = alloc_anon (NULL, entry_size);
+    page = alloc_anon (NULL, entry_size, true);
 #endif
 #ifdef USING_MALLOC_PAGE_GROUPS
   else
@@ -1648,14 +1655,14 @@ init_ggc (void)
      believe, is an unaligned page allocation, which would cause us to
      hork badly if we tried to use it.  */
   {
-    char *p = alloc_anon (NULL, G.pagesize);
+    char *p = alloc_anon (NULL, G.pagesize, true);
     struct page_entry *e;
     if ((size_t)p & (G.pagesize - 1))
       {
 	/* How losing.  Discard this one and try another.  If we still
 	   can't get something useful, give up.  */
 
-	p = alloc_anon (NULL, G.pagesize);
+	p = alloc_anon (NULL, G.pagesize, true);
 	gcc_assert (!((size_t)p & (G.pagesize - 1)));
       }
 
-- 
1.7.5.4

