
[PATCH] provide means to access the zone from the corresponding chunk


This patch enables the zone collector to recover the page_entry and
alloc_zone structures from the address of an arbitrary pointer
returned by ggc_alloc.  Unfortunately, while very useful, this is
quite invasive.


In order to avoid substantial overhead, it changes the data
representation of the zone allocator so that the page_entry, instead
of living in separately malloc-ed memory, is simply stored as a header
in the page.  For small-object pages, you retrieve the page_entry by
rounding the pointer down to the beginning of the page; for
large-object pages, the page_entry sits just before the chunk.
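
As a concrete illustration, the lookup could look roughly like the
sketch below, assuming the layout just described.  zone_get_page_entry
is an illustrative name, not a function added by the patch; the other
identifiers come from ggc-zone.c, and the two expressions mirror the
ones the patch itself uses in ggc_alloc_zone_1 and ggc_free:

  static inline struct page_entry *
  zone_get_page_entry (const void *p)
  {
    struct alloc_chunk *chunk
      = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);

    if (chunk->large)
      /* Large-object page: the page_entry immediately precedes
	 the chunk.  */
      return (struct page_entry *) ((char *) chunk - PAGE_ENTRY_SIZE);
    else
      /* Small-object page: the page_entry is the page header, so
	 round the address down to the page boundary.  */
      return (struct page_entry *) ((size_t) chunk & ~(G.pagesize - 1));
  }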


It also simplifies the way PCHs are written, by embedding the
page_entry struct in the PCH (since it is now a header to the PCH
data).  The PCH zone, however, is special because it is a single big
page_entry holding all the objects (both small and large) in the PCH.
For this reason the (unused) type field is changed so that it
distinguishes between objects in the PCH zone and objects in the
other zones.
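
With the widened type bitfield, telling a PCH-resident object apart
then reduces to a one-line test (again a sketch; the predicate name is
hypothetical, OBJ_PCH is the enumerator the patch adds):

  /* Nonzero if CHUNK was restored from a precompiled header.  */
  static inline int
  chunk_in_pch_p (const struct alloc_chunk *chunk)
  {
    return chunk->type == OBJ_PCH;
  }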

The patch uses new_persistent_ggc_zone from my patch at
http://gcc.gnu.org/ml/gcc-patches/2004-05/msg00047.html, but it can be
checked in independently of that patch simply by changing that call to
new_ggc_zone.

I can provide a follow-up patch that uses this, so that the
garbage_zone is not scanned; a sketch of the idea follows.
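
For example, using the zone_get_page_entry lookup sketched earlier,
the marking machinery could bail out on such objects with something
like this (purely illustrative; the helper name is mine and this is
not part of the patch):

  /* Hypothetical predicate: true if P lives in garbage_zone and so
     should not be scanned by the collector.  */
  static inline int
  ggc_in_garbage_zone_p (const void *p)
  {
    return zone_get_page_entry (p)->zone == garbage_zone;
  }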

Bootstrapped/regtested i686-pc-linux-gnu, ok for mainline?

Paolo

2004-05-07  Paolo Bonzini  <bonzini@gnu.org>

	* ggc-common.c (gt_pch_save): Move call to
	ggc_pch_prepare_write after the padding for the mmap-ed
	area has been written.
	* ggc-zone.c (enum object_type): New.
	(alloc_chunk): Make type field a bit wider, and size
	field a bit smaller.
	(page_entry): Add u field overlaying the first chunk or the
	raw data.  Remove page field.  bytes is no longer a multiple
	of the page size.  Make the list of entries doubly-linked.
	(PAGE_ENTRY_SIZE): New.
	(pch_zone): New global.
	(ggc_allocated_p): Removed.
	(link_page): New.
	(release_pages): Replace references to page field
	with u.data or u.chunks, carve the page_entry struct
	out of the allocated page and decrement the bytes
	field appropriately.
	(init_ggc): Likewise.
	(sweep_pages): Likewise.
	(check_cookies): Likewise.
	(ggc_collect): Likewise.
	(free_page): Likewise, plus handle doubly-linked list.
	(alloc_small_page): Likewise, plus remove code moved
	to link_page and call it.  Fix head comment.
	(alloc_large_page): Likewise, plus remove code moved
	to link_page and call it.  Fix head comment.
	(ggc_alloc_zone_1): Likewise, plus include the zone name in
	the debugging output, and remove code moved to link_page.
	(free_chunk): Likewise, plus include the zone name in the
	debugging output.
	(struct ggc_pch_data): Remove ggc_pch_ondisk, since we now
	write a page_entry; move its only field into the enclosing
	struct.  Remove the (unused) written field.
	(ggc_pch_count_object): Adjust.
	(ggc_pch_total_size): Adjust and account for the page_entry.
	(ggc_pch_this_base): Account for the space taken by the
	page_entry.
	(ggc_pch_prepare_write): Write a page_entry to disk.
	(ggc_pch_write_object): Remove accesses to the unused
	d->written field.  Set type to OBJ_PCH in the written
	alloc_chunk.
	(ggc_pch_finish): Do not write anything.
	(ggc_pch_read): Link the mmap-ed page_entry in pch_zone.
	Do not create pch_zone here.



--- old-ggc/ggc-common.c	2004-05-01 15:23:44.000000000 +0200
+++ ggc-common.c	2004-05-06 15:27:07.000000000 +0200
@@ -479,8 +479,6 @@
   write_pch_globals (gt_ggc_rtab, &state);
   write_pch_globals (gt_pch_cache_rtab, &state);
 
-  ggc_pch_prepare_write (state.d, state.f);
-
   /* Pad the PCH file so that the mmapped area starts on a page boundary.  */
   {
     long o;
@@ -499,6 +497,8 @@
     fatal_error ("can't write padding to PCH file: %m");
 
   /* Actually write out the objects.  */
+  ggc_pch_prepare_write (state.d, state.f);
+
   for (i = 0; i < state.count; i++)
     {
       if (this_object_size < state.ptrs[i]->size)
--- old-ggc/ggc-zone.c	2004-05-01 14:58:15.000000000 +0200
+++ ggc-zone.c	2004-05-06 15:27:33.000000000 +0200
@@ -132,23 +132,24 @@
 #define DEADCHUNK_MAGIC 0x12817317
 #endif
 
+enum object_type { OBJ_NORMAL, OBJ_PCH, OBJ_UNUSED_2, OBJ_UNUSED_3 };
+
 /* This structure manages small chunks.  When the chunk is free, it's
    linked with other chunks via free_next.  When the chunk is allocated,
    the data starts at u.  Large chunks are allocated one at a time to
    their own page, and so don't come in here.
 
    The "type" field is a placeholder for a future change to do
-   generational collection.  At present it is 0 when free and
-   and 1 when allocated.  */
+   generational collection.  */
 
 struct alloc_chunk {
 #ifdef COOKIE_CHECKING
   unsigned int magic;
 #endif
-  unsigned int type:1;
+  ENUM_BITFIELD(object_type) type:2;
   unsigned int typecode:14;
   unsigned int large:1;
-  unsigned int size:15;
+  unsigned int size:14;
   unsigned int mark:1;
   union {
     struct alloc_chunk *next_free;
@@ -182,10 +183,6 @@
 #define SIZE_BIN_UP(SIZE)	(((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
 #define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
 
-/* Marker used as chunk->size for a large object.  Should correspond
-   to the size of the bitfield above.  */
-#define LARGE_OBJECT_SIZE	0x7fff
-
 /* We use this structure to determine the alignment required for
    allocations.  For power-of-two sized allocations, that's not a
    problem, but it does matter for odd-sized allocations.  */
@@ -219,25 +216,22 @@
 /* A page_entry records the status of an allocation page.  */
 typedef struct page_entry
 {
-  /* The next page-entry with objects of the same size, or NULL if
-     this is the last page-entry.  */
-  struct page_entry *next;
+  /* A doubly-linked list of page-entries with objects of the same size,
+     or NULL if this is the last/first (respectively) page-entry.
+     prev is NULL for free pages.  */
+  struct page_entry *next, *prev;
 
 #ifdef USING_MALLOC_PAGE_GROUPS
   /* Back pointer to the page group this page came from.  */
   struct page_group *group;
 #endif
 
-  /* The number of bytes allocated.  (This will always be a multiple
-     of the host system page size.)  */
+  /* The number of bytes allocated.  */
   size_t bytes;
 
   /* How many collections we've survived.  */
   size_t survived;
 
-  /* The address at which the memory is allocated.  */
-  char *page;
-
   /* Context depth of this page.  */
   unsigned short context_depth;
 
@@ -246,8 +240,23 @@
 
   /* The zone that this page entry belongs to.  */
   struct alloc_zone *zone;
+
+  union {
+    struct alloc_chunk chunks[1];
+    char data[1];
+
+    /* Make sure the data is sufficiently aligned.  */
+    HOST_WIDEST_INT align_i;
+#ifdef HAVE_LONG_DOUBLE
+    long double align_d;
+#else
+    double align_d;
+#endif
+  } u;
 } page_entry;
 
+#define PAGE_ENTRY_SIZE	(offsetof (struct page_entry, u))
+
 #ifdef USING_MALLOC_PAGE_GROUPS
 /* A page_group describes a large allocation from malloc, from which
    we parcel out aligned pages.  */
@@ -337,6 +346,7 @@
   bool persistent;
 } main_zone;
 
+struct alloc_zone *pch_zone;
 struct alloc_zone *rtl_zone;
 struct alloc_zone *garbage_zone;
 struct alloc_zone *tree_zone;
@@ -347,14 +357,14 @@
    in_use bitmask for page_group.  */
 #define GGC_QUIRE_SIZE 16
 
-static int ggc_allocated_p (const void *);
 #ifdef USING_MMAP
 static char *alloc_anon (char *, size_t, struct alloc_zone *);
 #endif
-static struct page_entry * alloc_small_page ( struct alloc_zone *);
+static void link_page (struct page_entry *entry, struct alloc_zone *zone);
+static struct page_entry * alloc_small_page (struct alloc_zone *);
 static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
 static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
-static void free_page (struct page_entry *);
+static void free_page (struct page_entry *, struct alloc_zone *);
 static void release_pages (struct alloc_zone *);
 static void sweep_pages (struct alloc_zone *);
 static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short MEM_STAT_DECL);
@@ -367,23 +377,6 @@
 #endif
 
 
-/* Returns nonzero if P was allocated in GC'able memory.  */
-
-static inline int
-ggc_allocated_p (const void *p)
-{
-  struct alloc_chunk *chunk;
-  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
-#ifdef COOKIE_CHECKING
-  if (chunk->magic != CHUNK_MAGIC)
-    abort ();
-#endif
-  if (chunk->type == 1)
-    return true;  
-  return false;
-}
-
-
 #ifdef USING_MMAP
 /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
    (if non-null).  The ifdef structure here is intended to cause a
@@ -443,8 +436,27 @@
 }
 #endif
 
+static void
+link_page (struct page_entry *entry, struct alloc_zone *zone)
+{
+  entry->context_depth = zone->context_depth;
+  entry->zone = zone;
+  entry->survived = 0;
+  entry->prev = 0;
+
+  if (zone->pages)
+    {
+      entry->next = zone->pages;
+      entry->next->prev = entry;
+    }
+  else
+    entry->next = 0;
+
+  zone->pages = entry;
+  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
+}
 
-/* Allocate a new page for allocating objects of size 2^ORDER,
+/* Allocate a new page for allocating small objects,
    and return an entry for it.  */
 
 static inline struct page_entry *
@@ -464,7 +476,7 @@
     {
       /* Recycle the allocated memory from this page ...  */
       zone->free_pages = entry->next;
-      page = entry->page;
+      page = entry->u.data;
 #ifdef USING_MALLOC_PAGE_GROUPS
       group = entry->group;
 #endif
@@ -484,9 +496,8 @@
 	 memory order.  */
       for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
 	{
-	  e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
-	  e->bytes = G.pagesize;
-	  e->page = page + (i << G.lg_pagesize);
+	  e = (struct page_entry *) (page + (i << G.lg_pagesize));
+	  e->bytes = G.pagesize - PAGE_ENTRY_SIZE;
 	  e->next = f;
 	  f = e;
 	}
@@ -543,9 +554,8 @@
         struct page_entry *e, *f = zone->free_pages;
         for (a = enda - G.pagesize; a != page; a -= G.pagesize)
           {
-	    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
-            e->bytes = G.pagesize;
-            e->page = a;
+	    e = (struct page_entry *) a;
+	    e->bytes = G.pagesize - PAGE_ENTRY_SIZE;
             e->group = group;
             e->next = f;
             e->large_p = false;
@@ -557,15 +567,10 @@
 #endif
 
   if (entry == NULL)
-    entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
+    entry = (struct page_entry *) page;
 
-  entry->next = 0;
-  entry->bytes = G.pagesize;
-  entry->page = page;
-  entry->context_depth = zone->context_depth;
+  entry->bytes = G.pagesize - PAGE_ENTRY_SIZE;
   entry->large_p = false;
-  entry->zone = zone;
-  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
 
 #ifdef USING_MALLOC_PAGE_GROUPS
   entry->group = group;
@@ -574,11 +579,14 @@
 
   if (GGC_DEBUG_LEVEL >= 2)
     fprintf (G.debug_file,
-	     "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
-	     (PTR) entry, page, page + G.pagesize - 1);
+	     "Allocating %s page at %p, data %p-%p\n", zone->name,
+	     (PTR) entry, entry->u.data, entry->u.data + entry->bytes - 1);
 
+  link_page (entry, zone);
   return entry;
 }
+
+
 /* Compute the smallest multiple of F that is >= X.  */
 
 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
@@ -589,24 +597,18 @@
 alloc_large_page (size_t size, struct alloc_zone *zone)
 {
   struct page_entry *entry;
-  char *page;
   size =  ROUND_UP (size, 1024);
-  page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
-  entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);
+  entry = (struct page_entry *) xmalloc (size + CHUNK_OVERHEAD + PAGE_ENTRY_SIZE);
 
-  entry->next = 0;
   entry->bytes = size;
-  entry->page = page;
-  entry->context_depth = zone->context_depth;
   entry->large_p = true;
-  entry->zone = zone;
-  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
 
   if (GGC_DEBUG_LEVEL >= 2)
     fprintf (G.debug_file,
-	     "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
-	     (PTR) entry, page, page + size - 1);
+	     "Allocating %s large page at %p, data %p-%p\n", zone->name,
+	     (PTR) entry, entry->u.data, entry->u.data + size - 1);
 
+  link_page (entry, zone);
   return entry;
 }
 
@@ -614,30 +616,42 @@
 /* For a page that is no longer needed, put it on the free page list.  */
 
 static inline void
-free_page (page_entry *entry)
+free_page (struct page_entry *entry, struct alloc_zone *zone)
 {
+#ifdef ENABLE_GC_CHECKING
+  /* Poison the page.  */
+  memset (entry->u.data, 0xb5, entry->bytes);
+#endif
+
   if (GGC_DEBUG_LEVEL >= 2)
     fprintf (G.debug_file,
-	     "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
-	     entry->page, entry->page + entry->bytes - 1);
+	     "Deallocating %s page at %p, data %p-%p\n", zone->name, (PTR) entry,
+	     entry->u.data, entry->u.data + entry->bytes - 1);
+
+  if (entry->prev)
+    entry->prev->next = entry->next;
+  else
+    zone->pages = entry->next;
+  if (entry->next)
+    entry->next->prev = entry->prev;
 
   if (entry->large_p)
     {
-      free (entry->page);
-      VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
+      free (entry);
+      VALGRIND_FREELIKE_BLOCK (entry->u.data, entry->bytes);
     }
   else
     {
 #ifdef USING_MALLOC_PAGE_GROUPS
-      clear_page_group_in_use (entry->group, entry->page);
+      clear_page_group_in_use (entry->group, entry->u.data);
 #endif
 
       /* Mark the page as inaccessible.  Discard the handle to
 	 avoid handle leak.  */
-      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
+      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->u.data, entry->bytes));
 
-      entry->next = entry->zone->free_pages;
-      entry->zone->free_pages = entry;
+      entry->next = zone->free_pages;
+      zone->free_pages = entry;
     }
 }
 
@@ -647,29 +661,21 @@
 release_pages (struct alloc_zone *zone)
 {
 #ifdef USING_MMAP
-  page_entry *p, *next;
+  page_entry *p;
   char *start;
   size_t len;
 
-  /* Gather up adjacent pages so they are unmapped together.  */
-  p = zone->free_pages;
-
-  while (p)
+  for (p = zone->free_pages; p; )
     {
-      start = p->page;
-      next = p->next;
-      len = p->bytes;
-      free (p);
-      p = next;
-
-      while (p && p->page == start + len)
+      /* Gather up adjacent pages so they are unmapped together.  */
+      start = (char *) p;
+      len = 0;
+      do
 	{
-	  next = p->next;
-	  len += p->bytes;
-	  free (p);
-	  p = next;
+	  len += p->bytes + PAGE_ENTRY_SIZE;
+          p = p->next;
 	}
-
+      while ((char *) p == start + len);
       munmap (start, len);
       zone->bytes_mapped -= len;
     }
@@ -725,7 +731,8 @@
   chunk->u.next_free = zone->free_chunks[bin];
   zone->free_chunks[bin] = chunk;
   if (GGC_DEBUG_LEVEL >= 3)
-    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
+    fprintf (G.debug_file, "Deallocating %s object, chunk=%p\n",
+	     zone->name, (void *)chunk);
   VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
 }
 
@@ -747,15 +754,12 @@
   size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
 
   /* Large objects are handled specially.  */
-  if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
+  if (size >= G.pagesize - PAGE_ENTRY_SIZE - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
     {
       size = ROUND_UP (size, 1024);
       entry = alloc_large_page (size, zone);
-      entry->survived = 0;
-      entry->next = entry->zone->pages;
-      entry->zone->pages = entry;
 
-      chunk = (struct alloc_chunk *) entry->page;
+      chunk = entry->u.chunks;
       VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
       chunk->large = 1;
       chunk->size = CEIL (size, 1024);
@@ -791,12 +795,10 @@
   if (!chunk)
     {
       entry = alloc_small_page (zone);
-      entry->next = entry->zone->pages;
-      entry->zone->pages = entry;
 
-      chunk = (struct alloc_chunk *) entry->page;
+      chunk = entry->u.chunks;
       VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
-      chunk->size = G.pagesize - CHUNK_OVERHEAD;
+      chunk->size = entry->bytes - CHUNK_OVERHEAD;
       chunk->large = 0;
     }
   else
@@ -818,7 +820,7 @@
 #ifdef COOKIE_CHECKING
       lchunk->magic = CHUNK_MAGIC;
 #endif
-      lchunk->type = 0;
+      lchunk->type = OBJ_NORMAL;
       lchunk->mark = 0;
       lchunk->size = lsize;
       lchunk->large = 0;
@@ -831,10 +833,12 @@
 
   /* Calculate the object's address.  */
  found:
+  entry = (struct page_entry *) (((size_t) chunk) & ~(G.pagesize - 1));
+
 #ifdef COOKIE_CHECKING
   chunk->magic = CHUNK_MAGIC;
 #endif
-  chunk->type = 1;
+  chunk->type = OBJ_NORMAL;
   chunk->mark = 0;
   chunk->typecode = type;
   result = chunk->u.data;
@@ -860,8 +864,8 @@
   zone->allocated += size + CHUNK_OVERHEAD;
 
   if (GGC_DEBUG_LEVEL >= 3)
-    fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
-	     (void *)chunk, (unsigned long) size, result);
+    fprintf (G.debug_file, "Allocating %s object, chunk=%p size=%lu at %p\n",
+	     zone->name, (void *)chunk, (unsigned long) size, result);
 
   return result;
 }
@@ -927,7 +931,11 @@
 
   /* XXX: We only deal with explicitly freeing large objects ATM.  */
   if (chunk->large)
-    free (p);
+    {
+      struct page_entry *entry;
+      entry = (struct page_entry *) ((char *)chunk - PAGE_ENTRY_SIZE);
+      free_page (entry, entry->zone);
+    }
 }
 
 /* If P is not marked, mark it and return false.  Otherwise return true.
@@ -998,6 +1006,7 @@
   G.zones = &main_zone;
 
   /* Allocate the default zones.  */
+  pch_zone = new_persistent_ggc_zone ("PCH zone");
   rtl_zone = new_ggc_zone ("RTL zone");
   tree_zone = new_ggc_zone ("Tree zone");
   garbage_zone = new_ggc_zone ("Garbage zone");
@@ -1036,9 +1045,8 @@
       }
 
     /* We have a good page, might as well hold onto it...  */
-    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
-    e->bytes = G.pagesize;
-    e->page = p;
+    e = (struct page_entry *) p;
+    e->bytes = G.pagesize - PAGE_ENTRY_SIZE;
     e->next = main_zone.free_pages;
     main_zone.free_pages = e;
   }
@@ -1166,39 +1174,35 @@
   pp = &zone->pages;
   for (p = zone->pages; p ; p = next)
     {
+      /* This page has now survived another collection.  */
+      p->survived++;
       next = p->next;
+
       /* Large pages are all or none affairs. Either they are
 	 completely empty, or they are completely full.
 	 
 	 XXX: Should we bother to increment allocated.  */
       if (p->large_p)
 	{
-	  if (((struct alloc_chunk *)p->page)->mark == 1)
+	  if (p->u.chunks[0].mark == 1)
 	    {
-	      ((struct alloc_chunk *)p->page)->mark = 0;
+	      p->u.chunks[0].mark = 0;
 	    }
 	  else
 	    {
 	      *pp = next;
-#ifdef ENABLE_GC_CHECKING
-	  /* Poison the page.  */
-	  memset (p->page, 0xb5, p->bytes);
-#endif
-	      free_page (p);
+	      free_page (p, zone);
 	    }
 	  continue;
 	}
 
-      /* This page has now survived another collection.  */
-      p->survived++;
-
       /* Which leaves full and partial pages.  Step through all chunks,
 	 consolidate those that are free and insert them into the free
 	 lists.  Note that consolidation slows down collection
 	 slightly.  */
 
-      chunk = (struct alloc_chunk *)p->page;
-      end = (struct alloc_chunk *)(p->page + G.pagesize);
+      chunk = p->u.chunks;
+      end = (struct alloc_chunk *)(p->u.data + p->bytes);
       last_free = NULL;
       last_free_size = 0;
       nomarksinpage = true;
@@ -1210,7 +1214,7 @@
 	      nomarksinpage = false;
 	      if (last_free)
 		{
-		  last_free->type = 0;
+		  last_free->type = OBJ_NORMAL;
 		  last_free->size = last_free_size;
 		  last_free->mark = 0;
 		  poison_chunk (last_free, last_free_size);
@@ -1245,14 +1249,14 @@
 	  *pp = next;
 #ifdef ENABLE_GC_CHECKING
 	  /* Poison the page.  */
-	  memset (p->page, 0xb5, p->bytes);
+	  memset (p->u.data, 0xb5, p->bytes);
 #endif
-	  free_page (p);
+	  free_page (p, zone);
 	  continue;
 	}
       else if (last_free)
 	{
-	  last_free->type = 0;
+	  last_free->type = OBJ_NORMAL;
 	  last_free->size = last_free_size;
 	  last_free->mark = 0;
 	  poison_chunk (last_free, last_free_size);
@@ -1354,8 +1358,8 @@
 	{
 	  if (!p->large_p)
 	    {
-	      struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
-	      struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
+	      struct alloc_chunk *chunk = p->u.chunks;
+	      struct alloc_chunk *end = (struct alloc_chunk *)(p->u.data + p->bytes);
 	      do
 		{
 		  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
@@ -1432,8 +1436,8 @@
 	  {
 	    if (!p->large_p)
 	      {
-		struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
-		struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
+	        struct alloc_chunk *chunk = p->u.chunks;
+	        struct alloc_chunk *end = (struct alloc_chunk *)(p->u.data + p->bytes);
 		do
 		  {
 		    prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
@@ -1447,7 +1451,7 @@
 	      }
 	    else
 	      {
-		((struct alloc_chunk *)p->page)->mark = 0;
+	        p->u.chunks[0].mark = 0;
 	      }
 	  }
       }
@@ -1485,12 +1489,8 @@
 
 struct ggc_pch_data
 {
-  struct ggc_pch_ondisk
-  {
-    unsigned total;
-  } d;
   size_t base;
-  size_t written;
+  unsigned total;
 };
 
 /* Initialize the PCH data structure.  */
@@ -1508,11 +1508,9 @@
 		      size_t size, bool is_string)
 {
   if (!is_string)
-    {
-      d->d.total += size + CHUNK_OVERHEAD;
-    }
+    d->total += size + CHUNK_OVERHEAD;
   else
-    d->d.total += size;
+    d->total += size;
 }
 
 /* Return the total size of the PCH data.  */
@@ -1520,7 +1518,7 @@
 size_t
 ggc_pch_total_size (struct ggc_pch_data *d)
 {
-  return d->d.total;
+  return d->total + PAGE_ENTRY_SIZE;
 }
 
 /* Set the base address for the objects in the PCH file.  */
@@ -1528,7 +1526,7 @@
 void
 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
 {
-  d->base = (size_t) base;
+  d->base = (size_t) base + PAGE_ENTRY_SIZE;
 }
 
 /* Allocate a place for object X of size SIZE in the PCH file.  */
@@ -1562,7 +1560,11 @@
 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
 		       FILE *f ATTRIBUTE_UNUSED)
 {
-  /* Nothing to do.  */
+  struct page_entry entry;
+  memset (&entry, 0, PAGE_ENTRY_SIZE);
+  entry.bytes = d->total;
+  if (fwrite (&entry, PAGE_ENTRY_SIZE, 1, f) != 1)
+    fatal_error ("can't write PCH file: %m");
 }
 
 /* Write out object X of SIZE to file F.  */
@@ -1574,41 +1576,25 @@
 {
   if (!is_string)
     {
-      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
+      struct alloc_chunk chunk = *(struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
+      chunk.type = OBJ_PCH;
       size = ggc_get_size (x);
-      if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
+      if (fwrite (&chunk, CHUNK_OVERHEAD, 1, f) != 1)
 	fatal_error ("can't write PCH file: %m");
-      d->written += size + CHUNK_OVERHEAD;
     }
-   else
-     {
-       if (fwrite (x, size, 1, f) != 1)
-	 fatal_error ("can't write PCH file: %m");
-       d->written += size;
-     }
+
+  if (fwrite (x, size, 1, f) != 1)
+    fatal_error ("can't write PCH file: %m");
 }
 
 void
-ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
+ggc_pch_finish (struct ggc_pch_data *d, FILE *f ATTRIBUTE_UNUSED)
 {
-  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
-    fatal_error ("can't write PCH file: %m");
   free (d);
 }
 void
-ggc_pch_read (FILE *f, void *addr)
+ggc_pch_read (FILE *f ATTRIBUTE_UNUSED, void *addr)
 {
-  struct ggc_pch_ondisk d;
-  struct page_entry *entry;
-  struct alloc_zone *pch_zone;
-  if (fread (&d, sizeof (d), 1, f) != 1)
-    fatal_error ("can't read PCH file: %m");
-  entry = xcalloc (1, sizeof (struct page_entry));
-  entry->bytes = d.total;
-  entry->page = addr;
-  entry->context_depth = 0;
-  pch_zone = new_persistent_ggc_zone ("PCH zone");
-  entry->zone = pch_zone;
-  entry->next = entry->zone->pages;
-  entry->zone->pages = entry;
+  struct page_entry *entry = (struct page_entry *) addr;
+  link_page (entry, pch_zone);
 }

