This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



ggc_pop_context speedup


ggc_pop_context is an utter and complete pig. This patch rearranges some of its data so that it isn't a pig. Instead of taking a cache miss on each access, we group the accessed data together, so that the hardware's next-cache-line prefetch can do its job and we hit the cache on at least 7 of every 8 accesses. Net result: ggc_pop_context is 19.7x faster, or a 94.9% speedup, depending on how you want to view it. For the entire compile, it is 31.3% faster. Testcase: Finder.ii.
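
In outline, the rearranged data looks like the sketch below (a simplified, illustrative sketch only; the real code in the patch keeps these in varrays inside struct globals and merges the saved bits via ggc_recalculate_in_use_p):

#include <stdlib.h>

struct page_entry;                     /* opaque for this sketch */

static struct page_entry **by_depth;   /* every page, ordered by increasing context depth */
static unsigned long **save_in_use;    /* saved in_use_p bitmaps, parallel to by_depth */
static unsigned int *depth;            /* depth[d] = first index in by_depth at depth d */
static size_t n_pages, n_depths;

/* Popping a context now walks one contiguous slice of these arrays
   instead of chasing the per-order page lists, so the hardware's
   sequential prefetch can keep up.  */
static void
pop_context_sketch (unsigned int d)
{
  size_t start = depth[d];
  size_t end = (d + 1 < n_depths) ? depth[d + 1] : n_pages;
  size_t i;

  for (i = start; i < end; i++)
    if (save_in_use[i])
      {
        /* The real code restores the saved in-use bits into the
           page_entry here before freeing the saved copy.  */
        free (save_in_use[i]);
        save_in_use[i] = 0;
      }
}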

I wish we could keep the code as originally written, and I wish the optimizer could rearrange the data this heavily to account for cache effects, but until that time...

Ok? Ok for 3.3 branch?

* varray.h (VARRAY_DATA_GENERICNGC): Add.
(VARRAY_DATA_UNGC): Add.
(union varray_data_tag): Add genericngc, ungc.
(VARRAY_GENERIC_PTRNGC_INIT): Add.
(VARRAY_UINTNGC_INIT): Add.
(VARRAY_PUSH_GENERIC_PTRNGC): Add.
(VARRAY_PUSH_UINTNGC): Add.
(VARRAY_GENERIC_PTRNGC): Add.
(VARRAY_UINTNGC): Add.
(VARRAY_TOP_GENERIC_PTRNGC): Add.
(VARRAY_TOP_UINTNGC): Add.
* varray.c (element_size): Remove.
(uses_ggc): Remove.
(element): Add.
(varray_init): Use new interface.
(varray_grow): Use new interface.
(varray_clear): Use new interface.
* ggc-page.c (struct page_entry): Add index_by_depth field.
Remove save_in_use_p field.
(struct globals): Add by_depth, depth, and save_in_use fields.
(INITIAL_PTE_COUNT): Add.
(save_in_use_p_i): Add.
(save_in_use_p): Add.
(adjust_depth): Add.
(free_page): Switch over to faster data structures.
(ggc_alloc): Likewise.
(init_ggc): Likewise.
(ggc_recalculate_in_use_p): Likewise.
(ggc_pop_context): Likewise.
(clear_marks): Likewise.
(ggc_pch_read): Likewise.

Doing diffs in .:
*** ./varray.h.~1~ Wed Dec 18 10:53:02 2002
--- ./varray.h Wed Jan 29 19:29:07 2003
*************** struct const_equiv_data GTY(()) {
*** 57,63 ****
};

/* Enum indicating what the varray contains.
! If this is changed, `element_size' in varray.c needs to be updated. */

enum varray_data_enum {
VARRAY_DATA_C,
--- 57,63 ----
};

/* Enum indicating what the varray contains.
! If this is changed, `element' in varray.c needs to be updated. */

enum varray_data_enum {
VARRAY_DATA_C,
*************** enum varray_data_enum {
*** 80,85 ****
--- 80,87 ----
VARRAY_DATA_CONST_EQUIV,
VARRAY_DATA_BB,
VARRAY_DATA_TE,
+ VARRAY_DATA_GENERICNGC,
+ VARRAY_DATA_UNGC,
NUM_VARRAY_DATA
};

*************** typedef union varray_data_tag GTY (()) {
*** 125,130 ****
--- 127,136 ----
tag ("VARRAY_DATA_BB"))) bb[1];
struct elt_list *GTY ((length ("%0.num_elements"),
tag ("VARRAY_DATA_TE"))) te[1];
+ PTR GTY ((length ("%0.num_elements"), skip (""),
+ tag ("VARRAY_DATA_GENERICNGC"))) genericngc[1];
+ unsigned int GTY ((length ("%0.num_elements"),
+ tag ("VARRAY_DATA_UNGC"))) ungc[1];
} varray_data;

/* Virtual array of pointers header. */
*************** extern varray_type varray_init PARAMS ((
*** 204,209 ****
--- 210,221 ----
#define VARRAY_ELT_LIST_INIT(va, num, name) \
va = varray_init (num, VARRAY_DATA_TE, name)

+ #define VARRAY_GENERIC_PTRNGC_INIT(va, num, name) \
+ va = varray_init (num, VARRAY_DATA_GENERICNGC, name)
+
+ #define VARRAY_UINTNGC_INIT(va, num, name) \
+ va = varray_init (num, VARRAY_DATA_UNGC, name)
+
/* Free up memory allocated by the virtual array, but do not free any of the
elements involved. */
#define VARRAY_FREE(vp) \
*************** extern void varray_check_failed PARAMS (
*** 277,282 ****
--- 289,296 ----
#define VARRAY_CONST_EQUIV(VA, N) VARRAY_CHECK (VA, N, const_equiv)
#define VARRAY_BB(VA, N) VARRAY_CHECK (VA, N, bb)
#define VARRAY_ELT_LIST(VA, N) VARRAY_CHECK (VA, N, te)
+ #define VARRAY_GENERIC_PTRNGC(VA,N) VARRAY_CHECK (VA, N, genericngc)
+ #define VARRAY_UINTNGC(VA, N) VARRAY_CHECK (VA, N, ungc)

/* Push a new element on the end of VA, extending it if necessary. */
#define VARRAY_PUSH_CHAR(VA, X) VARRAY_PUSH (VA, c, X)
*************** extern void varray_check_failed PARAMS (
*** 298,303 ****
--- 312,319 ----
#define VARRAY_PUSH_REG(VA, X) VARRAY_PUSH (VA, reg, X)
#define VARRAY_PUSH_CONST_EQUIV(VA, X) VARRAY_PUSH (VA, const_equiv, X)
#define VARRAY_PUSH_BB(VA, X) VARRAY_PUSH (VA, bb, X)
+ #define VARRAY_PUSH_GENERIC_PTRNGC(VA, X) VARRAY_PUSH (VA, genericngc, X)
+ #define VARRAY_PUSH_UINTNGC(VA, X) VARRAY_PUSH (VA, ungc, X)

/* Return the last element of VA. */
#define VARRAY_TOP_CHAR(VA) VARRAY_TOP (VA, c)
*************** extern void varray_check_failed PARAMS (
*** 319,323 ****
--- 335,341 ----
#define VARRAY_TOP_REG(VA) VARRAY_TOP (VA, reg)
#define VARRAY_TOP_CONST_EQUIV(VA) VARRAY_TOP (VA, const_equiv)
#define VARRAY_TOP_BB(VA) VARRAY_TOP (VA, bb)
+ #define VARRAY_TOP_GENERIC_PTRNGC(VA) VARRAY_TOP (VA, genericngc)
+ #define VARRAY_TOP_UINTNGC(VA) VARRAY_TOP (VA, ungc)

#endif /* ! GCC_VARRAY_H */
*** ./ggc-page.c.~1~ Wed Jan 29 19:44:32 2003
--- ./ggc-page.c Wed Jan 29 19:47:42 2003
*************** typedef struct page_entry
*** 253,261 ****
struct page_group *group;
#endif

! /* Saved in-use bit vector for pages that aren't in the topmost
! context during collection. */
! unsigned long *save_in_use_p;

/* Context depth of this page. */
unsigned short context_depth;
--- 253,261 ----
struct page_group *group;
#endif

! /* This is the index in the by_depth varray where this page table can be
! found. */
! unsigned long index_by_depth;

/* Context depth of this page. */
unsigned short context_depth;
*************** static struct globals
*** 361,366 ****
--- 361,386 ----

/* The file descriptor for debugging output. */
FILE *debug_file;
+
+ /* Each element of this array is a page_entry; all page_entries can
+ be found in here by increasing depth. index_by_depth in the
+ page_entry is the index into this data structure where that
+ page_entry can be found. This is used to speed up finding all
+ page_entries at a particular depth. We avoid the use of GCed
+ memory for this structure, because we might catch fire if we
+ tried. */
+ varray_type by_depth;
+
+ /* Each element is an index in by_depth where the given depth starts.
+ This structure is indexed by the depth we are interested
+ in. */
+ varray_type depth;
+
+ /* Each element is a pointer to the saved in_use_p bits, if any,
+ zero otherwise. We allocate them all together, to enable a
+ better runtime data access pattern. */
+ varray_type save_in_use;
+
} G;

/* The size in bytes required to maintain a bitmap for the objects
*************** static struct globals
*** 373,378 ****
--- 393,401 ----
free list. This cannot be larger than HOST_BITS_PER_INT for the
in_use bitmask for page_group. */
#define GGC_QUIRE_SIZE 16
+
+ /* Initial guess as to how many page table entries we might need. */
+ #define INITIAL_PTE_COUNT 15000


static int ggc_allocated_p PARAMS ((const void *));
static page_entry *lookup_page_table_entry PARAMS ((const void *));
*************** static void clear_marks PARAMS ((void));
*** 392,403 ****
--- 415,432 ----
static void sweep_pages PARAMS ((void));
static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
static void compute_inverse PARAMS ((unsigned));
+ static inline void adjust_depth PARAMS ((void));

#ifdef ENABLE_GC_CHECKING
static void poison_pages PARAMS ((void));
#endif

void debug_print_page_list PARAMS ((int));
+
+ #define save_in_use_p_i(__i) \
+ (*(unsigned long **)&VARRAY_GENERIC_PTRNGC (G.save_in_use, __i))
+ #define save_in_use_p(__p) \
+ (save_in_use_p_i (__p->index_by_depth))


/* Returns nonzero if P was allocated in GC'able memory. */

*************** alloc_page (order)
*** 764,769 ****
--- 793,816 ----
return entry;
}

+ /* Adjust the size of G.depth so that no index greater than the one
+ used by the top of the G.by_depth varray is used. */
+ static inline void
+ adjust_depth ()
+ {
+ page_entry *top;
+
+ if (VARRAY_ACTIVE_SIZE (G.by_depth))
+ {
+ top = VARRAY_TOP_GENERIC_PTRNGC (G.by_depth);
+ /* Peel back indices in depth that index into by_depth, so that
+ as new elements are added to by_depth, we note the indices
+ of those elements, if they are for new context depths. */
+ while (VARRAY_ACTIVE_SIZE (G.depth) > top->context_depth+1)
+ VARRAY_POP (G.depth);
+ }
+ }
+
/* For a page that is no longer needed, put it on the free page list. */

static inline void
*************** free_page (entry)
*** 785,790 ****
--- 832,862 ----
clear_page_group_in_use (entry->group, entry->page);
#endif

+ if (VARRAY_ACTIVE_SIZE (G.by_depth) > 1)
+ {
+ page_entry *top = VARRAY_TOP_GENERIC_PTRNGC (G.by_depth);
+
+ /* If they are at the same depth, put top element into freed slot. */
+ if (entry->context_depth == top->context_depth)
+ {
+ int i = entry->index_by_depth;
+ VARRAY_GENERIC_PTRNGC (G.by_depth, i) = top;
+ VARRAY_GENERIC_PTRNGC (G.save_in_use, i)
+ = VARRAY_TOP_GENERIC_PTRNGC (G.save_in_use);
+ top->index_by_depth = i;
+ }
+ else
+ {
+ /* In theory we cannot ever free a page from a deeper
+ context than the current context, right?! */
+ abort ();
+ }
+ }
+ VARRAY_POP (G.by_depth);
+ VARRAY_POP (G.save_in_use);
+
+ adjust_depth ();
+
entry->next = G.free_pages;
G.free_pages = entry;
}
*************** ggc_alloc (size)
*** 908,913 ****
--- 980,994 ----
struct page_entry *new_entry;
new_entry = alloc_page (order);

+ new_entry->index_by_depth = VARRAY_ACTIVE_SIZE (G.by_depth);
+ VARRAY_PUSH_GENERIC_PTRNGC (G.by_depth, new_entry);
+ VARRAY_PUSH_GENERIC_PTRNGC (G.save_in_use, 0);
+
+ /* We can skip context depths; if we do, make sure we go all the
+ way to the new depth. */
+ while (new_entry->context_depth >= VARRAY_ACTIVE_SIZE (G.depth))
+ VARRAY_PUSH_UINTNGC (G.depth, VARRAY_ACTIVE_SIZE (G.by_depth)-1);
+
/* If this is the only entry, it's also the tail. */
if (entry == NULL)
G.page_tails[order] = new_entry;
*************** init_ggc ()
*** 1210,1215 ****
--- 1291,1300 ----
for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
size_lookup[i] = order;
}
+
+ VARRAY_GENERIC_PTRNGC_INIT (G.by_depth, INITIAL_PTE_COUNT, "by_depth");
+ VARRAY_GENERIC_PTRNGC_INIT (G.save_in_use, INITIAL_PTE_COUNT, "save_in_use");
+ VARRAY_UINTNGC_INIT (G.depth, 10, "depth");
}

/* Increment the `GC context'. Objects allocated in an outer context
*************** ggc_recalculate_in_use_p (p)
*** 1252,1258 ****

/* Something is in use if it is marked, or if it was in use in a
context further down the context stack. */
! p->in_use_p[i] |= p->save_in_use_p[i];

/* Decrement the free object count for every object allocated. */
for (j = p->in_use_p[i]; j; j >>= 1)
--- 1337,1343 ----

/* Something is in use if it is marked, or if it was in use in a
context further down the context stack. */
! p->in_use_p[i] |= save_in_use_p (p)[i];

/* Decrement the free object count for every object allocated. */
for (j = p->in_use_p[i]; j; j >>= 1)
*************** ggc_recalculate_in_use_p (p)
*** 1269,1278 ****
void
ggc_pop_context ()
{
! unsigned order, depth;

depth = --G.context_depth;

/* Any remaining pages in the popped context are lowered to the new
current context; i.e. objects allocated in the popped context and
left over are imported into the previous context. */
--- 1354,1421 ----
void
ggc_pop_context ()
{
! unsigned order, depth, i, e;

depth = --G.context_depth;

+ /* The G.depth array is shortened so that the last index is the
+ context_depth of the top element of by_depth. */
+ if (depth+1 < VARRAY_ACTIVE_SIZE (G.depth))
+ e = VARRAY_UINTNGC (G.depth, depth+1);
+ else
+ e = VARRAY_ACTIVE_SIZE (G.by_depth);
+
+ /* We might not have any PTEs of depth depth. */
+ if (depth < VARRAY_ACTIVE_SIZE (G.depth))
+ {
+
+ /* First we go through all the pages at depth depth to recalculate
+ the in use bits. */
+ for (i = VARRAY_UINTNGC (G.depth, depth); i < e; ++i)
+ {
+ page_entry *p;
+
+ #ifdef ENABLE_CHECKING
+ p = VARRAY_GENERIC_PTRNGC (G.by_depth, i);
+
+ /* We wanted only depths == depth, honest! */
+ if (p->context_depth != depth)
+ abort ();
+ if (p->index_by_depth != i)
+ abort ();
+ #endif
+
+ __builtin_prefetch (&save_in_use_p_i (i+8));
+ __builtin_prefetch (&save_in_use_p_i (i+16));
+ if (save_in_use_p_i (i))
+ {
+ p = VARRAY_GENERIC_PTRNGC (G.by_depth, i);
+ ggc_recalculate_in_use_p (p);
+ free (save_in_use_p_i (i));
+ save_in_use_p_i (i) = 0;
+ }
+ }
+ }
+
+ /* Then, we reset all page_entries with a depth greater than depth to be
+ at depth. */
+ for (i = e; i < VARRAY_ACTIVE_SIZE (G.by_depth); ++i)
+ {
+ page_entry *p = VARRAY_GENERIC_PTRNGC (G.by_depth, i);
+
+ /* We wanted only depths > depth, honest! */
+ #ifdef ENABLE_CHECKING
+ if (p->context_depth <= depth)
+ abort ();
+ if (p->index_by_depth != i)
+ abort ();
+ #endif
+ p->context_depth = depth;
+ }
+
+ adjust_depth ();
+
+ #ifdef ENABLE_CHECKING
/* Any remaining pages in the popped context are lowered to the new
current context; i.e. objects allocated in the popped context and
left over are imported into the previous context. */
*************** ggc_pop_context ()
*** 1283,1300 ****
for (p = G.pages[order]; p != NULL; p = p->next)
{
if (p->context_depth > depth)
! p->context_depth = depth;
!
! /* If this page is now in the topmost context, and we'd
! saved its allocation state, restore it. */
! else if (p->context_depth == depth && p->save_in_use_p)
! {
! ggc_recalculate_in_use_p (p);
! free (p->save_in_use_p);
! p->save_in_use_p = 0;
! }
}
}
}


/* Unmark all objects. */
--- 1426,1437 ----
for (p = G.pages[order]; p != NULL; p = p->next)
{
if (p->context_depth > depth)
! abort ();
! else if (p->context_depth == depth && save_in_use_p (p))
! abort ();
}
}
+ #endif
}


/* Unmark all objects. */
*************** clear_marks ()
*** 1324,1332 ****
marks. So, back them up first. */
if (p->context_depth < G.context_depth)
{
! if (! p->save_in_use_p)
! p->save_in_use_p = xmalloc (bitmap_size);
! memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
}

/* Reset reset the number of free objects and clear the
--- 1461,1469 ----
marks. So, back them up first. */
if (p->context_depth < G.context_depth)
{
! if (! save_in_use_p (p))
! save_in_use_p (p) = xmalloc (bitmap_size);
! memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
}

/* Reset reset the number of free objects and clear the
*************** ggc_pch_read (f, addr)
*** 1765,1771 ****
struct ggc_pch_ondisk d;
unsigned i;
char *offs = addr;
!
/* We've just read in a PCH file. So, every object that used to be allocated
is now free. */
clear_marks ();
--- 1902,1912 ----
struct ggc_pch_ondisk d;
unsigned i;
char *offs = addr;
! unsigned long count_old_page_tables;
! unsigned long count_new_page_tables;
!
! count_old_page_tables = VARRAY_ACTIVE_SIZE (G.by_depth);
!
/* We've just read in a PCH file. So, every object that used to be allocated
is now free. */
clear_marks ();
*************** ggc_pch_read (f, addr)
*** 1798,1807 ****
size_t bytes;
size_t num_objs;
size_t j;
!
if (d.totals[i] == 0)
continue;
!
bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
num_objs = bytes / OBJECT_SIZE (i);
entry = xcalloc (1, (sizeof (struct page_entry)
--- 1939,1948 ----
size_t bytes;
size_t num_objs;
size_t j;
!
if (d.totals[i] == 0)
continue;
!
bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
num_objs = bytes / OBJECT_SIZE (i);
entry = xcalloc (1, (sizeof (struct page_entry)
*************** ggc_pch_read (f, addr)
*** 1832,1838 ****
--- 1973,2046 ----
else
G.pages[i] = entry;
G.page_tails[i] = entry;
+
+ /* We start off by just adding all the new information to the end of the varrays;
+ later, we will move the new information to the front of the varrays, as the PCH
+ page tables are at context 0. */
+ VARRAY_PUSH_GENERIC_PTRNGC (G.by_depth, entry);
+ VARRAY_PUSH_GENERIC_PTRNGC (G.save_in_use, 0);
}
+
+ /* Now, we update the various data structures that speed page table handling. */
+
+ count_new_page_tables = VARRAY_ACTIVE_SIZE (G.by_depth) - count_old_page_tables;
+
+ /* First, we swap the new entries to the front of the varrays. */
+
+ {
+ varray_type new_by_depth;
+ varray_type new_save_in_use;
+ int c;
+
+ c = count_old_page_tables + count_new_page_tables;
+ /* Allow for at least 25% growth without resizing. */
+ c = c + c/4;
+ c = MAX (c, INITIAL_PTE_COUNT);
+
+ VARRAY_GENERIC_PTRNGC_INIT (new_by_depth,
+ c,
+ "by_depth");
+ VARRAY_GENERIC_PTRNGC_INIT (new_save_in_use,
+ c,
+ "save_in_use");
+ VARRAY_ACTIVE_SIZE (new_by_depth) = count_old_page_tables + count_new_page_tables;
+ VARRAY_ACTIVE_SIZE (new_save_in_use) = count_old_page_tables + count_new_page_tables;
+
+ memcpy (&VARRAY_GENERIC_PTRNGC (new_by_depth, 0),
+ &VARRAY_GENERIC_PTRNGC (G.by_depth, count_old_page_tables),
+ count_new_page_tables * sizeof (void *));
+ memcpy (&VARRAY_GENERIC_PTRNGC (new_by_depth, count_new_page_tables),
+ &VARRAY_GENERIC_PTRNGC (G.by_depth, 0),
+ count_old_page_tables * sizeof (void *));
+ memcpy (&VARRAY_GENERIC_PTRNGC (new_save_in_use, 0),
+ &VARRAY_GENERIC_PTRNGC (G.save_in_use, count_old_page_tables),
+ count_new_page_tables * sizeof (void *));
+ memcpy (&VARRAY_GENERIC_PTRNGC (new_save_in_use, count_new_page_tables),
+ &VARRAY_GENERIC_PTRNGC (G.save_in_use, 0),
+ count_old_page_tables * sizeof (void *));
+
+ VARRAY_FREE (G.by_depth);
+ VARRAY_FREE (G.save_in_use);
+
+ G.by_depth = new_by_depth;
+ G.save_in_use = new_save_in_use;
+ }
+
+ /* Now update all the index_by_depth fields; boy is this gonna
+ splatter all over memory. */
+ for (i = VARRAY_ACTIVE_SIZE (G.by_depth); i > 0; --i)
+ {
+ page_entry *p = VARRAY_GENERIC_PTRNGC (G.by_depth, i-1);
+ p->index_by_depth = i-1;
+ }
+
+ /* And last, we update the depth pointers in G.depth. The first
+ entry is already 0, and context 0 entries always start at index
+ 0, so there is nothing to update in the first slot. We need a
+ second slot only if we have old PTEs, and if we do, they start
+ at index count_new_page_tables. */
+ if (count_old_page_tables)
+ VARRAY_PUSH_UINTNGC (G.depth, count_new_page_tables);

/* Update the statistics. */
G.allocated = G.allocated_last_gc = offs - (char *)addr;
*** ./varray.c.~1~ Wed Dec 18 10:53:02 2002
--- ./varray.c Wed Jan 29 19:34:14 2003
***************
*** 29,62 ****

#define VARRAY_HDR_SIZE (sizeof (struct varray_head_tag) - sizeof (varray_data))

! static const size_t element_size[NUM_VARRAY_DATA] = {
! sizeof (char),
! sizeof (unsigned char),
! sizeof (short),
! sizeof (unsigned short),
! sizeof (int),
! sizeof (unsigned int),
! sizeof (long),
! sizeof (unsigned long),
! sizeof (HOST_WIDE_INT),
! sizeof (unsigned HOST_WIDE_INT),
! sizeof (PTR),
! sizeof (char *),
! sizeof (struct rtx_def *),
! sizeof (struct rtvec_def *),
! sizeof (union tree_node *),
! sizeof (struct bitmap_head_def *),
! sizeof (struct reg_info_def *),
! sizeof (struct const_equiv_data),
! sizeof (struct basic_block_def *),
! sizeof (struct elt_list *)
! };
!
! static const int uses_ggc[NUM_VARRAY_DATA] = {
! 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* unsigned HOST_WIDE_INT */
! 1, /* PTR */
! 1, 1, 1, 1, 1, /* bitmap_head_def */
! 0, 0, 0, 1
};

/* Allocate a virtual array with NUM_ELEMENT elements, each of which is
--- 29,60 ----

#define VARRAY_HDR_SIZE (sizeof (struct varray_head_tag) - sizeof (varray_data))

! static const struct {
! size_t size;
! int uses_ggc;
! } element[NUM_VARRAY_DATA] = {
! { sizeof (char), 1 },
! { sizeof (unsigned char), 1 },
! { sizeof (short), 1 },
! { sizeof (unsigned short), 1 },
! { sizeof (int), 1 },
! { sizeof (unsigned int), 1 },
! { sizeof (long), 1 },
! { sizeof (unsigned long), 1 },
! { sizeof (HOST_WIDE_INT), 1 },
! { sizeof (unsigned HOST_WIDE_INT), 1 },
! { sizeof (PTR), 1 },
! { sizeof (char *), 1 },
! { sizeof (struct rtx_def *), 1 },
! { sizeof (struct rtvec_def *), 1 },
! { sizeof (union tree_node *), 1 },
! { sizeof (struct bitmap_head_def *), 1 },
! { sizeof (struct reg_info_def *), 0 },
! { sizeof (struct const_equiv_data), 0 },
! { sizeof (struct basic_block_def *), 0 },
! { sizeof (struct elt_list *), 1 },
! { sizeof (PTR), 0 },
! { sizeof (unsigned int), 0 }
};

/* Allocate a virtual array with NUM_ELEMENT elements, each of which is
*************** varray_init (num_elements, element_kind,
*** 67,75 ****
enum varray_data_enum element_kind;
const char *name;
{
! size_t data_size = num_elements * element_size[element_kind];
varray_type ptr;
! if (uses_ggc [element_kind])
ptr = (varray_type) ggc_alloc_cleared (VARRAY_HDR_SIZE + data_size);
else
ptr = (varray_type) xcalloc (VARRAY_HDR_SIZE + data_size, 1);
--- 65,73 ----
enum varray_data_enum element_kind;
const char *name;
{
! size_t data_size = num_elements * element[element_kind].size;
varray_type ptr;
! if (element[element_kind].uses_ggc)
ptr = (varray_type) ggc_alloc_cleared (VARRAY_HDR_SIZE + data_size);
else
ptr = (varray_type) xcalloc (VARRAY_HDR_SIZE + data_size, 1);
*************** varray_grow (va, n)
*** 92,102 ****

if (n != old_elements)
{
! size_t elem_size = element_size[va->type];
size_t old_data_size = old_elements * elem_size;
size_t data_size = n * elem_size;

! if (uses_ggc[va->type])
va = (varray_type) ggc_realloc (va, VARRAY_HDR_SIZE + data_size);
else
va = (varray_type) xrealloc ((char *) va, VARRAY_HDR_SIZE + data_size);
--- 90,100 ----

if (n != old_elements)
{
! size_t elem_size = element[va->type].size;
size_t old_data_size = old_elements * elem_size;
size_t data_size = n * elem_size;

! if (element[va->type].uses_ggc)
va = (varray_type) ggc_realloc (va, VARRAY_HDR_SIZE + data_size);
else
va = (varray_type) xrealloc ((char *) va, VARRAY_HDR_SIZE + data_size);
*************** void
*** 113,119 ****
varray_clear (va)
varray_type va;
{
! size_t data_size = element_size[va->type] * va->num_elements;

memset (va->data.c, 0, data_size);
va->elements_used = 0;
--- 111,117 ----
varray_clear (va)
varray_type va;
{
! size_t data_size = element[va->type].size * va->num_elements;

memset (va->data.c, 0, data_size);
va->elements_used = 0;
--------------
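
For reference, the new non-GC varray kinds added above are used just like the existing ones; the difference is that their storage is xcalloc'd rather than GC'd, and the generic-pointer variant is GTY-skipped so the collector never walks its elements. A tiny made-up example, assuming the usual GCC internal headers:

#include "config.h"
#include "system.h"
#include "varray.h"

/* The function and variable names here are invented for illustration.  */
static void
ngc_varray_example (void *some_page)
{
  varray_type pages, starts;

  VARRAY_GENERIC_PTRNGC_INIT (pages, 64, "pages");
  VARRAY_UINTNGC_INIT (starts, 8, "starts");

  VARRAY_PUSH_GENERIC_PTRNGC (pages, some_page);
  VARRAY_PUSH_UINTNGC (starts, VARRAY_ACTIVE_SIZE (pages) - 1);

  if (VARRAY_TOP_GENERIC_PTRNGC (pages) == some_page)
    VARRAY_POP (pages);

  VARRAY_FREE (pages);
  VARRAY_FREE (starts);
}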

Attachment: popcontext-fsf.diffs
Description: Binary data

