/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "diagnostic-core.h"
#include "params.h"
#include "hosthooks.h"
#include "hosthooks-def.h"
#include "plugin.h"
#include "vec.h"
#include "timevar.h"

/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int ggc_htab_delete (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
                               struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */

/* Process a slot of an htab by deleting it if it has not been marked.  */

static int
ggc_htab_delete (void **slot, void *info)
{
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;

  if (! (*r->marked_p) (*slot))
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);

  return 1;
}

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;

/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
  if (rt)
    extra_root_vec.safe_push (rt);
}

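/* Illustrative sketch, not part of the original file: a plugin holding a
   GC-managed tree in one of its own globals would typically describe it
   with a root table and register it here, assuming the usual ggc_root_tab
   field order (base, nelt, stride, cb, pchw) and the gengtype-provided
   tree markers; my_plugin_decl and my_plugin_root_tab are hypothetical
   names.

     static tree my_plugin_decl;

     static const struct ggc_root_tab my_plugin_root_tab[] = {
       { &my_plugin_decl, 1, sizeof (my_plugin_decl),
         &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
       LAST_GGC_ROOT_TAB
     };

     ggc_register_root_tab (my_plugin_root_tab);

   Once registered, ggc_mark_roots below walks the table on every
   collection, exactly like the built-in gt_ggc_rtab entries.  */
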
/* This extra vector of dynamically registered cache_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC cache
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_cache_tab *const_ggc_cache_tab_t;
static vec<const_ggc_cache_tab_t> extra_cache_vec;

/* Dynamically register a new GGC cache table CT.  This is useful for
   plugins.  */

void
ggc_register_cache_tab (const struct ggc_cache_tab* ct)
{
  if (ct)
    extra_cache_vec.safe_push (ct);
}

/* Scan a hash table that has objects which are to be deleted if they are not
   already marked.  */

static void
ggc_scan_cache_tab (const_ggc_cache_tab_t ctp)
{
  const struct ggc_cache_tab *cti;

  for (cti = ctp; cti->base != NULL; cti++)
    if (*cti->base)
      {
        ggc_set_mark (*cti->base);
        htab_traverse_noresize (*cti->base, ggc_htab_delete,
                                CONST_CAST (void *, (const void *)cti));
        ggc_set_mark ((*cti->base)->entries);
      }
}

/* Mark all the roots in the table RT.  */

static void
ggc_mark_root_tab (const_ggc_root_tab_t rt)
{
  size_t i;

  for ( ; rt->base != NULL; rt++)
    for (i = 0; i < rt->nelt; i++)
      (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
}

/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  const struct ggc_cache_tab *const *ct;
  const_ggc_cache_tab_t ctp;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    ggc_scan_cache_tab (*ct);

  FOR_EACH_VEC_ELT (extra_cache_vec, i, ctp)
    ggc_scan_cache_tab (ctp);

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}

/* Allocate a block of memory, then clear it.  */
void *
ggc_internal_cleared_alloc_stat (size_t size MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc_stat (size PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}

/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc_stat (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
         the "new" size accessible, since ggc_get_size returns the size of
         the pool, not the size of the individually allocated object, the
         size which was previously made accessible.  Unfortunately, we
         don't know that previously allocated size.  Without that
         knowledge we have to lose some initialization-tracking for the
         old parts of the object.  An alternative is to mark the whole
         old_size as reachable, but that would lose tracking of writes
         after the end of the object (by small offsets).  Discard the
         handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
                                                    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc_stat (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}

void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
                                    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_alloc_cleared_htab ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_internal_cleared_vec_alloc (sizeof (PTR *), c);
}

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}

/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
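
/* Added usage sketch: SCALE and LABEL are meant to be applied to the same
   value, scaling a byte count down below ten thousand and pairing it with
   the matching unit suffix, e.g.

     fprintf (stream, "%lu%c", SCALE (nbytes), LABEL (nbytes));

   so 4096 prints as "4096 ", 5242880 as "5120k" and 33554432 as "32M".  */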

void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
                             ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}

/* Functions for saving and restoring GCable memory to disk.  */

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)

/* Helper for hashing saving_htab.  */

struct saving_hasher : typed_free_remove <ptr_data>
{
  typedef ptr_data value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
saving_hasher::hash (const value_type *p)
{
  return POINTER_HASH (p->obj);
}

inline bool
saving_hasher::equal (const value_type *p1, const compare_type *p2)
{
  return p1->obj == p2;
}

static hash_table <saving_hasher> saving_htab;

/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
                    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    saving_htab.find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
                  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}

/* Register a reorder function for an object already noted in the hash
   table.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
                     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab.find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

int
ggc_call_count (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  ggc_pch_count_object (state->d, d->obj, d->size,
                        d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

int
ggc_call_alloc (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
                                      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
          - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}
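
/* Added note: the comparator above uses the (a > b) - (a < b) idiom so that
   it always returns exactly -1, 0 or 1; returning the raw difference of the
   two new_addr values could overflow the int return type on 64-bit hosts.  */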

/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    saving_htab.find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
                   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        {
          void *ptr = *(void **)((char *)rti->base + rti->stride * i);
          struct ptr_data *new_ptr;
          if (ptr == NULL || ptr == (void *)1)
            {
              if (fwrite (&ptr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can%'t write PCH file: %m");
            }
          else
            {
              new_ptr = (struct ptr_data *)
                saving_htab.find_with_hash (ptr, POINTER_HASH (ptr));
              if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can%'t write PCH file: %m");
            }
        }
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};

/* Write out the state of the compiler to F.  */
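/* Added overview of the function below: it notes every reachable object by
   running the gt_pch_n_* walkers over gt_ggc_rtab and gt_pch_cache_rtab,
   counts the noted objects and assigns each a new address relative to the
   base suggested by the host hook, writes the scalar roots and the
   relocated global pointers, pads the file to the mmap granularity, and
   finally copies each object into a scratch buffer, relocates its pointers
   with relocate_ptrs, and writes it out.  */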

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab.create (50000);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab.traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab.traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can%'t write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error ("can%'t get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
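  /* Added worked example: with a 4096-byte granularity, if the header and
     globals end at offset o == 12345, then o % 4096 == 57 and the block
     above sets mmi.offset to (4096 - 57) + 12345 == 16384, the next multiple
     of 4096.  If o is already aligned, the intermediate value equals the
     granularity, is reset to 0, and mmi.offset ends up equal to o.  */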
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can%'t write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can%'t write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
        {
          this_object_size = state.ptrs[i]->size;
          this_object = XRESIZEVAR (char, this_object, this_object_size);
        }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
         padding of the object.  Avoid warnings by making the memory
         temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
        {
          if (vbits.length () < valid_size)
            vbits.safe_grow (valid_size);
          get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
                                          vbits.address (), valid_size);
          if (get_vbits == 3)
            {
              /* We assume that first part of obj is addressable, and
                 the rest is unaddressable.  Find out where the boundary is
                 using binary search.  */
              size_t lo = 0, hi = valid_size;
              while (hi > lo)
                {
                  size_t mid = (lo + hi) / 2;
                  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
                                                  + mid, vbits.address (),
                                                  1);
                  if (get_vbits == 3)
                    hi = mid;
                  else if (get_vbits == 1)
                    lo = mid + 1;
                  else
                    break;
                }
              if (get_vbits == 1 || get_vbits == 3)
                {
                  valid_size = lo;
                  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
                                                  vbits.address (),
                                                  valid_size);
                }
            }
          if (get_vbits == 1)
            VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
                                                         state.ptrs[i]->size));
        }
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
        state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
                                   state.ptrs[i]->note_ptr_cookie,
                                   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
                                  state.ptrs[i]->note_ptr_cookie,
                                  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
                            state.ptrs[i]->new_addr, state.ptrs[i]->size,
                            state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
        memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
        {
          (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
                                     valid_size);
          if (valid_size != state.ptrs[i]->size)
            VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
                                                          state.ptrs[i]->obj
                                                          + valid_size,
                                                          state.ptrs[i]->size
                                                          - valid_size));
        }
#endif
    }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  XDELETE (state.ptrs);
  XDELETE (this_object);
  saving_htab.dispose ();
}

/* Read the state of the compiler back in from F.  */
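/* Added overview of the function below: it reverses gt_pch_save by clearing
   the deletable roots, reading the scalar roots and global pointers back in,
   reading the mmap_info record, and then asking the host hook to map or read
   the object blob at the address the file was laid out for; if that address
   cannot be provided, compilation stops with "had to relocate PCH".  */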

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can%'t read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can%'t read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can%'t read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
                                          fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
          || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
        fatal_error ("can%'t read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can%'t read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}

/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
                            int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as base, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, so that
   relocation of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
                            size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}

/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize();
}

#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot; if not, we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
               fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */

#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
         RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
         appears to be ignored.  Ignore such silliness.  If a limit
         this small was actually effective for mmap, GCC wouldn't even
         start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
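
/* Added worked example: on a host reporting 4GB of physical memory and no
   tighter rlimit, min_expand is 4 * 70 = 280, clamped to 70, plus 30, so
   GGC_MIN_EXPAND defaults to 100%; with 512MB it is 0.5 * 70 + 30 = 65%.  */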

/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
        && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
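
/* Added worked example: with 4GB of RAM and no data rlimit, phys_kbytes
   starts at 4194304, RAM/8 gives 524288 (512MB), and the final clamp to the
   4MB..128MB range leaves GGC_MIN_HEAPSIZE defaulting to 131072 kB (128MB).  */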
#endif

void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
#endif
}

/* Data structure used to store per-call-site statistics.  */
struct loc_descriptor
{
  const char *file;
  int line;
  const char *function;
  int times;
  size_t allocated;
  size_t overhead;
  size_t freed;
  size_t collected;
};

/* Hash table helper.  */

struct loc_desc_hasher : typed_noop_remove <loc_descriptor>
{
  typedef loc_descriptor value_type;
  typedef loc_descriptor compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
loc_desc_hasher::hash (const value_type *d)
{
  return htab_hash_pointer (d->function) | d->line;
}

inline bool
loc_desc_hasher::equal (const value_type *d, const compare_type *d2)
{
  return (d->file == d2->file && d->line == d2->line
          && d->function == d2->function);
}

/* Hashtable used for statistics.  */
static hash_table <loc_desc_hasher> loc_hash;

struct ptr_hash_entry
{
  void *ptr;
  struct loc_descriptor *loc;
  size_t size;
};

/* Helper for ptr_hash table.  */

struct ptr_hash_hasher : typed_noop_remove <ptr_hash_entry>
{
  typedef ptr_hash_entry value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
ptr_hash_hasher::hash (const value_type *d)
{
  return htab_hash_pointer (d->ptr);
}

inline bool
ptr_hash_hasher::equal (const value_type *p, const compare_type *p2)
{
  return (p->ptr == p2);
}

/* Hashtable converting address of allocated field to loc descriptor.  */
static hash_table <ptr_hash_hasher> ptr_hash;

/* Return descriptor for given call site, create new one if needed.  */
static struct loc_descriptor *
make_loc_descriptor (const char *name, int line, const char *function)
{
  struct loc_descriptor loc;
  struct loc_descriptor **slot;

  loc.file = name;
  loc.line = line;
  loc.function = function;
  if (!loc_hash.is_created ())
    loc_hash.create (10);

  slot = loc_hash.find_slot (&loc, INSERT);
  if (*slot)
    return *slot;
  *slot = XCNEW (struct loc_descriptor);
  (*slot)->file = name;
  (*slot)->line = line;
  (*slot)->function = function;
  return *slot;
}

/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
                     const char *name, int line, const char *function)
{
  struct loc_descriptor *loc = make_loc_descriptor (name, line, function);
  struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
  ptr_hash_entry **slot;

  p->ptr = ptr;
  p->loc = loc;
  p->size = allocated + overhead;
  if (!ptr_hash.is_created ())
    ptr_hash.create (10);
  slot = ptr_hash.find_slot_with_hash (ptr, htab_hash_pointer (ptr), INSERT);
  gcc_assert (!*slot);
  *slot = p;

  loc->times++;
  loc->allocated += allocated;
  loc->overhead += overhead;
}

/* Helper function for ggc_prune_overhead_list.  See if SLOT is still marked
   and remove it from the hashtable if it is not.  */
int
ggc_prune_ptr (ptr_hash_entry **slot, void *b ATTRIBUTE_UNUSED)
{
  struct ptr_hash_entry *p = *slot;
  if (!ggc_marked_p (p->ptr))
    {
      p->loc->collected += p->size;
      ptr_hash.clear_slot (slot);
      free (p);
    }
  return 1;
}

/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  ptr_hash.traverse <void *, ggc_prune_ptr> (NULL);
}

/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  ptr_hash_entry **slot;
  slot = ptr_hash.find_slot_with_hash (ptr, htab_hash_pointer (ptr), NO_INSERT);
  struct ptr_hash_entry *p;
  /* The pointer might not be found if a PCH read happened between the
     allocation and the ggc_free () call.  FIXME: account memory properly
     in the presence of PCH.  */
  if (!slot)
    return;
  p = (struct ptr_hash_entry *) *slot;
  p->loc->freed += p->size;
  ptr_hash.clear_slot (slot);
  free (p);
}

/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
final_cmp_statistic (const void *loc1, const void *loc2)
{
  const struct loc_descriptor *const l1 =
    *(const struct loc_descriptor *const *) loc1;
  const struct loc_descriptor *const l2 =
    *(const struct loc_descriptor *const *) loc2;
  long diff;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
          (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}

/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
cmp_statistic (const void *loc1, const void *loc2)
{
  const struct loc_descriptor *const l1 =
    *(const struct loc_descriptor *const *) loc1;
  const struct loc_descriptor *const l2 =
    *(const struct loc_descriptor *const *) loc2;
  long diff;

  diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) -
          (l2->allocated + l2->overhead - l2->freed - l2->collected));
  if (diff)
    return diff > 0 ? 1 : diff < 0 ? -1 : 0;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
          (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}

/* Collect array of the descriptors from hashtable.  */
static struct loc_descriptor **loc_array;
int
ggc_add_statistics (loc_descriptor **slot, int *n)
{
  loc_array[*n] = *slot;
  (*n)++;
  return 1;
}

/* Dump per-site memory statistics.  */

void
dump_ggc_loc_statistics (bool final)
{
  int nentries = 0;
  char s[4096];
  size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
  int i;

  if (! GATHER_STATISTICS)
    return;

  ggc_force_collect = true;
  ggc_collect ();

  loc_array = XCNEWVEC (struct loc_descriptor *,
                        loc_hash.elements_with_deleted ());
  fprintf (stderr, "-------------------------------------------------------\n");
  fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n",
           "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  loc_hash.traverse <int *, ggc_add_statistics> (&nentries);
  qsort (loc_array, nentries, sizeof (*loc_array),
         final ? final_cmp_statistic : cmp_statistic);
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      allocated += d->allocated;
      times += d->times;
      freed += d->freed;
      collected += d->collected;
      overhead += d->overhead;
    }
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      if (d->allocated)
        {
          const char *s1 = d->file;
          const char *s2;
          while ((s2 = strstr (s1, "gcc/")))
            s1 = s2 + 4;
          sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
          s[48] = 0;
          fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
                   (long)d->collected,
                   (d->collected) * 100.0 / collected,
                   (long)d->freed,
                   (d->freed) * 100.0 / freed,
                   (long)(d->allocated + d->overhead - d->freed - d->collected),
                   (d->allocated + d->overhead - d->freed - d->collected) * 100.0
                   / (allocated + overhead - freed - collected),
                   (long)d->overhead,
                   d->overhead * 100.0 / overhead,
                   (long)d->times);
        }
    }
  fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n",
           "Total", (long)collected, (long)freed,
           (long)(allocated + overhead - freed - collected), (long)overhead,
           (long)times);
  fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n",
           "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  ggc_force_collect = false;
}