]>
Commit | Line | Data |
---|---|---|
b49a6a90 | 1 | /* Simple garbage collection for the GNU compiler. |
9dcd6f09 | 2 | Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 |
14011ca4 | 3 | Free Software Foundation, Inc. |
b49a6a90 | 4 | |
1322177d | 5 | This file is part of GCC. |
b49a6a90 | 6 | |
1322177d LB |
7 | GCC is free software; you can redistribute it and/or modify it under |
8 | the terms of the GNU General Public License as published by the Free | |
9dcd6f09 | 9 | Software Foundation; either version 3, or (at your option) any later |
1322177d | 10 | version. |
b49a6a90 | 11 | |
1322177d LB |
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14a774a9 RK |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
15 | for more details. | |
b49a6a90 | 16 | |
14a774a9 | 17 | You should have received a copy of the GNU General Public License |
9dcd6f09 NC |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ | |
b49a6a90 AS |
20 | |
21 | /* Generic garbage collection (GC) functions and data, not specific to | |
22 | any particular GC implementation. */ | |
23 | ||
24 | #include "config.h" | |
25 | #include "system.h" | |
4977bab6 | 26 | #include "coretypes.h" |
4c160717 | 27 | #include "hashtab.h" |
1b42a6a9 | 28 | #include "ggc.h" |
17211ab5 | 29 | #include "toplev.h" |
9ac121af | 30 | #include "params.h" |
18c81520 | 31 | #include "hosthooks.h" |
4d0c31e6 | 32 | #include "hosthooks-def.h" |
17211ab5 | 33 | |
16226f1e KG |
34 | #ifdef HAVE_SYS_RESOURCE_H |
35 | # include <sys/resource.h> | |
36 | #endif | |
37 | ||
17211ab5 GK |
38 | #ifdef HAVE_MMAP_FILE |
39 | # include <sys/mman.h> | |
8eb6a092 EB |
40 | # ifdef HAVE_MINCORE |
41 | /* This is on Solaris. */ | |
42 | # include <sys/types.h> | |
43 | # endif | |
44 | #endif | |
45 | ||
46 | #ifndef MAP_FAILED | |
47 | # define MAP_FAILED ((void *)-1) | |
17211ab5 GK |
48 | #endif |
49 | ||
07724022 JH |
50 | /* When set, ggc_collect will do collection. */ |
51 | bool ggc_force_collect; | |
52 | ||
3277221c MM |
53 | /* Statistics about the allocation. */ |
54 | static ggc_statistics *ggc_stats; | |
55 | ||
17211ab5 GK |
56 | struct traversal_state; |
57 | ||
20c1dc5e AJ |
58 | static int ggc_htab_delete (void **, void *); |
59 | static hashval_t saving_htab_hash (const void *); | |
60 | static int saving_htab_eq (const void *, const void *); | |
61 | static int call_count (void **, void *); | |
62 | static int call_alloc (void **, void *); | |
63 | static int compare_ptr_data (const void *, const void *); | |
64 | static void relocate_ptrs (void *, void *); | |
65 | static void write_pch_globals (const struct ggc_root_tab * const *tab, | |
66 | struct traversal_state *state); | |
67 | static double ggc_rlimit_bound (double); | |
b49a6a90 AS |
68 | |
69 | /* Maintain global roots that are preserved during GC. */ | |
70 | ||
4c160717 RK |
71 | /* Process a slot of an htab by deleting it if it has not been marked. */ |
72 | ||
73 | static int | |
20c1dc5e | 74 | ggc_htab_delete (void **slot, void *info) |
4c160717 | 75 | { |
e2500fed | 76 | const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info; |
4c160717 RK |
77 | |
78 | if (! (*r->marked_p) (*slot)) | |
e2500fed GK |
79 | htab_clear_slot (*r->base, slot); |
80 | else | |
81 | (*r->cb) (*slot); | |
4c160717 RK |
82 | |
83 | return 1; | |
84 | } | |
85 | ||
cb2ec151 RH |
86 | /* Iterate through all registered roots and mark each element. */ |
87 | ||
b49a6a90 | 88 | void |
20c1dc5e | 89 | ggc_mark_roots (void) |
96df4529 | 90 | { |
e2500fed GK |
91 | const struct ggc_root_tab *const *rt; |
92 | const struct ggc_root_tab *rti; | |
93 | const struct ggc_cache_tab *const *ct; | |
94 | const struct ggc_cache_tab *cti; | |
95 | size_t i; | |
589005ff | 96 | |
e2500fed GK |
97 | for (rt = gt_ggc_deletable_rtab; *rt; rt++) |
98 | for (rti = *rt; rti->base != NULL; rti++) | |
99 | memset (rti->base, 0, rti->stride); | |
100 | ||
101 | for (rt = gt_ggc_rtab; *rt; rt++) | |
102 | for (rti = *rt; rti->base != NULL; rti++) | |
103 | for (i = 0; i < rti->nelt; i++) | |
104 | (*rti->cb)(*(void **)((char *)rti->base + rti->stride * i)); | |
bedda2da | 105 | |
17211ab5 | 106 | ggc_mark_stringpool (); |
bedda2da | 107 | |
4c160717 | 108 | /* Now scan all hash tables that have objects which are to be deleted if |
e2500fed GK |
109 | they are not already marked. */ |
110 | for (ct = gt_ggc_cache_rtab; *ct; ct++) | |
111 | for (cti = *ct; cti->base != NULL; cti++) | |
690eed2c | 112 | if (*cti->base) |
17211ab5 GK |
113 | { |
114 | ggc_set_mark (*cti->base); | |
20c1dc5e | 115 | htab_traverse_noresize (*cti->base, ggc_htab_delete, (void *) cti); |
17211ab5 GK |
116 | ggc_set_mark ((*cti->base)->entries); |
117 | } | |
96df4529 AS |
118 | } |
119 | ||
e2500fed GK |
120 | /* Allocate a block of memory, then clear it. */ |
121 | void * | |
b9dcdee4 | 122 | ggc_alloc_cleared_stat (size_t size MEM_STAT_DECL) |
ef8288f7 | 123 | { |
b9dcdee4 | 124 | void *buf = ggc_alloc_stat (size PASS_MEM_STAT); |
e2500fed GK |
125 | memset (buf, 0, size); |
126 | return buf; | |
ef8288f7 RH |
127 | } |
128 | ||
e2500fed GK |
129 | /* Resize a block of memory, possibly re-allocating it. */ |
130 | void * | |
b9dcdee4 | 131 | ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL) |
ef8288f7 | 132 | { |
e2500fed GK |
133 | void *r; |
134 | size_t old_size; | |
ef8288f7 | 135 | |
e2500fed | 136 | if (x == NULL) |
b9dcdee4 | 137 | return ggc_alloc_stat (size PASS_MEM_STAT); |
ef8288f7 | 138 | |
e2500fed | 139 | old_size = ggc_get_size (x); |
685fe032 | 140 | |
e2500fed | 141 | if (size <= old_size) |
9a0a7d5d HPN |
142 | { |
143 | /* Mark the unwanted memory as unaccessible. We also need to make | |
144 | the "new" size accessible, since ggc_get_size returns the size of | |
145 | the pool, not the size of the individually allocated object, the | |
146 | size which was previously made accessible. Unfortunately, we | |
147 | don't know that previously allocated size. Without that | |
148 | knowledge we have to lose some initialization-tracking for the | |
149 | old parts of the object. An alternative is to mark the whole | |
20c1dc5e | 150 | old_size as reachable, but that would lose tracking of writes |
9a0a7d5d HPN |
151 | after the end of the object (by small offsets). Discard the |
152 | handle to avoid handle leak. */ | |
35dee980 HPN |
153 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size, |
154 | old_size - size)); | |
155 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size)); | |
9a0a7d5d HPN |
156 | return x; |
157 | } | |
ef8288f7 | 158 | |
b9dcdee4 | 159 | r = ggc_alloc_stat (size PASS_MEM_STAT); |
9a0a7d5d HPN |
160 | |
161 | /* Since ggc_get_size returns the size of the pool, not the size of the | |
162 | individually allocated object, we'd access parts of the old object | |
163 | that were marked invalid with the memcpy below. We lose a bit of the | |
164 | initialization-tracking since some of it may be uninitialized. */ | |
35dee980 | 165 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size)); |
9a0a7d5d | 166 | |
e2500fed | 167 | memcpy (r, x, old_size); |
9a0a7d5d HPN |
168 | |
169 | /* The old object is not supposed to be used anymore. */ | |
685fe032 | 170 | ggc_free (x); |
9a0a7d5d | 171 | |
e2500fed | 172 | return r; |
ef8288f7 RH |
173 | } |
174 | ||
e2500fed | 175 | /* Like ggc_alloc_cleared, but performs a multiplication. */ |
f8a83ee3 | 176 | void * |
20c1dc5e | 177 | ggc_calloc (size_t s1, size_t s2) |
f8a83ee3 | 178 | { |
e2500fed | 179 | return ggc_alloc_cleared (s1 * s2); |
f8a83ee3 ZW |
180 | } |
181 | ||
17211ab5 | 182 | /* These are for splay_tree_new_ggc. */ |
20c1dc5e AJ |
183 | void * |
184 | ggc_splay_alloc (int sz, void *nl) | |
17211ab5 | 185 | { |
282899df | 186 | gcc_assert (!nl); |
17211ab5 GK |
187 | return ggc_alloc (sz); |
188 | } | |
189 | ||
190 | void | |
20c1dc5e | 191 | ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl) |
17211ab5 | 192 | { |
282899df | 193 | gcc_assert (!nl); |
17211ab5 GK |
194 | } |
195 | ||
3277221c | 196 | /* Print statistics that are independent of the collector in use. */ |
fba0bfd4 ZW |
197 | #define SCALE(x) ((unsigned long) ((x) < 1024*10 \ |
198 | ? (x) \ | |
199 | : ((x) < 1024*1024*10 \ | |
200 | ? (x) / 1024 \ | |
201 | : (x) / (1024*1024)))) | |
202 | #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M')) | |
3277221c MM |
203 | |
204 | void | |
20c1dc5e AJ |
205 | ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED, |
206 | ggc_statistics *stats) | |
3277221c | 207 | { |
3277221c MM |
208 | /* Set the pointer so that during collection we will actually gather |
209 | the statistics. */ | |
210 | ggc_stats = stats; | |
211 | ||
212 | /* Then do one collection to fill in the statistics. */ | |
213 | ggc_collect (); | |
214 | ||
17211ab5 GK |
215 | /* At present, we don't really gather any interesting statistics. */ |
216 | ||
217 | /* Don't gather statistics any more. */ | |
218 | ggc_stats = NULL; | |
219 | } | |

/* Functions for saving and restoring GCable memory to disk.  */

/* Hash table mapping object addresses to their ptr_data records while a
   PCH file is being written.  */
static htab_t saving_htab;

/* Bookkeeping for one object that will be written to the PCH file.  */
struct ptr_data
{
  void *obj;			/* Address of the object in memory.  */
  void *note_ptr_cookie;	/* Cookie passed back to note_ptr_fn.  */
  gt_note_pointers note_ptr_fn;	/* Walks the pointers inside the object.  */
  gt_handle_reorder reorder_fn;	/* Optional pre-write reorder hook.  */
  size_t size;			/* Size of the object in bytes.  */
  void *new_addr;		/* Address the object will have on restore.  */
  enum gt_types_enum type;	/* Type classification for the PCH writer.  */
};

/* Hash an address; the low 3 bits are dropped since allocations are
   at least 8-byte aligned.  */
#define POINTER_HASH(x) (hashval_t)((long)x >> 3)
238 | /* Register an object in the hash table. */ | |
239 | ||
240 | int | |
20c1dc5e | 241 | gt_pch_note_object (void *obj, void *note_ptr_cookie, |
08cee789 DJ |
242 | gt_note_pointers note_ptr_fn, |
243 | enum gt_types_enum type) | |
17211ab5 GK |
244 | { |
245 | struct ptr_data **slot; | |
20c1dc5e | 246 | |
17211ab5 GK |
247 | if (obj == NULL || obj == (void *) 1) |
248 | return 0; | |
249 | ||
250 | slot = (struct ptr_data **) | |
251 | htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj), | |
252 | INSERT); | |
253 | if (*slot != NULL) | |
254 | { | |
282899df NS |
255 | gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn |
256 | && (*slot)->note_ptr_cookie == note_ptr_cookie); | |
17211ab5 GK |
257 | return 0; |
258 | } | |
20c1dc5e | 259 | |
17211ab5 GK |
260 | *slot = xcalloc (sizeof (struct ptr_data), 1); |
261 | (*slot)->obj = obj; | |
262 | (*slot)->note_ptr_fn = note_ptr_fn; | |
263 | (*slot)->note_ptr_cookie = note_ptr_cookie; | |
264 | if (note_ptr_fn == gt_pch_p_S) | |
265 | (*slot)->size = strlen (obj) + 1; | |
266 | else | |
267 | (*slot)->size = ggc_get_size (obj); | |
08cee789 | 268 | (*slot)->type = type; |
17211ab5 GK |
269 | return 1; |
270 | } | |
271 | ||
272 | /* Register an object in the hash table. */ | |
273 | ||
274 | void | |
20c1dc5e AJ |
275 | gt_pch_note_reorder (void *obj, void *note_ptr_cookie, |
276 | gt_handle_reorder reorder_fn) | |
17211ab5 GK |
277 | { |
278 | struct ptr_data *data; | |
20c1dc5e | 279 | |
17211ab5 GK |
280 | if (obj == NULL || obj == (void *) 1) |
281 | return; | |
282 | ||
283 | data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj)); | |
282899df | 284 | gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie); |
20c1dc5e | 285 | |
17211ab5 GK |
286 | data->reorder_fn = reorder_fn; |
287 | } | |
288 | ||
289 | /* Hash and equality functions for saving_htab, callbacks for htab_create. */ | |
290 | ||
291 | static hashval_t | |
20c1dc5e | 292 | saving_htab_hash (const void *p) |
17211ab5 | 293 | { |
741ac903 | 294 | return POINTER_HASH (((const struct ptr_data *)p)->obj); |
17211ab5 GK |
295 | } |
296 | ||
297 | static int | |
20c1dc5e | 298 | saving_htab_eq (const void *p1, const void *p2) |
17211ab5 | 299 | { |
741ac903 | 300 | return ((const struct ptr_data *)p1)->obj == p2; |
17211ab5 GK |
301 | } |
302 | ||
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;			/* PCH file being written.  */
  struct ggc_pch_data *d;	/* Collector-specific PCH state.  */
  size_t count;			/* Number of objects to be written.  */
  struct ptr_data **ptrs;	/* All objects, sorted by new address.  */
  size_t ptrs_i;		/* Fill index into PTRS.  */
};
313 | ||
314 | /* Callbacks for htab_traverse. */ | |
315 | ||
316 | static int | |
20c1dc5e | 317 | call_count (void **slot, void *state_p) |
17211ab5 GK |
318 | { |
319 | struct ptr_data *d = (struct ptr_data *)*slot; | |
320 | struct traversal_state *state = (struct traversal_state *)state_p; | |
20c1dc5e | 321 | |
08cee789 DJ |
322 | ggc_pch_count_object (state->d, d->obj, d->size, |
323 | d->note_ptr_fn == gt_pch_p_S, | |
324 | d->type); | |
17211ab5 GK |
325 | state->count++; |
326 | return 1; | |
327 | } | |
328 | ||
329 | static int | |
20c1dc5e | 330 | call_alloc (void **slot, void *state_p) |
17211ab5 GK |
331 | { |
332 | struct ptr_data *d = (struct ptr_data *)*slot; | |
333 | struct traversal_state *state = (struct traversal_state *)state_p; | |
20c1dc5e | 334 | |
08cee789 DJ |
335 | d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size, |
336 | d->note_ptr_fn == gt_pch_p_S, | |
337 | d->type); | |
17211ab5 GK |
338 | state->ptrs[state->ptrs_i++] = d; |
339 | return 1; | |
340 | } | |
341 | ||
342 | /* Callback for qsort. */ | |
343 | ||
344 | static int | |
20c1dc5e | 345 | compare_ptr_data (const void *p1_p, const void *p2_p) |
17211ab5 | 346 | { |
58f9752a KG |
347 | const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p; |
348 | const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p; | |
17211ab5 GK |
349 | return (((size_t)p1->new_addr > (size_t)p2->new_addr) |
350 | - ((size_t)p1->new_addr < (size_t)p2->new_addr)); | |
351 | } | |
352 | ||
353 | /* Callbacks for note_ptr_fn. */ | |
354 | ||
355 | static void | |
20c1dc5e | 356 | relocate_ptrs (void *ptr_p, void *state_p) |
17211ab5 GK |
357 | { |
358 | void **ptr = (void **)ptr_p; | |
20c1dc5e | 359 | struct traversal_state *state ATTRIBUTE_UNUSED |
17211ab5 GK |
360 | = (struct traversal_state *)state_p; |
361 | struct ptr_data *result; | |
362 | ||
363 | if (*ptr == NULL || *ptr == (void *)1) | |
364 | return; | |
20c1dc5e | 365 | |
17211ab5 | 366 | result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr)); |
282899df | 367 | gcc_assert (result); |
17211ab5 GK |
368 | *ptr = result->new_addr; |
369 | } | |
370 | ||
371 | /* Write out, after relocation, the pointers in TAB. */ | |
372 | static void | |
20c1dc5e AJ |
373 | write_pch_globals (const struct ggc_root_tab * const *tab, |
374 | struct traversal_state *state) | |
17211ab5 GK |
375 | { |
376 | const struct ggc_root_tab *const *rt; | |
377 | const struct ggc_root_tab *rti; | |
378 | size_t i; | |
379 | ||
380 | for (rt = tab; *rt; rt++) | |
381 | for (rti = *rt; rti->base != NULL; rti++) | |
382 | for (i = 0; i < rti->nelt; i++) | |
383 | { | |
384 | void *ptr = *(void **)((char *)rti->base + rti->stride * i); | |
385 | struct ptr_data *new_ptr; | |
386 | if (ptr == NULL || ptr == (void *)1) | |
387 | { | |
20c1dc5e | 388 | if (fwrite (&ptr, sizeof (void *), 1, state->f) |
17211ab5 | 389 | != 1) |
fa6ef813 | 390 | fatal_error ("can't write PCH file: %m"); |
17211ab5 GK |
391 | } |
392 | else | |
393 | { | |
20c1dc5e | 394 | new_ptr = htab_find_with_hash (saving_htab, ptr, |
17211ab5 | 395 | POINTER_HASH (ptr)); |
20c1dc5e | 396 | if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f) |
17211ab5 | 397 | != 1) |
fa6ef813 | 398 | fatal_error ("can't write PCH file: %m"); |
17211ab5 GK |
399 | } |
400 | } | |
401 | } | |
402 | ||
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;		/* File offset where the mmapped area starts.  */
  size_t size;			/* Size of the mmapped area in bytes.  */
  void *preferred_base;		/* Address the area was laid out for.  */
};
411 | ||
412 | /* Write out the state of the compiler to F. */ | |
413 | ||
414 | void | |
20c1dc5e | 415 | gt_pch_save (FILE *f) |
17211ab5 GK |
416 | { |
417 | const struct ggc_root_tab *const *rt; | |
418 | const struct ggc_root_tab *rti; | |
419 | size_t i; | |
420 | struct traversal_state state; | |
421 | char *this_object = NULL; | |
422 | size_t this_object_size = 0; | |
423 | struct mmap_info mmi; | |
90aa6719 | 424 | const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity(); |
17211ab5 GK |
425 | |
426 | gt_pch_save_stringpool (); | |
427 | ||
428 | saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free); | |
429 | ||
430 | for (rt = gt_ggc_rtab; *rt; rt++) | |
431 | for (rti = *rt; rti->base != NULL; rti++) | |
432 | for (i = 0; i < rti->nelt; i++) | |
433 | (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i)); | |
434 | ||
435 | for (rt = gt_pch_cache_rtab; *rt; rt++) | |
436 | for (rti = *rt; rti->base != NULL; rti++) | |
437 | for (i = 0; i < rti->nelt; i++) | |
438 | (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i)); | |
439 | ||
440 | /* Prepare the objects for writing, determine addresses and such. */ | |
441 | state.f = f; | |
442 | state.d = init_ggc_pch(); | |
443 | state.count = 0; | |
444 | htab_traverse (saving_htab, call_count, &state); | |
445 | ||
446 | mmi.size = ggc_pch_total_size (state.d); | |
447 | ||
18c81520 GK |
448 | /* Try to arrange things so that no relocation is necessary, but |
449 | don't try very hard. On most platforms, this will always work, | |
450 | and on the rest it's a lot of work to do better. | |
451 | (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and | |
452 | HOST_HOOKS_GT_PCH_USE_ADDRESS.) */ | |
4d0c31e6 | 453 | mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f)); |
18c81520 | 454 | |
17211ab5 GK |
455 | ggc_pch_this_base (state.d, mmi.preferred_base); |
456 | ||
5ed6ace5 | 457 | state.ptrs = XNEWVEC (struct ptr_data *, state.count); |
17211ab5 GK |
458 | state.ptrs_i = 0; |
459 | htab_traverse (saving_htab, call_alloc, &state); | |
460 | qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data); | |
461 | ||
462 | /* Write out all the scalar variables. */ | |
463 | for (rt = gt_pch_scalar_rtab; *rt; rt++) | |
464 | for (rti = *rt; rti->base != NULL; rti++) | |
465 | if (fwrite (rti->base, rti->stride, 1, f) != 1) | |
fa6ef813 | 466 | fatal_error ("can't write PCH file: %m"); |
17211ab5 GK |
467 | |
468 | /* Write out all the global pointers, after translation. */ | |
469 | write_pch_globals (gt_ggc_rtab, &state); | |
470 | write_pch_globals (gt_pch_cache_rtab, &state); | |
471 | ||
90aa6719 DS |
472 | /* Pad the PCH file so that the mmapped area starts on an allocation |
473 | granularity (usually page) boundary. */ | |
17211ab5 | 474 | { |
70f8b89f KG |
475 | long o; |
476 | o = ftell (state.f) + sizeof (mmi); | |
477 | if (o == -1) | |
fa6ef813 | 478 | fatal_error ("can't get position in PCH file: %m"); |
90aa6719 DS |
479 | mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment; |
480 | if (mmi.offset == mmap_offset_alignment) | |
17211ab5 GK |
481 | mmi.offset = 0; |
482 | mmi.offset += o; | |
483 | } | |
484 | if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1) | |
fa6ef813 | 485 | fatal_error ("can't write PCH file: %m"); |
17211ab5 GK |
486 | if (mmi.offset != 0 |
487 | && fseek (state.f, mmi.offset, SEEK_SET) != 0) | |
fa6ef813 | 488 | fatal_error ("can't write padding to PCH file: %m"); |
17211ab5 | 489 | |
08cee789 DJ |
490 | ggc_pch_prepare_write (state.d, state.f); |
491 | ||
17211ab5 GK |
492 | /* Actually write out the objects. */ |
493 | for (i = 0; i < state.count; i++) | |
3277221c | 494 | { |
17211ab5 GK |
495 | if (this_object_size < state.ptrs[i]->size) |
496 | { | |
497 | this_object_size = state.ptrs[i]->size; | |
498 | this_object = xrealloc (this_object, this_object_size); | |
499 | } | |
500 | memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size); | |
501 | if (state.ptrs[i]->reorder_fn != NULL) | |
20c1dc5e | 502 | state.ptrs[i]->reorder_fn (state.ptrs[i]->obj, |
17211ab5 GK |
503 | state.ptrs[i]->note_ptr_cookie, |
504 | relocate_ptrs, &state); | |
20c1dc5e | 505 | state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj, |
17211ab5 GK |
506 | state.ptrs[i]->note_ptr_cookie, |
507 | relocate_ptrs, &state); | |
508 | ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj, | |
4d0c31e6 RH |
509 | state.ptrs[i]->new_addr, state.ptrs[i]->size, |
510 | state.ptrs[i]->note_ptr_fn == gt_pch_p_S); | |
17211ab5 GK |
511 | if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S) |
512 | memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size); | |
3277221c | 513 | } |
17211ab5 | 514 | ggc_pch_finish (state.d, state.f); |
d24ecd21 | 515 | gt_pch_fixup_stringpool (); |
17211ab5 GK |
516 | |
517 | free (state.ptrs); | |
518 | htab_delete (saving_htab); | |
519 | } | |
520 | ||
/* Read the state of the compiler back in from F.  The read order must
   mirror the write order in gt_pch_save: scalars, global pointers,
   the mmap_info header, then the object image.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can't read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can't read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can't read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  /* Ask the host to map the saved image at its preferred address.
     Positive result: mapped (file position is irrelevant afterwards);
     zero: memory reserved but contents must be read in; negative:
     the address could not be honored, so the PCH is unusable.  */
  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error ("can't read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can't read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
18c81520 | 580 | |
4d0c31e6 RH |
581 | /* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present. |
582 | Select no address whatsoever, and let gt_pch_save choose what it will with | |
583 | malloc, presumably. */ | |
ee0d75ef | 584 | |
4d0c31e6 RH |
585 | void * |
586 | default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED, | |
587 | int fd ATTRIBUTE_UNUSED) | |
588 | { | |
589 | return NULL; | |
590 | } | |
ee0d75ef | 591 | |
4d0c31e6 RH |
592 | /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present. |
593 | Allocate SIZE bytes with malloc. Return 0 if the address we got is the | |
594 | same as base, indicating that the memory has been allocated but needs to | |
595 | be read in from the file. Return -1 if the address differs, to relocation | |
596 | of the PCH file would be required. */ | |
597 | ||
598 | int | |
599 | default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED, | |
600 | size_t offset ATTRIBUTE_UNUSED) | |
601 | { | |
602 | void *addr = xmalloc (size); | |
603 | return (addr == base) - 1; | |
604 | } | |
ee0d75ef | 605 | |
/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory, which is simply
   the system page size.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}
615 | ||
4d0c31e6 RH |
616 | #if HAVE_MMAP_FILE |
617 | /* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present. | |
618 | We temporarily allocate SIZE bytes, and let the kernel place the data | |
d1a6adeb | 619 | wherever it will. If it worked, that's our spot, if not we're likely |
4d0c31e6 | 620 | to be in trouble. */ |
8eb6a092 | 621 | |
4d0c31e6 RH |
622 | void * |
623 | mmap_gt_pch_get_address (size_t size, int fd) | |
624 | { | |
625 | void *ret; | |
18c81520 | 626 | |
4d0c31e6 RH |
627 | ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); |
628 | if (ret == (void *) MAP_FAILED) | |
629 | ret = NULL; | |
630 | else | |
631 | munmap (ret, size); | |
3277221c | 632 | |
4d0c31e6 RH |
633 | return ret; |
634 | } | |
3277221c | 635 | |
4d0c31e6 RH |
636 | /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present. |
637 | Map SIZE bytes of FD+OFFSET at BASE. Return 1 if we succeeded at | |
638 | mapping the data at BASE, -1 if we couldn't. | |
20c1dc5e | 639 | |
4d0c31e6 RH |
640 | This version assumes that the kernel honors the START operand of mmap |
641 | even without MAP_FIXED if START through START+SIZE are not currently | |
642 | mapped with something. */ | |
17211ab5 | 643 | |
4d0c31e6 RH |
644 | int |
645 | mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset) | |
646 | { | |
647 | void *addr; | |
17211ab5 | 648 | |
4d0c31e6 RH |
649 | /* We're called with size == 0 if we're not planning to load a PCH |
650 | file at all. This allows the hook to free any static space that | |
651 | we might have allocated at link time. */ | |
652 | if (size == 0) | |
653 | return -1; | |
654 | ||
655 | addr = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, | |
656 | fd, offset); | |
657 | ||
658 | return addr == base ? 1 : -1; | |
3277221c | 659 | } |
4d0c31e6 | 660 | #endif /* HAVE_MMAP_FILE */ |
9ac121af | 661 | |
/* Lower LIMIT to respect any applicable rlimit on address space or data
   size; return the (possibly reduced) bound.  Without getrlimit support
   LIMIT is returned unchanged.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
694 | ||
/* Heuristic to set a default for GGC_MIN_EXPAND.  */
int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total ();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024 * 1024 * 1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
713 | ||
/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  Returns a value in
   kilobytes, clamped to [4M, 128M] and further bounded by the rlimits
   on RSS and data size where available.  */
int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total();
  /* Allow the data limit to be up to twice physical memory before
     bounding.  */
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
	&& rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  /* Account for the expansion headroom GGC_MIN_EXPAND will allow.  */
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
752 | ||
/* Install heuristic defaults for the GC tuning parameters.  Skipped
   in GC-checking builds, where collection behavior must stay fixed.  */
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic ());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic ());
#endif
}
b9dcdee4 JH |
761 | |
762 | #ifdef GATHER_STATISTICS | |
763 | ||
/* Datastructure used to store per-call-site statistics.  */
struct loc_descriptor
{
  const char *file;		/* Source file of the allocation site.  */
  int line;			/* Line number of the allocation site.  */
  const char *function;		/* Function containing the site.  */
  int times;			/* Number of allocations from this site.  */
  size_t allocated;		/* Total bytes allocated.  */
  size_t overhead;		/* Total allocator overhead in bytes.  */
  size_t freed;			/* Bytes explicitly freed.  */
  size_t collected;		/* Bytes reclaimed by collection.  */
};

/* Hashtable used for statistics.  */
static htab_t loc_hash;
779 | ||
780 | /* Hash table helpers functions. */ | |
781 | static hashval_t | |
782 | hash_descriptor (const void *p) | |
783 | { | |
58f9752a | 784 | const struct loc_descriptor *const d = p; |
b9dcdee4 JH |
785 | |
786 | return htab_hash_pointer (d->function) | d->line; | |
787 | } | |
788 | ||
789 | static int | |
790 | eq_descriptor (const void *p1, const void *p2) | |
791 | { | |
58f9752a KG |
792 | const struct loc_descriptor *const d = p1; |
793 | const struct loc_descriptor *const d2 = p2; | |
b9dcdee4 JH |
794 | |
795 | return (d->file == d2->file && d->line == d2->line | |
796 | && d->function == d2->function); | |
797 | } | |
798 | ||
07724022 JH |
/* Hashtable converting address of allocated field to loc descriptor.
   Created lazily by ggc_record_overhead.  */
static htab_t ptr_hash;
/* One entry per live recorded allocation.  */
struct ptr_hash_entry
{
  void *ptr;			/* Address of the allocated object.  */
  struct loc_descriptor *loc;	/* Call site that allocated it.  */
  size_t size;			/* Allocated bytes plus overhead.  */
};
807 | ||
808 | /* Hash table helpers functions. */ | |
809 | static hashval_t | |
810 | hash_ptr (const void *p) | |
811 | { | |
58f9752a | 812 | const struct ptr_hash_entry *const d = p; |
07724022 JH |
813 | |
814 | return htab_hash_pointer (d->ptr); | |
815 | } | |
816 | ||
817 | static int | |
818 | eq_ptr (const void *p1, const void *p2) | |
819 | { | |
58f9752a | 820 | const struct ptr_hash_entry *const p = p1; |
07724022 JH |
821 | |
822 | return (p->ptr == p2); | |
823 | } | |
824 | ||
b9dcdee4 JH |
825 | /* Return descriptor for given call site, create new one if needed. */ |
826 | static struct loc_descriptor * | |
827 | loc_descriptor (const char *name, int line, const char *function) | |
828 | { | |
829 | struct loc_descriptor loc; | |
830 | struct loc_descriptor **slot; | |
831 | ||
832 | loc.file = name; | |
833 | loc.line = line; | |
834 | loc.function = function; | |
835 | if (!loc_hash) | |
836 | loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL); | |
837 | ||
838 | slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, 1); | |
839 | if (*slot) | |
840 | return *slot; | |
841 | *slot = xcalloc (sizeof (**slot), 1); | |
842 | (*slot)->file = name; | |
843 | (*slot)->line = line; | |
844 | (*slot)->function = function; | |
845 | return *slot; | |
846 | } | |
847 | ||
d1a6adeb KH |
848 | /* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION). */ |
849 | void | |
07724022 | 850 | ggc_record_overhead (size_t allocated, size_t overhead, void *ptr, |
d1a6adeb | 851 | const char *name, int line, const char *function) |
b9dcdee4 JH |
852 | { |
853 | struct loc_descriptor *loc = loc_descriptor (name, line, function); | |
5ed6ace5 | 854 | struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry); |
07724022 JH |
855 | PTR *slot; |
856 | ||
857 | p->ptr = ptr; | |
858 | p->loc = loc; | |
859 | p->size = allocated + overhead; | |
860 | if (!ptr_hash) | |
861 | ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL); | |
862 | slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT); | |
282899df | 863 | gcc_assert (!*slot); |
07724022 | 864 | *slot = p; |
b9dcdee4 JH |
865 | |
866 | loc->times++; | |
867 | loc->allocated+=allocated; | |
868 | loc->overhead+=overhead; | |
869 | } | |
870 | ||
07724022 JH |
871 | /* Helper function for prune_overhead_list. See if SLOT is still marked and |
872 | remove it from hashtable if it is not. */ | |
873 | static int | |
874 | ggc_prune_ptr (void **slot, void *b ATTRIBUTE_UNUSED) | |
875 | { | |
876 | struct ptr_hash_entry *p = *slot; | |
877 | if (!ggc_marked_p (p->ptr)) | |
878 | { | |
879 | p->loc->collected += p->size; | |
880 | htab_clear_slot (ptr_hash, slot); | |
881 | free (p); | |
882 | } | |
883 | return 1; | |
884 | } | |
885 | ||
886 | /* After live values has been marked, walk all recorded pointers and see if | |
887 | they are still live. */ | |
888 | void | |
889 | ggc_prune_overhead_list (void) | |
890 | { | |
891 | htab_traverse (ptr_hash, ggc_prune_ptr, NULL); | |
892 | } | |
893 | ||
894 | /* Notice that the pointer has been freed. */ | |
83f676b3 RS |
895 | void |
896 | ggc_free_overhead (void *ptr) | |
07724022 JH |
897 | { |
898 | PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), | |
899 | NO_INSERT); | |
900 | struct ptr_hash_entry *p = *slot; | |
901 | p->loc->freed += p->size; | |
902 | htab_clear_slot (ptr_hash, slot); | |
903 | free (p); | |
904 | } | |
905 | ||
b9dcdee4 JH |
906 | /* Helper for qsort; sort descriptors by amount of memory consumed. */ |
907 | static int | |
a5573239 | 908 | final_cmp_statistic (const void *loc1, const void *loc2) |
b9dcdee4 JH |
909 | { |
910 | struct loc_descriptor *l1 = *(struct loc_descriptor **) loc1; | |
911 | struct loc_descriptor *l2 = *(struct loc_descriptor **) loc2; | |
a5573239 JH |
912 | long diff; |
913 | diff = ((long)(l1->allocated + l1->overhead - l1->freed) - | |
85914593 | 914 | (l2->allocated + l2->overhead - l2->freed)); |
a5573239 JH |
915 | return diff > 0 ? 1 : diff < 0 ? -1 : 0; |
916 | } | |
917 | ||
918 | /* Helper for qsort; sort descriptors by amount of memory consumed. */ | |
919 | static int | |
920 | cmp_statistic (const void *loc1, const void *loc2) | |
921 | { | |
922 | struct loc_descriptor *l1 = *(struct loc_descriptor **) loc1; | |
923 | struct loc_descriptor *l2 = *(struct loc_descriptor **) loc2; | |
924 | long diff; | |
925 | ||
926 | diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) - | |
927 | (l2->allocated + l2->overhead - l2->freed - l2->collected)); | |
928 | if (diff) | |
929 | return diff > 0 ? 1 : diff < 0 ? -1 : 0; | |
930 | diff = ((long)(l1->allocated + l1->overhead - l1->freed) - | |
931 | (l2->allocated + l2->overhead - l2->freed)); | |
932 | return diff > 0 ? 1 : diff < 0 ? -1 : 0; | |
b9dcdee4 JH |
933 | } |
934 | ||
/* Scratch array the statistics dump sorts; filled from loc_hash.  */
struct loc_descriptor **loc_array;

/* htab_traverse callback: append the descriptor in *SLOT to loc_array,
   using *B as the running element count.  Always continue traversal.  */
static int
add_statistics (void **slot, void *b)
{
  int *count = (int *) b;

  loc_array[*count] = (struct loc_descriptor *) *slot;
  ++*count;
  return 1;
}
945 | ||
946 | /* Dump per-site memory statistics. */ | |
947 | #endif | |
83f676b3 | 948 | void |
a5573239 | 949 | dump_ggc_loc_statistics (bool final ATTRIBUTE_UNUSED) |
b9dcdee4 JH |
950 | { |
951 | #ifdef GATHER_STATISTICS | |
952 | int nentries = 0; | |
953 | char s[4096]; | |
07724022 | 954 | size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0; |
b9dcdee4 JH |
955 | int i; |
956 | ||
07724022 JH |
957 | ggc_force_collect = true; |
958 | ggc_collect (); | |
959 | ||
b9dcdee4 JH |
960 | loc_array = xcalloc (sizeof (*loc_array), loc_hash->n_elements); |
961 | fprintf (stderr, "-------------------------------------------------------\n"); | |
07724022 JH |
962 | fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n", |
963 | "source location", "Garbage", "Freed", "Leak", "Overhead", "Times"); | |
b9dcdee4 | 964 | fprintf (stderr, "-------------------------------------------------------\n"); |
b9dcdee4 | 965 | htab_traverse (loc_hash, add_statistics, &nentries); |
a5573239 JH |
966 | qsort (loc_array, nentries, sizeof (*loc_array), |
967 | final ? final_cmp_statistic : cmp_statistic); | |
b9dcdee4 JH |
968 | for (i = 0; i < nentries; i++) |
969 | { | |
970 | struct loc_descriptor *d = loc_array[i]; | |
07724022 JH |
971 | allocated += d->allocated; |
972 | times += d->times; | |
973 | freed += d->freed; | |
974 | collected += d->collected; | |
b9dcdee4 JH |
975 | overhead += d->overhead; |
976 | } | |
977 | for (i = 0; i < nentries; i++) | |
978 | { | |
979 | struct loc_descriptor *d = loc_array[i]; | |
980 | if (d->allocated) | |
981 | { | |
982 | const char *s1 = d->file; | |
983 | const char *s2; | |
984 | while ((s2 = strstr (s1, "gcc/"))) | |
985 | s1 = s2 + 4; | |
986 | sprintf (s, "%s:%i (%s)", s1, d->line, d->function); | |
07724022 JH |
987 | s[48] = 0; |
988 | fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s, | |
989 | (long)d->collected, | |
990 | (d->collected) * 100.0 / collected, | |
991 | (long)d->freed, | |
992 | (d->freed) * 100.0 / freed, | |
993 | (long)(d->allocated + d->overhead - d->freed - d->collected), | |
994 | (d->allocated + d->overhead - d->freed - d->collected) * 100.0 | |
995 | / (allocated + overhead - freed - collected), | |
996 | (long)d->overhead, | |
997 | d->overhead * 100.0 / overhead, | |
998 | (long)d->times); | |
b9dcdee4 JH |
999 | } |
1000 | } | |
07724022 JH |
1001 | fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n", |
1002 | "Total", (long)collected, (long)freed, | |
1003 | (long)(allocated + overhead - freed - collected), (long)overhead, | |
1004 | (long)times); | |
1005 | fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n", | |
1006 | "source location", "Garbage", "Freed", "Leak", "Overhead", "Times"); | |
b9dcdee4 | 1007 | fprintf (stderr, "-------------------------------------------------------\n"); |
dd56b4c5 | 1008 | ggc_force_collect = false; |
b9dcdee4 JH |
1009 | #endif |
1010 | } |