]>
Commit | Line | Data |
---|---|---|
1 | /* Simple garbage collection for the GNU compiler. | |
2 | Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 | |
3 | Free Software Foundation, Inc. | |
4 | ||
5 | This file is part of GCC. | |
6 | ||
7 | GCC is free software; you can redistribute it and/or modify it under | |
8 | the terms of the GNU General Public License as published by the Free | |
9 | Software Foundation; either version 3, or (at your option) any later | |
10 | version. | |
11 | ||
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 | for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
18 | along with GCC; see the file COPYING3. If not see | |
19 | <http://www.gnu.org/licenses/>. */ | |
20 | ||
21 | /* Generic garbage collection (GC) functions and data, not specific to | |
22 | any particular GC implementation. */ | |
23 | ||
24 | #include "config.h" | |
25 | #include "system.h" | |
26 | #include "coretypes.h" | |
27 | #include "hashtab.h" | |
28 | #include "ggc.h" | |
29 | #include "toplev.h" | |
30 | #include "params.h" | |
31 | #include "hosthooks.h" | |
32 | #include "hosthooks-def.h" | |
33 | ||
34 | #ifdef HAVE_SYS_RESOURCE_H | |
35 | # include <sys/resource.h> | |
36 | #endif | |
37 | ||
38 | #ifdef HAVE_MMAP_FILE | |
39 | # include <sys/mman.h> | |
40 | # ifdef HAVE_MINCORE | |
41 | /* This is on Solaris. */ | |
42 | # include <sys/types.h> | |
43 | # endif | |
44 | #endif | |
45 | ||
46 | #ifndef MAP_FAILED | |
47 | # define MAP_FAILED ((void *)-1) | |
48 | #endif | |
49 | ||
50 | /* When set, ggc_collect will do collection. */ | |
51 | bool ggc_force_collect; | |
52 | ||
53 | /* When true, protect the contents of the identifier hash table. */ | |
54 | bool ggc_protect_identifiers = true; | |
55 | ||
56 | /* Statistics about the allocation. */ | |
57 | static ggc_statistics *ggc_stats; | |
58 | ||
59 | struct traversal_state; | |
60 | ||
61 | static int ggc_htab_delete (void **, void *); | |
62 | static hashval_t saving_htab_hash (const void *); | |
63 | static int saving_htab_eq (const void *, const void *); | |
64 | static int call_count (void **, void *); | |
65 | static int call_alloc (void **, void *); | |
66 | static int compare_ptr_data (const void *, const void *); | |
67 | static void relocate_ptrs (void *, void *); | |
68 | static void write_pch_globals (const struct ggc_root_tab * const *tab, | |
69 | struct traversal_state *state); | |
70 | static double ggc_rlimit_bound (double); | |
71 | ||
72 | /* Maintain global roots that are preserved during GC. */ | |
73 | ||
74 | /* Process a slot of an htab by deleting it if it has not been marked. */ | |
75 | ||
static int
ggc_htab_delete (void **slot, void *info)
{
  /* INFO is the ggc_cache_tab entry describing the table being walked.  */
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;

  /* If the object in this slot was not marked during root marking,
     remove it from the cache; otherwise run the table's keep-alive
     callback on the surviving entry.  */
  if (! (*r->marked_p) (*slot))
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);

  /* Returning nonzero tells htab_traverse to continue.  */
  return 1;
}
88 | ||
89 | /* Iterate through all registered roots and mark each element. */ | |
90 | ||
void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  const struct ggc_cache_tab *const *ct;
  const struct ggc_cache_tab *cti;
  size_t i;

  /* Deletable roots need not survive collection: zero them out so they
     do not keep anything alive.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Walk every registered root slot and invoke its mark callback on the
     pointer stored there.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->cb)(*(void **)((char *)rti->base + rti->stride * i));

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  The table itself and its entries array
     are marked so the cache structure survives even as dead entries are
     dropped by ggc_htab_delete.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    for (cti = *ct; cti->base != NULL; cti++)
      if (*cti->base)
	{
	  ggc_set_mark (*cti->base);
	  htab_traverse_noresize (*cti->base, ggc_htab_delete,
				  CONST_CAST (void *, (const void *)cti));
	  ggc_set_mark ((*cti->base)->entries);
	}

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();
}
127 | ||
128 | /* Allocate a block of memory, then clear it. */ | |
129 | void * | |
130 | ggc_alloc_cleared_stat (size_t size MEM_STAT_DECL) | |
131 | { | |
132 | void *buf = ggc_alloc_stat (size PASS_MEM_STAT); | |
133 | memset (buf, 0, size); | |
134 | return buf; | |
135 | } | |
136 | ||
137 | /* Resize a block of memory, possibly re-allocating it. */ | |
/* Resize the GC'd block X to SIZE bytes, possibly re-allocating it.
   Returns the (possibly moved) block; the old block must not be used
   after a move.  A NULL X behaves like a plain allocation.  */
void *
ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_alloc_stat (size PASS_MEM_STAT);

  /* NOTE: this is the size of the containing pool object, which may be
     larger than the size originally requested for X.  */
  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_alloc_stat (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
182 | ||
/* Like ggc_alloc_cleared, but performs a multiplication.  Verify that
   S1 * S2 does not overflow size_t first: a wrapped product would
   silently allocate an undersized buffer for the caller.  */
void *
ggc_calloc (size_t s1, size_t s2)
{
  gcc_assert (s2 == 0 || s1 <= (size_t) -1 / s2);
  return ggc_alloc_cleared (s1 * s2);
}
189 | ||
190 | /* These are for splay_tree_new_ggc. */ | |
/* Allocation callback for splay_tree_new_ggc: allocate SZ bytes of
   GC'd memory.  NL is the (unused) splay tree allocation cookie and
   must be NULL.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_alloc (sz);
}
197 | ||
/* Deallocation callback for splay_tree_new_ggc: a deliberate no-op,
   since GC'd memory is reclaimed by collection, not explicit free.  */
void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}
203 | ||
204 | /* Print statistics that are independent of the collector in use. */ | |
/* Scale a byte count down to a human-readable magnitude: raw bytes
   below 10K, kilobytes below 10M, megabytes otherwise.  LABEL yields
   the matching unit suffix character for the same thresholds.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
211 | ||
/* Print allocation statistics that are independent of the collector in
   use to STREAM, filling in STATS via a forced collection.  */
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
228 | \f | |
229 | /* Functions for saving and restoring GCable memory to disk. */ | |
230 | ||
/* Maps the address of each object being saved to its ptr_data record
   while a PCH file is written; live only for the duration of
   gt_pch_save.  */
static htab_t saving_htab;
232 | ||
/* Bookkeeping for one GC object being written to a PCH file.  */
struct ptr_data
{
  void *obj;			/* The object's current address.  */
  void *note_ptr_cookie;	/* Cookie passed back to the callbacks.  */
  gt_note_pointers note_ptr_fn;	/* Walks the object's internal pointers.  */
  gt_handle_reorder reorder_fn;	/* Optional pre-relocation fixup.  */
  size_t size;			/* Object size in bytes.  */
  void *new_addr;		/* Address the object will have on restore.  */
  enum gt_types_enum type;	/* GTY type classification.  */
};
243 | ||
/* Derive a hash value from pointer X, discarding the low bits that are
   identical for aligned GC objects.  The argument and the whole
   expansion are parenthesized so the macro is safe with arbitrary
   expressions.  NOTE(review): the cast through `long' truncates
   pointers on LLP64 hosts -- confirm whether such hosts are supported
   by this code path.  */
#define POINTER_HASH(x) ((hashval_t) ((long) (x) >> 3))
245 | ||
246 | /* Register an object in the hash table. */ | |
247 | ||
/* Register OBJ in saving_htab for later PCH writing.  NOTE_PTR_FN and
   NOTE_PTR_COOKIE describe how to walk OBJ's pointers; TYPE is its GTY
   classification.  Returns 1 if OBJ was newly registered, 0 if it was
   already present (or is the NULL/sentinel pointer).  */
int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn,
		    enum gt_types_enum type)
{
  struct ptr_data **slot;

  /* (void *) 1 is used elsewhere as a non-NULL sentinel; neither it nor
     NULL is a real object.  */
  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
			      INSERT);
  if (*slot != NULL)
    {
      /* Re-registration must agree with the original registration.  */
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  /* Strings are saved with their exact length; other objects use the
     allocator's notion of their size.  */
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  (*slot)->type = type;
  return 1;
}
279 | ||
280 | /* Register an object in the hash table. */ | |
281 | ||
/* Attach REORDER_FN to the already-registered object OBJ so it can be
   fixed up before its pointers are relocated during PCH writing.  OBJ
   must have been registered via gt_pch_note_object with the same
   NOTE_PTR_COOKIE.  */
void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}
297 | ||
298 | /* Hash and equality functions for saving_htab, callbacks for htab_create. */ | |
299 | ||
/* Hash a ptr_data record by its object address, matching the hash used
   for raw-pointer lookups in saving_htab.  */
static hashval_t
saving_htab_hash (const void *p)
{
  return POINTER_HASH (((const struct ptr_data *)p)->obj);
}
305 | ||
/* Equality for saving_htab: P1 is a stored ptr_data record, P2 is the
   raw object pointer being looked up.  */
static int
saving_htab_eq (const void *p1, const void *p2)
{
  return ((const struct ptr_data *)p1)->obj == p2;
}
311 | ||
312 | /* Handy state for the traversal functions. */ | |
313 | ||
/* Handy state threaded through the PCH-saving traversal callbacks.  */
struct traversal_state
{
  FILE *f;			/* Output PCH file.  */
  struct ggc_pch_data *d;	/* Collector-specific PCH state.  */
  size_t count;			/* Number of registered objects.  */
  struct ptr_data **ptrs;	/* All objects, later sorted by new_addr.  */
  size_t ptrs_i;		/* Fill index into PTRS.  */
};
322 | ||
323 | /* Callbacks for htab_traverse. */ | |
324 | ||
/* htab_traverse callback: account for one registered object in the
   collector's size bookkeeping and tally the total object count.  */
static int
call_count (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S,
			d->type);
  state->count++;
  return 1;
}
337 | ||
/* htab_traverse callback: assign each registered object its address in
   the restored PCH image and append it to the state's pointer array.  */
static int
call_alloc (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S,
				      d->type);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}
350 | ||
351 | /* Callback for qsort. */ | |
352 | ||
353 | static int | |
354 | compare_ptr_data (const void *p1_p, const void *p2_p) | |
355 | { | |
356 | const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p; | |
357 | const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p; | |
358 | return (((size_t)p1->new_addr > (size_t)p2->new_addr) | |
359 | - ((size_t)p1->new_addr < (size_t)p2->new_addr)); | |
360 | } | |
361 | ||
362 | /* Callbacks for note_ptr_fn. */ | |
363 | ||
/* note_ptr_fn callback: rewrite the pointer at PTR_P to the address its
   target will have in the restored PCH image, via saving_htab.  */
static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  /* NULL and the (void *) 1 sentinel are left untouched.  */
  if (*ptr == NULL || *ptr == (void *)1)
    return;

  /* Every reachable pointer must have been registered earlier by the
     gt_pch_nx walkers.  */
  result = (struct ptr_data *)
    htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}
380 | ||
381 | /* Write out, after relocation, the pointers in TAB. */ | |
/* Write out, after relocation, the pointers in TAB.  For every root
   slot, the pointer written is the address the target object will have
   after the PCH is mapped back in.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  /* NULL and the sentinel are written through unchanged.  */
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can't write PCH file: %m");
	    }
	  else
	    {
	      new_ptr = (struct ptr_data *)
		htab_find_with_hash (saving_htab, ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can't write PCH file: %m");
	    }
	}
}
412 | ||
413 | /* Hold the information we need to mmap the file back in. */ | |
414 | ||
/* Hold the information we need to mmap the file back in.  Written to
   the PCH file by gt_pch_save and read back by gt_pch_restore.  */
struct mmap_info
{
  size_t offset;		/* File offset of the mappable area.  */
  size_t size;			/* Size of the mappable area.  */
  void *preferred_base;		/* Address the data was laid out for.  */
};
421 | ||
422 | /* Write out the state of the compiler to F. */ | |
423 | ||
/* Write out the state of the compiler to F as a precompiled header.  */
void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity();

  gt_pch_save_stringpool ();

  /* saving_htab owns its ptr_data records; `free' reclaims them when
     the table is deleted at the end.  */
  saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);

  /* Walk all roots, registering every reachable object in saving_htab
     via the generated pchw walkers.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch();
  state.count = 0;
  htab_traverse (saving_htab, call_count, &state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;
  htab_traverse (saving_htab, call_alloc, &state);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    /* NOTE(review): sizeof (mmi) is added before the -1 error check, so
       a failing ftell would not compare equal to -1 here -- confirm
       whether this matches upstream intent.  */
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error ("can't get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can't write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can't write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

  /* Actually write out the objects.  Each object is copied into a
     scratch buffer, relocated there, written, and (for non-strings)
     restored so the running compiler's state is unchanged.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
    }
  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  free (state.ptrs);
  htab_delete (saving_htab);
}
530 | ||
531 | /* Read the state of the compiler back in from F. */ | |
532 | ||
/* Read the state of the compiler back in from F, replacing the current
   GC heap contents with the PCH image.  */
void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can't read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  These are the
     already-relocated values written by write_pch_globals.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can't read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can't read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  /* result: -1 = can't map at the required address (fatal), 0 = memory
     reserved but data must be read with fread, 1 = file data mapped in
     place by the host hook.  */
  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error ("can't read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can't read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
590 | ||
591 | /* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present. | |
592 | Select no address whatsoever, and let gt_pch_save choose what it will with | |
593 | malloc, presumably. */ | |
594 | ||
/* Default HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present:
   select no address at all and let gt_pch_save lay the data out
   wherever allocation happens to put it.  */
void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}
601 | ||
602 | /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present. | |
603 | Allocate SIZE bytes with malloc. Return 0 if the address we got is the | |
604 | same as base, indicating that the memory has been allocated but needs to | |
   be read in from the file.  Return -1 if the address differs, as
   relocation of the PCH file would be required.  */
607 | ||
/* Default HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present:
   allocate SIZE bytes with malloc and return 0 only if it landed
   exactly at BASE (caller then freads the data into it), -1 otherwise.
   NOTE(review): on the -1 path the malloc'd block is leaked, though
   the caller treats -1 as fatal anyway.  */
int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}
615 | ||
/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */
619 | ||
/* Return the system page size as the allocation granularity used to
   align the mmappable region of a PCH file.  */
size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize();
}
625 | ||
626 | #if HAVE_MMAP_FILE | |
627 | /* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present. | |
628 | We temporarily allocate SIZE bytes, and let the kernel place the data | |
629 | wherever it will. If it worked, that's our spot, if not we're likely | |
630 | to be in trouble. */ | |
631 | ||
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  The mapping is immediately torn down again; only
   the address is kept.  */
void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap (ret, size);

  return ret;
}
645 | ||
646 | /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present. | |
647 | Map SIZE bytes of FD+OFFSET at BASE. Return 1 if we succeeded at | |
648 | mapping the data at BASE, -1 if we couldn't. | |
649 | ||
650 | This version assumes that the kernel honors the START operand of mmap | |
651 | even without MAP_FIXED if START through START+SIZE are not currently | |
652 | mapped with something. */ | |
653 | ||
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */
int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  /* Anything other than landing exactly at BASE counts as failure.  */
  return addr == base ? 1 : -1;
}
670 | #endif /* HAVE_MMAP_FILE */ | |
671 | ||
672 | /* Modify the bound based on rlimits. */ | |
/* Clamp LIMIT (a byte count) to any resource limit the OS imposes on
   address space, and return the possibly-reduced bound.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
704 | ||
705 | /* Heuristic to set a default for GGC_MIN_EXPAND. */ | |
/* Heuristic to set a default for GGC_MIN_EXPAND: the percentage the
   heap may grow between collections.  */
int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
723 | ||
724 | /* Heuristic to set a default for GGC_MIN_HEAPSIZE. */ | |
/* Heuristic to set a default for GGC_MIN_HEAPSIZE: the heap size (in
   kilobytes) below which collection is not attempted.  */
int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
	&& rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  /* Final clamps: at least 4M, at most 128M.  */
  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
762 | ||
/* Install the heuristic defaults for the GGC tuning parameters, unless
   a GC-checking build wants collections on every opportunity.  */
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic());
#endif
}
771 | ||
772 | #ifdef GATHER_STATISTICS | |
773 | ||
774 | /* Datastructure used to store per-call-site statistics. */ | |
/* Datastructure used to store per-call-site allocation statistics.  */
struct loc_descriptor
{
  const char *file;		/* Source file of the allocation site.  */
  int line;			/* Line number of the allocation site.  */
  const char *function;		/* Enclosing function name.  */
  int times;			/* Number of allocations recorded.  */
  size_t allocated;		/* Total bytes requested.  */
  size_t overhead;		/* Total allocator overhead bytes.  */
  size_t freed;			/* Bytes explicitly freed.  */
  size_t collected;		/* Bytes reclaimed by collection.  */
};
786 | ||
787 | /* Hashtable used for statistics. */ | |
788 | static htab_t loc_hash; | |
789 | ||
790 | /* Hash table helpers functions. */ | |
/* Hash a loc_descriptor by its function-name pointer combined with the
   line number.  */
static hashval_t
hash_descriptor (const void *p)
{
  const struct loc_descriptor *const d = p;

  /* Bitwise OR is a weak mixer, but collisions are resolved by
     eq_descriptor so this only affects performance.  */
  return htab_hash_pointer (d->function) | d->line;
}
798 | ||
/* Two descriptors are equal iff file, line, and function all match.
   File and function compare by pointer identity, which is valid here
   because callers pass __FILE__/__func__ string literals.  */
static int
eq_descriptor (const void *p1, const void *p2)
{
  const struct loc_descriptor *const d = p1;
  const struct loc_descriptor *const d2 = p2;

  return (d->file == d2->file && d->line == d2->line
	  && d->function == d2->function);
}
808 | ||
809 | /* Hashtable converting address of allocated field to loc descriptor. */ | |
/* Hashtable converting address of an allocated object to its loc
   descriptor; created lazily by ggc_record_overhead.  */
static htab_t ptr_hash;
struct ptr_hash_entry
{
  void *ptr;			/* The allocated object.  */
  struct loc_descriptor *loc;	/* Call site that allocated it.  */
  size_t size;			/* Bytes charged (allocation + overhead).  */
};
817 | ||
818 | /* Hash table helpers functions. */ | |
/* Hash a ptr_hash_entry by the address of the object it tracks.  */
static hashval_t
hash_ptr (const void *p)
{
  const struct ptr_hash_entry *const d = p;

  return htab_hash_pointer (d->ptr);
}
826 | ||
/* Equality for ptr_hash: P1 is a stored entry, P2 is the raw object
   pointer being looked up.  */
static int
eq_ptr (const void *p1, const void *p2)
{
  const struct ptr_hash_entry *const p = p1;

  return (p->ptr == p2);
}
834 | ||
835 | /* Return descriptor for given call site, create new one if needed. */ | |
836 | static struct loc_descriptor * | |
837 | loc_descriptor (const char *name, int line, const char *function) | |
838 | { | |
839 | struct loc_descriptor loc; | |
840 | struct loc_descriptor **slot; | |
841 | ||
842 | loc.file = name; | |
843 | loc.line = line; | |
844 | loc.function = function; | |
845 | if (!loc_hash) | |
846 | loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL); | |
847 | ||
848 | slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, 1); | |
849 | if (*slot) | |
850 | return *slot; | |
851 | *slot = xcalloc (sizeof (**slot), 1); | |
852 | (*slot)->file = name; | |
853 | (*slot)->line = line; | |
854 | (*slot)->function = function; | |
855 | return *slot; | |
856 | } | |
857 | ||
858 | /* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION). */ | |
/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION),
   and remember that PTR (which must not already be tracked) accounts
   for those bytes.  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
		     const char *name, int line, const char *function)
{
  struct loc_descriptor *loc = loc_descriptor (name, line, function);
  struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
  PTR *slot;

  p->ptr = ptr;
  p->loc = loc;
  p->size = allocated + overhead;
  /* Created lazily on the first recorded allocation.  */
  if (!ptr_hash)
    ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
  slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT);
  /* Each live pointer may be recorded only once.  */
  gcc_assert (!*slot);
  *slot = p;

  loc->times++;
  loc->allocated+=allocated;
  loc->overhead+=overhead;
}
880 | ||
881 | /* Helper function for prune_overhead_list. See if SLOT is still marked and | |
882 | remove it from hashtable if it is not. */ | |
883 | static int | |
884 | ggc_prune_ptr (void **slot, void *b ATTRIBUTE_UNUSED) | |
885 | { | |
886 | struct ptr_hash_entry *p = *slot; | |
887 | if (!ggc_marked_p (p->ptr)) | |
888 | { | |
889 | p->loc->collected += p->size; | |
890 | htab_clear_slot (ptr_hash, slot); | |
891 | free (p); | |
892 | } | |
893 | return 1; | |
894 | } | |
895 | ||
896 | /* After live values has been marked, walk all recorded pointers and see if | |
897 | they are still live. */ | |
898 | void | |
899 | ggc_prune_overhead_list (void) | |
900 | { | |
901 | htab_traverse (ptr_hash, ggc_prune_ptr, NULL); | |
902 | } | |
903 | ||
904 | /* Notice that the pointer has been freed. */ | |
905 | void | |
906 | ggc_free_overhead (void *ptr) | |
907 | { | |
908 | PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), | |
909 | NO_INSERT); | |
910 | struct ptr_hash_entry *p = *slot; | |
911 | p->loc->freed += p->size; | |
912 | htab_clear_slot (ptr_hash, slot); | |
913 | free (p); | |
914 | } | |
915 | ||
916 | /* Helper for qsort; sort descriptors by amount of memory consumed. */ | |
917 | static int | |
918 | final_cmp_statistic (const void *loc1, const void *loc2) | |
919 | { | |
920 | struct loc_descriptor *l1 = *(struct loc_descriptor **) loc1; | |
921 | struct loc_descriptor *l2 = *(struct loc_descriptor **) loc2; | |
922 | long diff; | |
923 | diff = ((long)(l1->allocated + l1->overhead - l1->freed) - | |
924 | (l2->allocated + l2->overhead - l2->freed)); | |
925 | return diff > 0 ? 1 : diff < 0 ? -1 : 0; | |
926 | } | |
927 | ||
928 | /* Helper for qsort; sort descriptors by amount of memory consumed. */ | |
929 | static int | |
930 | cmp_statistic (const void *loc1, const void *loc2) | |
931 | { | |
932 | struct loc_descriptor *l1 = *(struct loc_descriptor **) loc1; | |
933 | struct loc_descriptor *l2 = *(struct loc_descriptor **) loc2; | |
934 | long diff; | |
935 | ||
936 | diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) - | |
937 | (l2->allocated + l2->overhead - l2->freed - l2->collected)); | |
938 | if (diff) | |
939 | return diff > 0 ? 1 : diff < 0 ? -1 : 0; | |
940 | diff = ((long)(l1->allocated + l1->overhead - l1->freed) - | |
941 | (l2->allocated + l2->overhead - l2->freed)); | |
942 | return diff > 0 ? 1 : diff < 0 ? -1 : 0; | |
943 | } | |
944 | ||
/* Flat array of the descriptors, filled from LOC_HASH for sorting.  */
struct loc_descriptor **loc_array;

/* htab_traverse callback: append *SLOT to loc_array.  B points to the
   running element count, which is advanced for each entry.  */
static int
add_statistics (void **slot, void *b)
{
  int *count = (int *) b;

  loc_array[(*count)++] = (struct loc_descriptor *) *slot;
  return 1;
}
955 | ||
956 | /* Dump per-site memory statistics. */ | |
957 | #endif | |
958 | void | |
959 | dump_ggc_loc_statistics (bool final ATTRIBUTE_UNUSED) | |
960 | { | |
961 | #ifdef GATHER_STATISTICS | |
962 | int nentries = 0; | |
963 | char s[4096]; | |
964 | size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0; | |
965 | int i; | |
966 | ||
967 | ggc_force_collect = true; | |
968 | ggc_collect (); | |
969 | ||
970 | loc_array = xcalloc (sizeof (*loc_array), loc_hash->n_elements); | |
971 | fprintf (stderr, "-------------------------------------------------------\n"); | |
972 | fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n", | |
973 | "source location", "Garbage", "Freed", "Leak", "Overhead", "Times"); | |
974 | fprintf (stderr, "-------------------------------------------------------\n"); | |
975 | htab_traverse (loc_hash, add_statistics, &nentries); | |
976 | qsort (loc_array, nentries, sizeof (*loc_array), | |
977 | final ? final_cmp_statistic : cmp_statistic); | |
978 | for (i = 0; i < nentries; i++) | |
979 | { | |
980 | struct loc_descriptor *d = loc_array[i]; | |
981 | allocated += d->allocated; | |
982 | times += d->times; | |
983 | freed += d->freed; | |
984 | collected += d->collected; | |
985 | overhead += d->overhead; | |
986 | } | |
987 | for (i = 0; i < nentries; i++) | |
988 | { | |
989 | struct loc_descriptor *d = loc_array[i]; | |
990 | if (d->allocated) | |
991 | { | |
992 | const char *s1 = d->file; | |
993 | const char *s2; | |
994 | while ((s2 = strstr (s1, "gcc/"))) | |
995 | s1 = s2 + 4; | |
996 | sprintf (s, "%s:%i (%s)", s1, d->line, d->function); | |
997 | s[48] = 0; | |
998 | fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s, | |
999 | (long)d->collected, | |
1000 | (d->collected) * 100.0 / collected, | |
1001 | (long)d->freed, | |
1002 | (d->freed) * 100.0 / freed, | |
1003 | (long)(d->allocated + d->overhead - d->freed - d->collected), | |
1004 | (d->allocated + d->overhead - d->freed - d->collected) * 100.0 | |
1005 | / (allocated + overhead - freed - collected), | |
1006 | (long)d->overhead, | |
1007 | d->overhead * 100.0 / overhead, | |
1008 | (long)d->times); | |
1009 | } | |
1010 | } | |
1011 | fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n", | |
1012 | "Total", (long)collected, (long)freed, | |
1013 | (long)(allocated + overhead - freed - collected), (long)overhead, | |
1014 | (long)times); | |
1015 | fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n", | |
1016 | "source location", "Garbage", "Freed", "Leak", "Overhead", "Times"); | |
1017 | fprintf (stderr, "-------------------------------------------------------\n"); | |
1018 | ggc_force_collect = false; | |
1019 | #endif | |
1020 | } |