/* "Bag-of-pages" zone garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
   Free Software Foundation, Inc.

   Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
   (dberlin@dberlin.org)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"

#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#error "Zone collector requires mmap"
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

/* NOTES:
     If we track inter-zone pointers, we can mark single zones at a
     time.
     If we have a zone where we guarantee no inter-zone pointers, we
     could mark that zone separately.
     The garbage zone should not be marked, and we should return 1 in
     ggc_set_mark for any object in the garbage zone, which cuts off
     marking quickly.  */
/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than or equal to the page size.

   Pages for small objects are broken up into chunks, each of which
   is described by a struct alloc_chunk.  One can walk over all
   chunks on the page by adding the chunk size to the chunk's data
   address.  The free space for a page exists in the free chunk bins.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

#ifdef COOKIE_CHECKING
#define CHUNK_MAGIC 0x95321123
#define DEADCHUNK_MAGIC 0x12817317
#endif

/* This structure manages small chunks.  When the chunk is free, it's
   linked with other chunks via free_next.  When the chunk is allocated,
   the data starts at u.  Large chunks are allocated one at a time to
   their own page, and so don't come in here.

   The "type" field is a placeholder for a future change to do
   generational collection.  At present it is 0 when free and
   1 when allocated.  */

struct alloc_chunk {
#ifdef COOKIE_CHECKING
  unsigned int magic;
#endif
  unsigned int type:1;
  unsigned int typecode:14;
  unsigned int large:1;
  unsigned int size:15;
  unsigned int mark:1;
  union {
    struct alloc_chunk *next_free;
    char data[1];

    /* Make sure the data is sufficiently aligned.  */
    HOST_WIDEST_INT align_i;
#ifdef HAVE_LONG_DOUBLE
    long double align_d;
#else
    double align_d;
#endif
  } u;
} __attribute__ ((packed));

#define CHUNK_OVERHEAD  (offsetof (struct alloc_chunk, u))

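/* An illustrative sketch, not part of the allocator: because every
   chunk records its own size, all chunks on a small-object page can
   be visited by stepping past each chunk's data, exactly the walk
   that sweep_pages and check_cookies perform below.  PAGE and
   PAGE_SIZE stand in for a page's start address and G.pagesize;
   visit_chunk is a hypothetical callback.

     struct alloc_chunk *chunk = (struct alloc_chunk *) page;
     struct alloc_chunk *end = (struct alloc_chunk *) (page + page_size);
     do
       {
         visit_chunk (chunk);
         chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
       }
     while (chunk < end);  */
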
/* We maintain several bins of free lists for chunks for very small
   objects.  We never exhaustively search other bins -- if we don't
   find one of the proper size, we allocate from the "larger" bin.  */

/* Decreasing the number of free bins increases the time it takes to
   allocate.  The same is true of increasing MAX_FREE_BIN_SIZE without
   increasing NUM_FREE_BINS.

   After much histogramming of allocation sizes and time spent on gc,
   on a PowerPC G4 7450 - 667 MHz, and a Pentium 4 - 2.8 GHz,
   these were determined to be the optimal values.  */
#define NUM_FREE_BINS		64
#define MAX_FREE_BIN_SIZE	256
#define FREE_BIN_DELTA		(MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
#define SIZE_BIN_UP(SIZE)	(((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)

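/* A worked example, assuming the values above: FREE_BIN_DELTA is
   256/64 = 4, so bin B holds free chunks of size 4*B to 4*B + 3.
   An allocation request of 10 bytes looks in bin SIZE_BIN_UP (10)
   = (10 + 3) / 4 = 3, the first bin whose every chunk is at least
   10 bytes; a freed 10-byte chunk goes into bin SIZE_BIN_DOWN (10)
   = 10/4 = 2, the last bin whose requests it can always satisfy.  */
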
/* Marker used as chunk->size for a large object.  Should correspond
   to the size of the bitfield above.  */
#define LARGE_OBJECT_SIZE	0x7fff

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

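/* For example, on a host where the union member with the strictest
   alignment needs 8-byte alignment, the compiler must place U at
   offset 8 (the first aligned boundary after C), so MAX_ALIGNMENT
   evaluates to 8.  */
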
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

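/* Worked examples: ROUND_UP_VALUE (10, 8) is 6, ROUND_UP (10, 8) is
   16, and ROUND_UP (16, 8) is 16 (CEIL being GCC's round-up division
   macro).  */
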
/* A page_entry records the status of an allocation page.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* How many collections we've survived.  */
  size_t survived;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;
} page_entry;

/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* The stream used for debugging output.  */
  FILE *debug_file;
} G;

/* The zone allocation structure.  */
struct alloc_zone
{
  /* Name of the zone.  */
  const char *name;

  /* Linked list of pages in a zone.  */
  page_entry *pages;

  /* Linked lists of free storage.  Slot I, for 1 <= I <= NUM_FREE_BINS,
     holds free chunks of size I * FREE_BIN_DELTA.  All other chunks
     are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;
} main_zone;

struct alloc_zone *rtl_zone;
struct alloc_zone *garbage_zone;
struct alloc_zone *tree_zone;

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  */
#define GGC_QUIRE_SIZE 16

static int ggc_allocated_p (const void *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct page_entry * alloc_small_page (struct alloc_zone *);
static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
static void free_page (struct page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void check_cookies (void);


/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  struct alloc_chunk *chunk;
  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->type == 1)
    return true;
  return false;
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  VALGRIND_MALLOCLIKE_BLOCK (page, size, 0, 0);

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;
  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
  return page;
}
#endif

/* Allocate a new small-object page for ZONE, and return an entry
   for it.  */

static inline struct page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  page = NULL;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
      page = entry->page;
    }
#ifdef USING_MMAP
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = zone->free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      zone->free_pages = f;
    }
#endif
  if (entry == NULL)
    entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));

  entry->next = 0;
  entry->bytes = G.pagesize;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = false;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + G.pagesize - 1);

  return entry;
}

/* Allocate a large page of size SIZE in ZONE.  */

static inline struct page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;
  size = ROUND_UP (size, 1024);
  page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
  entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);

  entry->next = 0;
  entry->bytes = size;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = true;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + size - 1);

  return entry;
}

/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, entry->page, entry->page + entry->bytes - 1);

  if (entry->large_p)
    {
      free (entry->page);
      VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
    }
  else
    {
      /* Mark the page as inaccessible.  Discard the handle to
	 avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

      entry->next = entry->zone->free_pages;
      entry->zone->free_pages = entry;
    }
}

/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}

/* Place CHUNK of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
{
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  if (bin == 0)
    abort ();
  if (bin > NUM_FREE_BINS)
    bin = 0;
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
    abort ();
  chunk->magic = DEADCHUNK_MAGIC;
#endif
  chunk->u.next_free = zone->free_chunks[bin];
  zone->free_chunks[bin] = chunk;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
}

/* Allocate a chunk of memory of SIZE bytes.  */

static void *
ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
{
  size_t bin = 0;
  size_t lsize = 0;
  struct page_entry *entry;
  struct alloc_chunk *chunk, *lchunk, **pp;
  void *result;

  /* Align size, so that we're assured of aligned allocations.  */
  if (size < FREE_BIN_DELTA)
    size = FREE_BIN_DELTA;
  size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
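  /* The expression above rounds SIZE up to a multiple of
     MAX_ALIGNMENT: alignments are powers of two, so -MAX_ALIGNMENT is
     a two's-complement mask with the low-order bits clear, and adding
     MAX_ALIGNMENT - 1 first biases SIZE up to the next boundary.  */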

  /* Large objects are handled specially.  */
  if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
    {
      size = ROUND_UP (size, 1024);
      entry = alloc_large_page (size, zone);
      entry->survived = 0;
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 1;
      chunk->size = CEIL (size, 1024);
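      /* A large chunk's size is recorded in kilobytes, since the size
	 bitfield is only 15 bits wide; ggc_get_size scales it back up
	 by 1024.  */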

      goto found;
    }

  /* First look for a tiny object already segregated into its own
     size bucket.  */
  bin = SIZE_BIN_UP (size);
  if (bin <= NUM_FREE_BINS)
    {
      chunk = zone->free_chunks[bin];
      if (chunk)
	{
	  zone->free_chunks[bin] = chunk->u.next_free;
	  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->u.next_free;
      chunk = *pp;
    }

  /* Failing that, allocate new storage.  */
  if (!chunk)
    {
      entry = alloc_small_page (zone);
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = G.pagesize - CHUNK_OVERHEAD;
      chunk->large = 0;
    }
  else
    {
      *pp = chunk->u.next_free;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 0;
    }
  /* Release extra memory from a chunk that's too big.  */
  lsize = chunk->size - size;
  if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;

      lsize -= CHUNK_OVERHEAD;
      lchunk = (struct alloc_chunk *)(chunk->u.data + size);
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
#ifdef COOKIE_CHECKING
      lchunk->magic = CHUNK_MAGIC;
#endif
      lchunk->type = 0;
      lchunk->mark = 0;
      lchunk->size = lsize;
      lchunk->large = 0;
      free_chunk (lchunk, lsize, zone);
    }
  /* Calculate the object's address.  */
 found:
#ifdef COOKIE_CHECKING
  chunk->magic = CHUNK_MAGIC;
#endif
  chunk->type = 1;
  chunk->mark = 0;
  chunk->typecode = type;
  result = chunk->u.data;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* `Poison' the entire allocated object.  */
  memset (result, 0xaf, size);
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size + CHUNK_OVERHEAD;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
	     (void *)chunk, (unsigned long) size, result);

  return result;
}

/* Allocate a chunk of memory of SIZE bytes with type GTE, in an
   appropriate zone for that type.  */

void *
ggc_alloc_typed (enum gt_types_enum gte, size_t size)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_1 (size, tree_zone, gte);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte);

    default:
      return ggc_alloc_zone_1 (size, &main_zone, gte);
    }
}

/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc (size_t size)
{
  return ggc_alloc_zone_1 (size, &main_zone, -1);
}

/* Zone allocation allocates into the specified zone.  */

void *
ggc_alloc_zone (size_t size, struct alloc_zone *zone)
{
  return ggc_alloc_zone_1 (size, zone, -1);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
  memset ((CHUNK)->u.data, 0xa5, (SIZE))
#else
#define poison_chunk(CHUNK, SIZE)
#endif

/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);

  /* Poison the chunk.  */
  poison_chunk (chunk, ggc_get_size (p));

  /* XXX: We only deal with explicitly freeing large objects ATM.  */
  if (chunk->large)
    /* The chunk header, not the data pointer, is what xmalloc
       returned in alloc_large_page.  */
    free (chunk);
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->mark)
    return 1;
  chunk->mark = 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  return chunk->mark;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->large)
    return chunk->size * 1024;

  return chunk->size;
}

/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  rtl_zone = new_ggc_zone ("RTL zone");
  tree_zone = new_ggc_zone ("Tree zone");
  garbage_zone = new_ggc_zone ("Garbage zone");

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, &main_zone);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = main_zone.free_pages;
    main_zone.free_pages = e;
  }
#endif
}

/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char * name)
{
  struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
  return new_zone;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone * dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */ ;

#ifdef ENABLE_CHECKING
  /* We should have found the zone in the list.  Anything else is fatal.  */
  if (!z)
    abort ();
#endif

  /* z is dead, baby.  z is dead.  */
  z->dead = true;
}

/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ++(zone->context_depth);
  /* Die on wrap.  */
  if (main_zone.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}

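/* An illustrative sketch of the context machinery; the calls are
   real, the scenario hypothetical:

     ggc_push_context ();     -- enter a fresh innermost context
     obj = ggc_alloc (size);  -- OBJ may be collected while this
                                 context is the current one
     ggc_collect ();          -- objects from outer contexts are
                                 treated as live
     ggc_pop_context ();      -- survivors migrate to the enclosing
                                 context

   (see ggc_pop_context_1 below and the context_depth test in
   sweep_pages).  */
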
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

static void
ggc_pop_context_1 (struct alloc_zone *zone)
{
  unsigned long omask;
  unsigned depth;
  page_entry *p;

  depth = --(zone->context_depth);
  omask = (unsigned long)1 << (depth + 1);

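  /* OMASK has the bit for the just-popped context depth set.  If no
     allocations or collections happened at that depth, there is
     nothing to migrate; otherwise fold the popped depth's allocation
     bit down into the new current depth and clear all bits above
     it.  */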
  if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
    return;

  zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
  zone->context_depth_allocations &= omask - 1;
  zone->context_depth_collections &= omask - 1;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (p = zone->pages; p != NULL; p = p->next)
    if (p->context_depth > depth)
      p->context_depth = depth;
}

/* Pop all the zone contexts.  */

void
ggc_pop_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ggc_pop_context_1 (zone);
}

/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  page_entry **pp, *p, *next;
  struct alloc_chunk *chunk, *last_free, *end;
  size_t last_free_size, allocated = 0;
  bool nomarksinpage;
  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  pp = &zone->pages;
  for (p = zone->pages; p; p = next)
    {
      next = p->next;
      /* Large pages are all or none affairs.  Either they are
	 completely empty, or they are completely full.

	 XXX: Should we bother to increment allocated?  */
      if (p->large_p)
	{
	  if (((struct alloc_chunk *)p->page)->mark == 1)
	    {
	      ((struct alloc_chunk *)p->page)->mark = 0;
	    }
	  else
	    {
	      *pp = next;
#ifdef ENABLE_GC_CHECKING
	      /* Poison the page.  */
	      memset (p->page, 0xb5, p->bytes);
#endif
	      free_page (p);
	    }
	  continue;
	}

      /* This page has now survived another collection.  */
      p->survived++;

      /* Which leaves full and partial pages.  Step through all chunks,
	 consolidate those that are free and insert them into the free
	 lists.  Note that consolidation slows down collection
	 slightly.  */

      chunk = (struct alloc_chunk *)p->page;
      end = (struct alloc_chunk *)(p->page + G.pagesize);
      last_free = NULL;
      last_free_size = 0;
      nomarksinpage = true;
      do
	{
	  prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
	  if (chunk->mark || p->context_depth < zone->context_depth)
	    {
	      nomarksinpage = false;
	      if (last_free)
		{
		  last_free->type = 0;
		  last_free->size = last_free_size;
		  last_free->mark = 0;
		  poison_chunk (last_free, last_free_size);
		  free_chunk (last_free, last_free_size, zone);
		  last_free = NULL;
		}
	      if (chunk->mark)
		{
		  allocated += chunk->size + CHUNK_OVERHEAD;
		}
	      chunk->mark = 0;
	    }
	  else
	    {
	      if (last_free)
		{
		  last_free_size += CHUNK_OVERHEAD + chunk->size;
		}
	      else
		{
		  last_free = chunk;
		  last_free_size = chunk->size;
		}
	    }

	  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
	}
      while (chunk < end);

      if (nomarksinpage)
	{
	  *pp = next;
#ifdef ENABLE_GC_CHECKING
	  /* Poison the page.  */
	  memset (p->page, 0xb5, p->bytes);
#endif
	  free_page (p);
	  continue;
	}
      else if (last_free)
	{
	  last_free->type = 0;
	  last_free->size = last_free_size;
	  last_free->mark = 0;
	  poison_chunk (last_free, last_free_size);
	  free_chunk (last_free, last_free_size, zone);
	}
      pp = &p->next;
    }

  zone->allocated = allocated;
}

/* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
  if (!zone->dead)
    {
      /* Avoid frequent unnecessary work by skipping collection if the
	 total allocations haven't expanded much since the last
	 collection.  */
      float allocated_last_gc =
	MAX (zone->allocated_last_gc,
	     (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

      float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (zone->allocated < allocated_last_gc + min_expand)
	return false;
    }

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
	     zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  /* Indicate that we've seen collections at this context depth.  */
  zone->context_depth_collections
    = ((unsigned long)1 << (zone->context_depth + 1)) - 1;
  if (need_marking)
    ggc_mark_roots ();
  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}

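/* A worked example of the trigger above, with hypothetical --param
   settings: if GGC_MIN_HEAPSIZE is 4096 (kilobytes) and
   GGC_MIN_EXPAND is 30 (percent), a zone that finished its last
   collection with 2 MB live has allocated_last_gc
   = MAX (2 MB, 4 MB) = 4 MB, and is collected again only once
   zone->allocated reaches 4 MB + 0.30 * 4 MB = 5.2 MB.  */
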
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  page_entry *p;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->survived;
    }
  return survival / count;
}

/* Check the magic cookies all of the chunks contain, to make sure we
   aren't doing anything stupid, like stomping on alloc_chunk
   structures.  */

static inline void
check_cookies (void)
{
#ifdef COOKIE_CHECKING
  page_entry *p;
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      for (p = zone->pages; p; p = p->next)
	{
	  if (!p->large_p)
	    {
	      struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
	      struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
	      do
		{
		  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
		    abort ();
		  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
		}
	      while (chunk < end);
	    }
	}
    }
#endif
}

/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;
  float f;

  timevar_push (TV_GC);
  check_cookies ();
  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
	{
	  check_cookies ();
	  zone->was_collected = false;
	  marked |= ggc_collect_1 (zone, !marked);
	}
    }

  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    {
	      f = calculate_average_page_survival (zone);
	      printf ("Average page survival in zone `%s' is %f\n",
		      zone->name, f);
	    }
	}
    }

  /* Since we don't mark zone at a time right now, marking in any
     zone means marking in every zone.  So we have to clear all the
     marks in all the zones that weren't collected already.  */
  if (marked)
    {
      page_entry *p;
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    continue;
	  for (p = zone->pages; p; p = p->next)
	    {
	      if (!p->large_p)
		{
		  struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
		  struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
		  do
		    {
		      prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
		      if (chunk->mark || p->context_depth < zone->context_depth)
			{
			  chunk->mark = 0;
			}
		      chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
		    }
		  while (chunk < end);
		}
	      else
		{
		  ((struct alloc_chunk *)p->page)->mark = 0;
		}
	    }
	}
    }

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
	{
	  struct alloc_zone *dead_zone = zone->next_zone;

	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

	  /* The zone must be empty.  */
	  if (dead_zone->allocated != 0)
	    abort ();

	  /* Unchain the dead zone, release all its pages and free it.  */
	  zone->next_zone = zone->next_zone->next_zone;
	  release_pages (dead_zone);
	  free (dead_zone);
	}
    }

  timevar_pop (TV_GC);
}

/* Print allocation statistics.  */

void
ggc_print_statistics (void)
{
}

struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned total;
  } d;
  size_t base;
  size_t written;
};

/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}

/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    {
      d->d.total += size + CHUNK_OVERHEAD;
    }
  else
    d->d.total += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  return d->d.total;
}

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  d->base = (size_t) base;
}

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string)
{
  char *result;
  result = (char *) d->base;
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
      if (chunk->large)
	d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
      else
	d->base += chunk->size + CHUNK_OVERHEAD;
      return result + CHUNK_OVERHEAD;
    }
  else
    {
      d->base += size;
      return result;
    }
}

/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
      size = ggc_get_size (x);
      if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");
      d->written += size + CHUNK_OVERHEAD;
    }
  else
    {
      if (fwrite (x, size, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");
      d->written += size;
    }
}

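/* Finish writing the PCH data: emit the on-disk header recording the
   total size, and release D.  */
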
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}

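/* Read back the PCH data, which the caller has placed at ADDR, and
   register it as a single page in a fresh "PCH zone".  */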
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  struct page_entry *entry;
  struct alloc_zone *pch_zone;
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");
  entry = xcalloc (1, sizeof (struct page_entry));
  entry->bytes = d.total;
  entry->page = addr;
  entry->context_depth = 0;
  pch_zone = new_ggc_zone ("PCH zone");
  entry->zone = pch_zone;
  entry->next = entry->zone->pages;
  entry->zone->pages = entry;
}