1 | /* "Bag-of-pages" zone garbage collector for the GNU compiler. |
2 | Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. | |
3 | Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin (dberlin@dberlin.org) | |
4 | ||
5 | ||
6 | This file is part of GCC. | |
7 | ||
8 | GCC is free software; you can redistribute it and/or modify it under | |
9 | the terms of the GNU General Public License as published by the Free | |
10 | Software Foundation; either version 2, or (at your option) any later | |
11 | version. | |
12 | ||
13 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
14 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
15 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 | for more details. | |
17 | ||
18 | You should have received a copy of the GNU General Public License | |
19 | along with GCC; see the file COPYING. If not, write to the Free | |
20 | Software Foundation, 59 Temple Place - Suite 330, Boston, MA | |
21 | 02111-1307, USA. */ | |
22 | ||
23 | #include "config.h" | |
24 | #include "system.h" | |
25 | #include "coretypes.h" | |
26 | #include "tm.h" | |
27 | #include "tree.h" | |
28 | #include "rtl.h" | |
29 | #include "tm_p.h" | |
30 | #include "toplev.h" | |
31 | #include "varray.h" | |
32 | #include "flags.h" | |
33 | #include "ggc.h" | |
34 | #include "timevar.h" | |
35 | #include "params.h" | |
36 | #include "bitmap.h" | |
37 | ||
38 | #ifdef ENABLE_VALGRIND_CHECKING | |
39 | # ifdef HAVE_VALGRIND_MEMCHECK_H |
40 | # include <valgrind/memcheck.h> | |
41 | # elif defined HAVE_MEMCHECK_H | |
42 | # include <memcheck.h> | |
43 | # else | |
44 | # include <valgrind.h> | |
45 | # endif | |
46 | #else |
47 | /* Avoid #ifdef:s when we can help it. */ | |
48 | #define VALGRIND_DISCARD(x) | |
49 | #define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z) | |
50 | #define VALGRIND_FREELIKE_BLOCK(x,y) | |
51 | #endif | |
52 | /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a | |
53 | file open. Prefer either to valloc. */ | |
54 | #ifdef HAVE_MMAP_ANON | |
55 | # undef HAVE_MMAP_DEV_ZERO | |
56 | ||
57 | # include <sys/mman.h> | |
58 | # ifndef MAP_FAILED | |
59 | # define MAP_FAILED -1 | |
60 | # endif | |
61 | # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON) | |
62 | # define MAP_ANONYMOUS MAP_ANON | |
63 | # endif | |
64 | # define USING_MMAP | |
65 | ||
66 | #endif | |
67 | ||
68 | #ifdef HAVE_MMAP_DEV_ZERO | |
69 | ||
70 | # include <sys/mman.h> | |
71 | # ifndef MAP_FAILED | |
72 | # define MAP_FAILED -1 | |
73 | # endif | |
74 | # define USING_MMAP | |
75 | ||
76 | #endif | |
77 | ||
78 | #ifndef USING_MMAP | |
79 | #define USING_MALLOC_PAGE_GROUPS | |
80 | #endif | |
81 | ||
82 | #if (GCC_VERSION < 3001) | |
83 | #define prefetch(X) ((void) X) | |
84 | #else | |
85 | #define prefetch(X) __builtin_prefetch (X) | |
86 | #endif | |
87 | ||
88 | /* NOTES: | |
89 | If we track inter-zone pointers, we can mark single zones at a | |
90 | time. | |
91 | If we have a zone where we guarantee no inter-zone pointers, we | |
92 | could mark that zone separately. | |
93 | The garbage zone should not be marked, and we should return 1 in | |
94 | ggc_set_mark for any object in the garbage zone, which cuts off | |
95 | marking quickly. */ | |
96 | /* Strategy: | |
97 | ||
98 | This garbage-collecting allocator segregates objects into zones. | |
99 | It also segregates objects into "large" and "small" bins. Large | |
100 | objects are greater or equal to page size. | |
101 | ||
102 | Pages for small objects are broken up into chunks, each of which | |
103 | are described by a struct alloc_chunk. One can walk over all | |
104 | chunks on the page by adding the chunk size to the chunk's data | |
105 | address. The free space for a page exists in the free chunk bins. | |
106 | ||
107 | Each page-entry also has a context depth, which is used to track | |
108 | pushing and popping of allocation contexts. Only objects allocated | |
109 | in the current (highest-numbered) context may be collected. | |
110 | ||
111 | Empty pages (of all sizes) are kept on a single page cache list, | |
112 | and are considered first when new pages are required; they are | |
113 | deallocated at the start of the next collection if they haven't | |
114 | been recycled by then. */ | |
115 | ||
116 | /* Define GGC_DEBUG_LEVEL to print debugging information. | |
117 | 0: No debugging output. | |
118 | 1: GC statistics only. | |
119 | 2: Page-entry allocations/deallocations as well. | |
120 | 3: Object allocations as well. | |
121 | 4: Object marks as well. */ | |
122 | #define GGC_DEBUG_LEVEL (0) | |
123 | ||
124 | #ifndef HOST_BITS_PER_PTR | |
125 | #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG | |
126 | #endif | |
127 | #ifdef COOKIE_CHECKING | |
128 | #define CHUNK_MAGIC 0x95321123 | |
129 | #define DEADCHUNK_MAGIC 0x12817317 | |
130 | #endif | |
131 | ||
132 | /* This structure manages small chunks. When the chunk is free, it's | |
133 | linked with other chunks via free_next. When the chunk is allocated, | |
134 | the data starts at u. Large chunks are allocated one at a time to | |
135 | their own page, and so don't come in here. | |
136 | ||
137 | The "type" field is a placeholder for a future change to do | |
138 | generational collection. At present it is 0 when free and | |
139 | 1 when allocated. */ | |
140 | ||
141 | struct alloc_chunk { | |
142 | #ifdef COOKIE_CHECKING | |
143 | unsigned int magic; | |
144 | #endif | |
145 | unsigned int type:1; | |
146 | unsigned int typecode:15; | |
147 | unsigned int size:15; | |
148 | unsigned int mark:1; | |
149 | union { | |
150 | struct alloc_chunk *next_free; | |
151 | char data[1]; | |
152 | ||
153 | /* Make sure the data is sufficiently aligned. */ | |
154 | HOST_WIDEST_INT align_i; | |
155 | #ifdef HAVE_LONG_DOUBLE | |
156 | long double align_d; | |
157 | #else | |
158 | double align_d; | |
159 | #endif | |
160 | } u; | |
161 | } __attribute__ ((packed)); | |
162 | ||
163 | #define CHUNK_OVERHEAD (offsetof (struct alloc_chunk, u)) | |
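| /* Since the struct is packed and the four bitfields above total exactly | |
| 32 bits, CHUNK_OVERHEAD is normally 4 bytes (8 when COOKIE_CHECKING adds | |
| the magic word), so each small object pays only one word of header. */ | |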
164 | ||
165 | /* We maintain several bins of free lists for chunks for very small | |
166 | objects. We never exhaustively search other bins -- if we don't | |
167 | find one of the proper size, we allocate from the "larger" bin. */ | |
168 | ||
169 | /* Decreasing the number of free bins increases the time it takes to allocate. | |
170 | The same is true of increasing MAX_FREE_BIN_SIZE without increasing NUM_FREE_BINS. | |
171 | ||
172 | After much histogramming of allocation sizes and time spent on gc, | |
173 | on a PowerPC G4 7450 at 667 MHz and a Pentium 4 at 2.8 GHz, | |
174 | these were determined to be the optimal values. */ | |
175 | #define NUM_FREE_BINS 64 | |
176 | #define MAX_FREE_BIN_SIZE 256 | |
177 | #define FREE_BIN_DELTA (MAX_FREE_BIN_SIZE / NUM_FREE_BINS) | |
178 | #define SIZE_BIN_UP(SIZE) (((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA) | |
179 | #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA) | |
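| /* For example, with the settings above FREE_BIN_DELTA is 256 / 64 = 4, so | |
| bin N serves chunks of roughly N * 4 bytes. SIZE_BIN_UP (10) is | |
| (10 + 3) / 4 = 3 and SIZE_BIN_DOWN (10) is 10 / 4 = 2: allocation rounds | |
| up so the bin it picks always holds big-enough chunks, while freeing | |
| rounds down so a chunk is never filed in a bin that promises more bytes | |
| than the chunk actually has. */ | |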
180 | ||
181 | /* Marker used as chunk->size for a large object. Should correspond | |
182 | to the size of the bitfield above. */ | |
183 | #define LARGE_OBJECT_SIZE 0x7fff | |
184 | ||
185 | /* We use this structure to determine the alignment required for | |
186 | allocations. For power-of-two sized allocations, that's not a | |
187 | problem, but it does matter for odd-sized allocations. */ | |
188 | ||
189 | struct max_alignment { | |
190 | char c; | |
191 | union { | |
192 | HOST_WIDEST_INT i; | |
193 | #ifdef HAVE_LONG_DOUBLE | |
194 | long double d; | |
195 | #else | |
196 | double d; | |
197 | #endif | |
198 | } u; | |
199 | }; | |
200 | ||
201 | /* The biggest alignment required. */ | |
202 | ||
203 | #define MAX_ALIGNMENT (offsetof (struct max_alignment, u)) | |
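| /* The compiler must place the union u at the first offset after the char | |
| that satisfies the alignment of its most demanding member, so the offsetof | |
| above yields that alignment; e.g. if long double needs 8-byte alignment, | |
| u lands at offset 8 and MAX_ALIGNMENT is 8. */ | |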
204 | ||
205 | /* Compute the smallest nonnegative number which when added to X gives | |
206 | a multiple of F. */ | |
207 | ||
208 | #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f)) | |
209 | ||
210 | /* Compute the smallest multiple of F that is >= X. */ | |
211 | ||
212 | #define ROUND_UP(x, f) (CEIL (x, f) * (f)) | |
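| /* For instance, ROUND_UP_VALUE (10, 8) is 7 - ((7 + 10) % 8) = 6, and | |
| ROUND_UP (10, 8) is 16. */ | |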
213 | ||
214 | /* A two-level tree is used to look up the page-entry for a given | |
215 | pointer. Two chunks of the pointer's bits are extracted to index | |
216 | the first and second levels of the tree, as follows: | |
217 | ||
218 | msb [ PAGE_L1_BITS ][ PAGE_L2_BITS ][ HOST_PAGE_SIZE_BITS of page offset ] lsb | |
225 | ||
226 | The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry | |
227 | pages are aligned on system page boundaries. The next most | |
228 | significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first | |
229 | index values in the lookup table, respectively. | |
230 | ||
231 | For 32-bit architectures and the settings below, there are no | |
232 | leftover bits. For architectures with wider pointers, the lookup | |
233 | tree points to a list of pages, which must be scanned to find the | |
234 | correct one. */ | |
235 | ||
236 | #define PAGE_L1_BITS (8) | |
237 | #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize) | |
238 | #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS) | |
239 | #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS) | |
240 | ||
241 | #define LOOKUP_L1(p) \ | |
242 | (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1)) | |
243 | ||
244 | #define LOOKUP_L2(p) \ | |
245 | (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1)) | |
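| /* As an illustration, assume 4K pages (G.lg_pagesize == 12): PAGE_L2_BITS | |
| is then 32 - 8 - 12 = 12, and for p == 0x12345678, LOOKUP_L1 (p) is 0x12 | |
| (the top 8 bits) and LOOKUP_L2 (p) is 0x345 (the next 12 bits); the low | |
| 12 bits are just the offset within the page. */ | |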
246 | ||
247 | struct alloc_zone; | |
248 | /* A page_entry records the status of an allocation page. */ | |
249 | typedef struct page_entry | |
250 | { | |
251 | /* The next page-entry with objects of the same size, or NULL if | |
252 | this is the last page-entry. */ | |
253 | struct page_entry *next; | |
254 | ||
255 | /* The number of bytes allocated. (This will always be a multiple | |
256 | of the host system page size.) */ | |
257 | size_t bytes; | |
258 | ||
259 | /* How many collections we've survived. */ | |
260 | size_t survived; | |
261 | ||
262 | /* The address at which the memory is allocated. */ | |
263 | char *page; | |
264 | ||
265 | #ifdef USING_MALLOC_PAGE_GROUPS | |
266 | /* Back pointer to the page group this page came from. */ | |
267 | struct page_group *group; | |
268 | #endif | |
269 | ||
270 | /* Number of bytes on the page unallocated. Only used during | |
271 | collection, and even then large pages merely set this non-zero. */ | |
272 | size_t bytes_free; | |
273 | ||
274 | /* Context depth of this page. */ | |
275 | unsigned short context_depth; | |
276 | ||
277 | /* Does this page contain small objects, or one large object? */ | |
278 | bool large_p; | |
279 | ||
280 | struct alloc_zone *zone; | |
281 | } page_entry; | |
282 | ||
283 | #ifdef USING_MALLOC_PAGE_GROUPS | |
284 | /* A page_group describes a large allocation from malloc, from which | |
285 | we parcel out aligned pages. */ | |
286 | typedef struct page_group | |
287 | { | |
288 | /* A linked list of all extant page groups. */ | |
289 | struct page_group *next; | |
290 | ||
291 | /* The address we received from malloc. */ | |
292 | char *allocation; | |
293 | ||
294 | /* The size of the block. */ | |
295 | size_t alloc_size; | |
296 | ||
297 | /* A bitmask of pages in use. */ | |
298 | unsigned int in_use; | |
299 | } page_group; | |
300 | #endif | |
301 | ||
302 | #if HOST_BITS_PER_PTR <= 32 | |
303 | ||
304 | /* On 32-bit hosts, we use a two level page table, as pictured above. */ | |
305 | typedef page_entry **page_table[PAGE_L1_SIZE]; | |
306 | ||
307 | #else | |
308 | ||
309 | /* On 64-bit hosts, we use the same two level page tables plus a linked | |
310 | list that disambiguates the top 32-bits. There will almost always be | |
311 | exactly one entry in the list. */ | |
312 | typedef struct page_table_chain | |
313 | { | |
314 | struct page_table_chain *next; | |
315 | size_t high_bits; | |
316 | page_entry **table[PAGE_L1_SIZE]; | |
317 | } *page_table; | |
318 | ||
319 | #endif | |
320 | ||
321 | /* The global variables. */ | |
322 | static struct globals | |
323 | { | |
324 | /* The page lookup table. A single page can only belong to one | |
325 | zone. This means free pages are zone-specific ATM. */ | |
326 | page_table lookup; | |
327 | /* The linked list of zones. */ | |
328 | struct alloc_zone *zones; | |
329 | ||
330 | /* The system's page size. */ | |
331 | size_t pagesize; | |
332 | size_t lg_pagesize; | |
333 | ||
334 | /* A file descriptor open to /dev/zero for reading. */ | |
335 | #if defined (HAVE_MMAP_DEV_ZERO) | |
336 | int dev_zero_fd; | |
337 | #endif | |
338 | ||
339 | /* The file descriptor for debugging output. */ | |
340 | FILE *debug_file; | |
341 | } G; | |
342 | ||
343 | /* The zone allocation structure. */ | |
344 | struct alloc_zone | |
345 | { | |
346 | /* Name of the zone. */ | |
347 | const char *name; | |
348 | ||
349 | /* Linked list of pages in a zone. */ | |
350 | page_entry *pages; | |
351 | ||
352 | /* Linked lists of free storage. Slots 1 ... NUM_FREE_BINS have chunks of size | |
353 | FREE_BIN_DELTA. All other chunks are in slot 0. */ | |
354 | struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1]; | |
355 | ||
356 | /* Bytes currently allocated. */ | |
357 | size_t allocated; | |
358 | ||
359 | /* Bytes currently allocated at the end of the last collection. */ | |
360 | size_t allocated_last_gc; | |
361 | ||
362 | /* Total amount of memory mapped. */ | |
363 | size_t bytes_mapped; | |
364 | ||
365 | /* Bit N set if any allocations have been done at context depth N. */ | |
366 | unsigned long context_depth_allocations; | |
367 | ||
368 | /* Bit N set if any collections have been done at context depth N. */ | |
369 | unsigned long context_depth_collections; | |
370 | ||
371 | /* The current depth in the context stack. */ | |
372 | unsigned short context_depth; | |
373 | ||
374 | /* A cache of free system pages. */ | |
375 | page_entry *free_pages; | |
376 | ||
377 | #ifdef USING_MALLOC_PAGE_GROUPS | |
378 | page_group *page_groups; | |
379 | #endif | |
380 | ||
381 | /* Next zone in the linked list of zones. */ | |
382 | struct alloc_zone *next_zone; | |
383 | ||
384 | /* Return true if this zone was collected during this collection. */ | |
385 | bool was_collected; | |
386 | } main_zone; | |
387 | ||
388 | struct alloc_zone *rtl_zone; | |
389 | struct alloc_zone *garbage_zone; | |
390 | struct alloc_zone *tree_zone; | |
391 | ||
392 | /* Allocate pages in chunks of this size, to throttle calls to memory | |
393 | allocation routines. The first page is used, the rest go onto the | |
394 | free list. This cannot be larger than HOST_BITS_PER_INT for the | |
395 | in_use bitmask for page_group. */ | |
396 | #define GGC_QUIRE_SIZE 16 | |
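| /* With 4K pages, for instance, each trip to the OS grabs 64K at once: one | |
| page satisfies the current request and the other fifteen are parked on | |
| the zone's free-page list. */ | |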
397 | ||
398 | static int ggc_allocated_p (const void *); | |
399 | static page_entry *lookup_page_table_entry (const void *); | |
400 | static void set_page_table_entry (void *, page_entry *); | |
401 | #ifdef USING_MMAP | |
402 | static char *alloc_anon (char *, size_t, struct alloc_zone *); | |
403 | #endif | |
404 | #ifdef USING_MALLOC_PAGE_GROUPS | |
405 | static size_t page_group_index (char *, char *); | |
406 | static void set_page_group_in_use (page_group *, char *); | |
407 | static void clear_page_group_in_use (page_group *, char *); | |
408 | #endif | |
409 | static struct page_entry * alloc_small_page ( struct alloc_zone *); | |
410 | static struct page_entry * alloc_large_page (size_t, struct alloc_zone *); | |
411 | static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *); | |
412 | static void free_page (struct page_entry *); | |
413 | static void release_pages (struct alloc_zone *); | |
414 | static void sweep_pages (struct alloc_zone *); | |
415 | static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short); | |
416 | static bool ggc_collect_1 (struct alloc_zone *, bool); | |
417 | static void check_cookies (void); | |
418 | ||
419 | ||
420 | /* Returns nonzero if P was allocated in GC'able memory. */ | |
421 | ||
422 | static inline int | |
423 | ggc_allocated_p (const void *p) | |
424 | { | |
425 | page_entry ***base; | |
426 | size_t L1, L2; | |
427 | ||
428 | #if HOST_BITS_PER_PTR <= 32 | |
429 | base = &G.lookup[0]; | |
430 | #else | |
431 | page_table table = G.lookup; | |
432 | size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff; | |
433 | while (1) | |
434 | { | |
435 | if (table == NULL) | |
436 | return 0; | |
437 | if (table->high_bits == high_bits) | |
438 | break; | |
439 | table = table->next; | |
440 | } | |
441 | base = &table->table[0]; | |
442 | #endif | |
443 | ||
444 | /* Extract the level 1 and 2 indices. */ | |
445 | L1 = LOOKUP_L1 (p); | |
446 | L2 = LOOKUP_L2 (p); | |
447 | ||
448 | return base[L1] && base[L1][L2]; | |
449 | } | |
450 | ||
451 | /* Traverse the page table and find the entry for a page. | |
452 | Die (probably) if the object wasn't allocated via GC. */ | |
453 | ||
454 | static inline page_entry * | |
455 | lookup_page_table_entry(const void *p) | |
456 | { | |
457 | page_entry ***base; | |
458 | size_t L1, L2; | |
459 | ||
460 | #if HOST_BITS_PER_PTR <= 32 | |
461 | base = &G.lookup[0]; | |
462 | #else | |
463 | page_table table = G.lookup; | |
464 | size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff; | |
465 | while (table->high_bits != high_bits) | |
466 | table = table->next; | |
467 | base = &table->table[0]; | |
468 | #endif | |
469 | ||
470 | /* Extract the level 1 and 2 indices. */ | |
471 | L1 = LOOKUP_L1 (p); | |
472 | L2 = LOOKUP_L2 (p); | |
473 | ||
474 | return base[L1][L2]; | |
475 | ||
476 | } | |
477 | ||
478 | /* Set the page table entry for a page. */ | |
479 | ||
480 | static void | |
481 | set_page_table_entry(void *p, page_entry *entry) | |
482 | { | |
483 | page_entry ***base; | |
484 | size_t L1, L2; | |
485 | ||
486 | #if HOST_BITS_PER_PTR <= 32 | |
487 | base = &G.lookup[0]; | |
488 | #else | |
489 | page_table table; | |
490 | size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff; | |
491 | for (table = G.lookup; table; table = table->next) | |
492 | if (table->high_bits == high_bits) | |
493 | goto found; | |
494 | ||
495 | /* Not found -- allocate a new table. */ | |
496 | table = (page_table) xcalloc (1, sizeof(*table)); | |
497 | table->next = G.lookup; | |
498 | table->high_bits = high_bits; | |
499 | G.lookup = table; | |
500 | found: | |
501 | base = &table->table[0]; | |
502 | #endif | |
503 | ||
504 | /* Extract the level 1 and 2 indices. */ | |
505 | L1 = LOOKUP_L1 (p); | |
506 | L2 = LOOKUP_L2 (p); | |
507 | ||
508 | if (base[L1] == NULL) | |
509 | base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *)); | |
510 | ||
511 | base[L1][L2] = entry; | |
512 | } | |
513 | ||
514 | #ifdef USING_MMAP | |
515 | /* Allocate SIZE bytes of anonymous memory, preferably near PREF | |
516 | (if non-null). The ifdef structure here is intended to cause a | |
517 | compile error unless exactly one of the HAVE_* is defined. */ | |
518 | ||
519 | static inline char * | |
520 | alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone) | |
521 | { | |
522 | #ifdef HAVE_MMAP_ANON | |
523 | char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE, | |
524 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | |
525 | #endif | |
526 | #ifdef HAVE_MMAP_DEV_ZERO | |
527 | char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE, | |
528 | MAP_PRIVATE, G.dev_zero_fd, 0); | |
529 | #endif | |
530 | VALGRIND_MALLOCLIKE_BLOCK(page, size, 0, 0); | |
531 | ||
532 | if (page == (char *) MAP_FAILED) | |
533 | { | |
534 | perror ("virtual memory exhausted"); | |
535 | exit (FATAL_EXIT_CODE); | |
536 | } | |
537 | ||
538 | /* Remember that we allocated this memory. */ | |
539 | zone->bytes_mapped += size; | |
540 | /* Pretend we don't have access to the allocated pages. We'll enable | |
541 | access to smaller pieces of the area in ggc_alloc. Discard the | |
542 | handle to avoid handle leak. */ | |
543 | VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size)); | |
544 | return page; | |
545 | } | |
546 | #endif | |
547 | #ifdef USING_MALLOC_PAGE_GROUPS | |
548 | /* Compute the index for this page into the page group. */ | |
549 | ||
550 | static inline size_t | |
551 | page_group_index (char *allocation, char *page) | |
552 | { | |
553 | return (size_t) (page - allocation) >> G.lg_pagesize; | |
554 | } | |
555 | ||
556 | /* Set and clear the in_use bit for this page in the page group. */ | |
557 | ||
558 | static inline void | |
559 | set_page_group_in_use (page_group *group, char *page) | |
560 | { | |
561 | group->in_use |= 1 << page_group_index (group->allocation, page); | |
562 | } | |
563 | ||
564 | static inline void | |
565 | clear_page_group_in_use (page_group *group, char *page) | |
566 | { | |
567 | group->in_use &= ~(1 << page_group_index (group->allocation, page)); | |
568 | } | |
569 | #endif | |
570 | ||
571 | /* Allocate a new small page for ZONE, and return an entry | |
572 | for it. The entry is not added to ZONE's list of pages; the | |
573 | caller is responsible for that. */ | |
574 | ||
575 | static inline struct page_entry * | |
576 | alloc_small_page (struct alloc_zone *zone) | |
577 | { | |
578 | struct page_entry *entry; | |
579 | char *page; | |
580 | #ifdef USING_MALLOC_PAGE_GROUPS | |
581 | page_group *group; | |
582 | #endif | |
583 | ||
584 | page = NULL; | |
585 | ||
586 | /* Check the list of free pages for one we can use. */ | |
587 | entry = zone->free_pages; | |
588 | if (entry != NULL) | |
589 | { | |
590 | /* Recycle the allocated memory from this page ... */ | |
591 | zone->free_pages = entry->next; | |
592 | page = entry->page; | |
593 | ||
594 | #ifdef USING_MALLOC_PAGE_GROUPS | |
595 | group = entry->group; | |
596 | #endif | |
597 | } | |
598 | #ifdef USING_MMAP | |
599 | else | |
600 | { | |
601 | /* We want just one page. Allocate a bunch of them and put the | |
602 | extras on the freelist. (Can only do this optimization with | |
603 | mmap for backing store.) */ | |
604 | struct page_entry *e, *f = zone->free_pages; | |
605 | int i; | |
606 | ||
607 | page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone); | |
608 | ||
609 | /* This loop counts down so that the chain will be in ascending | |
610 | memory order. */ | |
611 | for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--) | |
612 | { | |
613 | e = (struct page_entry *) xmalloc (sizeof (struct page_entry)); | |
614 | e->bytes = G.pagesize; | |
615 | e->page = page + (i << G.lg_pagesize); | |
616 | e->next = f; | |
617 | f = e; | |
618 | } | |
619 | ||
620 | zone->free_pages = f; | |
621 | } | |
622 | #endif | |
623 | #ifdef USING_MALLOC_PAGE_GROUPS | |
624 | else | |
625 | { | |
626 | /* Allocate a large block of memory and serve out the aligned | |
627 | pages therein. This results in much less memory wastage | |
628 | than the traditional implementation of valloc. */ | |
629 | ||
630 | char *allocation, *a, *enda; | |
631 | size_t alloc_size, head_slop, tail_slop; | |
632 | size_t entry_size = G.pagesize; /* Small pages are always one system page. */ | |
633 | int multiple_pages = (entry_size == G.pagesize); | |
633 | ||
634 | if (multiple_pages) | |
635 | alloc_size = GGC_QUIRE_SIZE * G.pagesize; | |
636 | else | |
637 | alloc_size = entry_size + G.pagesize - 1; | |
638 | allocation = xmalloc (alloc_size); | |
639 | VALGRIND_MALLOCLIKE_BLOCK (allocation, alloc_size, 0, 0); | |
640 | ||
641 | page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize); | |
642 | head_slop = page - allocation; | |
643 | if (multiple_pages) | |
644 | tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1); | |
645 | else | |
646 | tail_slop = alloc_size - entry_size - head_slop; | |
647 | enda = allocation + alloc_size - tail_slop; | |
648 | ||
649 | /* We allocated N pages, which are likely not aligned, leaving | |
650 | us with N-1 usable pages. We plan to place the page_group | |
651 | structure somewhere in the slop. */ | |
652 | if (head_slop >= sizeof (page_group)) | |
653 | group = (page_group *)page - 1; | |
654 | else | |
655 | { | |
656 | /* We magically got an aligned allocation. Too bad, we have | |
657 | to waste a page anyway. */ | |
658 | if (tail_slop == 0) | |
659 | { | |
660 | enda -= G.pagesize; | |
661 | tail_slop += G.pagesize; | |
662 | } | |
663 | if (tail_slop < sizeof (page_group)) | |
664 | abort (); | |
665 | group = (page_group *)enda; | |
666 | tail_slop -= sizeof (page_group); | |
667 | } | |
668 | ||
669 | /* Remember that we allocated this memory. */ | |
670 | group->next = zone->page_groups; | |
671 | group->allocation = allocation; | |
672 | group->alloc_size = alloc_size; | |
673 | group->in_use = 0; | |
674 | zone->page_groups = group; | |
675 | zone->bytes_mapped += alloc_size; | |
676 | ||
677 | /* If we allocated multiple pages, put the rest on the free list. */ | |
678 | if (multiple_pages) | |
679 | { | |
680 | struct page_entry *e, *f = zone->free_pages; | |
681 | for (a = enda - G.pagesize; a != page; a -= G.pagesize) | |
682 | { | |
683 | e = (struct page_entry *) xmalloc (sizeof (struct page_entry)); | |
684 | e->bytes = G.pagesize; | |
685 | e->page = a; | |
686 | e->group = group; | |
687 | e->next = f; | |
688 | f = e; | |
689 | } | |
690 | zone->free_pages = f; | |
691 | } | |
692 | } | |
693 | #endif | |
694 | ||
695 | if (entry == NULL) | |
696 | entry = (struct page_entry *) xmalloc (sizeof (struct page_entry)); | |
697 | ||
698 | entry->next = 0; | |
699 | entry->bytes = G.pagesize; | |
700 | entry->bytes_free = G.pagesize; | |
701 | entry->page = page; | |
702 | entry->context_depth = zone->context_depth; | |
703 | entry->large_p = false; | |
704 | entry->zone = zone; | |
705 | zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth; | |
706 | ||
707 | #ifdef USING_MALLOC_PAGE_GROUPS | |
708 | entry->group = group; | |
709 | set_page_group_in_use (group, page); | |
710 | #endif | |
711 | ||
712 | set_page_table_entry (page, entry); | |
713 | ||
714 | if (GGC_DEBUG_LEVEL >= 2) | |
715 | fprintf (G.debug_file, | |
716 | "Allocating %s page at %p, data %p-%p\n", entry->zone->name, | |
717 | (PTR) entry, page, page + G.pagesize - 1); | |
718 | ||
719 | return entry; | |
720 | } | |
721 | ||
722 | /* Allocate a large page of size SIZE in ZONE. */ | |
723 | ||
724 | static inline struct page_entry * | |
725 | alloc_large_page (size_t size, struct alloc_zone *zone) | |
726 | { | |
727 | struct page_entry *entry; | |
728 | char *page; | |
729 | ||
730 | page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry)); | |
731 | entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD); | |
732 | ||
733 | entry->next = 0; | |
734 | entry->bytes = size; | |
735 | entry->bytes_free = LARGE_OBJECT_SIZE + CHUNK_OVERHEAD; | |
736 | entry->page = page; | |
737 | entry->context_depth = zone->context_depth; | |
738 | entry->large_p = true; | |
739 | entry->zone = zone; | |
740 | zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth; | |
741 | ||
742 | #ifdef USING_MALLOC_PAGE_GROUPS | |
743 | entry->group = NULL; | |
744 | #endif | |
745 | set_page_table_entry (page, entry); | |
746 | ||
747 | if (GGC_DEBUG_LEVEL >= 2) | |
748 | fprintf (G.debug_file, | |
749 | "Allocating %s large page at %p, data %p-%p\n", entry->zone->name, | |
750 | (PTR) entry, page, page + size - 1); | |
751 | ||
752 | return entry; | |
753 | } | |
754 | ||
755 | ||
756 | /* For a page that is no longer needed, put it on the free page list. */ | |
757 | ||
758 | static inline void | |
759 | free_page (page_entry *entry) | |
760 | { | |
761 | if (GGC_DEBUG_LEVEL >= 2) | |
762 | fprintf (G.debug_file, | |
763 | "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry, | |
764 | entry->page, entry->page + entry->bytes - 1); | |
765 | ||
766 | set_page_table_entry (entry->page, NULL); | |
767 | ||
768 | if (entry->large_p) | |
769 | { | |
770 | free (entry->page); | |
771 | VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes); | |
772 | } | |
773 | else | |
774 | { | |
775 | /* Mark the page as inaccessible. Discard the handle to | |
776 | avoid handle leak. */ | |
777 | VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes)); | |
778 | ||
779 | #ifdef USING_MALLOC_PAGE_GROUPS | |
780 | clear_page_group_in_use (entry->group, entry->page); | |
781 | #endif | |
782 | ||
783 | entry->next = entry->zone->free_pages; | |
784 | entry->zone->free_pages = entry; | |
785 | } | |
786 | } | |
787 | ||
788 | /* Release the free page cache to the system. */ | |
789 | ||
790 | static void | |
791 | release_pages (struct alloc_zone *zone) | |
792 | { | |
793 | #ifdef USING_MMAP | |
794 | page_entry *p, *next; | |
795 | char *start; | |
796 | size_t len; | |
797 | ||
798 | /* Gather up adjacent pages so they are unmapped together. */ | |
799 | p = zone->free_pages; | |
800 | ||
801 | while (p) | |
802 | { | |
803 | start = p->page; | |
804 | next = p->next; | |
805 | len = p->bytes; | |
806 | free (p); | |
807 | p = next; | |
808 | ||
809 | while (p && p->page == start + len) | |
810 | { | |
811 | next = p->next; | |
812 | len += p->bytes; | |
813 | free (p); | |
814 | p = next; | |
815 | } | |
816 | ||
817 | munmap (start, len); | |
818 | zone->bytes_mapped -= len; | |
819 | } | |
820 | ||
821 | zone->free_pages = NULL; | |
822 | #endif | |
823 | #ifdef USING_MALLOC_PAGE_GROUPS | |
824 | page_entry **pp, *p; | |
825 | page_group **gp, *g; | |
826 | ||
827 | /* Remove all pages from free page groups from the list. */ | |
828 | pp = &(zone->free_pages); | |
829 | while ((p = *pp) != NULL) | |
830 | if (p->group->in_use == 0) | |
831 | { | |
832 | *pp = p->next; | |
833 | free (p); | |
834 | } | |
835 | else | |
836 | pp = &p->next; | |
837 | ||
838 | /* Remove all free page groups, and release the storage. */ | |
839 | gp = &(zone->page_groups); | |
840 | while ((g = *gp) != NULL) | |
841 | if (g->in_use == 0) | |
842 | { | |
843 | *gp = g->next; | |
844 | zone->bytes_mapped -= g->alloc_size; | |
845 | free (g->allocation); | |
846 | VALGRIND_FREELIKE_BLOCK(g->allocation, 0); | |
847 | } | |
848 | else | |
849 | gp = &g->next; | |
850 | #endif | |
851 | } | |
852 | ||
853 | /* Place CHUNK of size SIZE on the free list for ZONE. */ | |
854 | ||
855 | static inline void | |
856 | free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone) | |
857 | { | |
858 | size_t bin = 0; | |
859 | ||
860 | bin = SIZE_BIN_DOWN (size); | |
861 | if (bin == 0) | |
862 | abort (); | |
863 | if (bin > NUM_FREE_BINS) | |
864 | bin = 0; | |
865 | #ifdef COOKIE_CHECKING | |
866 | if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC) | |
867 | abort (); | |
868 | chunk->magic = DEADCHUNK_MAGIC; | |
869 | #endif | |
870 | chunk->u.next_free = zone->free_chunks[bin]; | |
871 | zone->free_chunks[bin] = chunk; | |
872 | if (GGC_DEBUG_LEVEL >= 3) | |
873 | fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk); | |
874 | VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk))); | |
875 | } | |
876 | ||
877 | /* Allocate a chunk of memory of SIZE bytes. */ | |
878 | ||
879 | static void * | |
880 | ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type) | |
881 | { | |
882 | size_t bin = 0; | |
883 | size_t lsize = 0; | |
884 | struct page_entry *entry; | |
885 | struct alloc_chunk *chunk, *lchunk, **pp; | |
886 | void *result; | |
887 | ||
888 | /* Align size, so that we're assured of aligned allocations. */ | |
889 | if (size < FREE_BIN_DELTA) | |
890 | size = FREE_BIN_DELTA; | |
891 | size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT; | |
892 | ||
893 | /* Large objects are handled specially. */ | |
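| /* With 4K pages and a 4-byte CHUNK_OVERHEAD, for example, any request of | |
| 4084 bytes or more takes this path and receives a page of its own. */ | |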
894 | if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA) | |
895 | { | |
896 | entry = alloc_large_page (size, zone); | |
897 | entry->survived = 0; | |
898 | entry->next = entry->zone->pages; | |
899 | entry->zone->pages = entry; | |
900 | ||
901 | ||
902 | chunk = (struct alloc_chunk *) entry->page; | |
903 | VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk))); | |
904 | chunk->size = LARGE_OBJECT_SIZE; | |
905 | ||
906 | goto found; | |
907 | } | |
908 | ||
909 | /* First look for a tiny object already segregated into its own | |
910 | size bucket. */ | |
911 | bin = SIZE_BIN_UP (size); | |
912 | if (bin <= NUM_FREE_BINS) | |
913 | { | |
914 | chunk = zone->free_chunks[bin]; | |
915 | if (chunk) | |
916 | { | |
917 | zone->free_chunks[bin] = chunk->u.next_free; | |
918 | VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk))); | |
919 | goto found; | |
920 | } | |
921 | } | |
922 | ||
923 | /* Failing that, look through the "other" bucket for a chunk | |
924 | that is large enough. */ | |
925 | pp = &(zone->free_chunks[0]); | |
926 | chunk = *pp; | |
927 | while (chunk && chunk->size < size) | |
928 | { | |
929 | pp = &chunk->u.next_free; | |
930 | chunk = *pp; | |
931 | } | |
932 | ||
933 | /* Failing that, allocate new storage. */ | |
934 | if (!chunk) | |
935 | { | |
936 | entry = alloc_small_page (zone); | |
937 | entry->next = entry->zone->pages; | |
938 | entry->zone->pages = entry; | |
939 | ||
940 | chunk = (struct alloc_chunk *) entry->page; | |
941 | VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk))); | |
942 | chunk->size = G.pagesize - CHUNK_OVERHEAD; | |
943 | } | |
944 | else | |
945 | { | |
946 | *pp = chunk->u.next_free; | |
947 | VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk))); | |
948 | } | |
949 | /* Release extra memory from a chunk that's too big. */ | |
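| /* E.g. carving a 32-byte object out of a fresh 4092-byte page chunk | |
| (4K page, 4-byte header) leaves lsize = 4060: the chunk is trimmed to 32 | |
| bytes and the remaining 4056 bytes, after paying for the new chunk's | |
| header, are filed back on the free lists below. */ | |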
950 | lsize = chunk->size - size; | |
951 | if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA) | |
952 | { | |
953 | VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk))); | |
954 | chunk->size = size; | |
955 | ||
956 | lsize -= CHUNK_OVERHEAD; | |
957 | lchunk = (struct alloc_chunk *)(chunk->u.data + size); | |
958 | VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk))); | |
959 | #ifdef COOKIE_CHECKING | |
960 | lchunk->magic = CHUNK_MAGIC; | |
961 | #endif | |
962 | lchunk->type = 0; | |
963 | lchunk->mark = 0; | |
964 | lchunk->size = lsize; | |
965 | free_chunk (lchunk, lsize, zone); | |
966 | } | |
967 | /* Calculate the object's address. */ | |
968 | found: | |
969 | #ifdef COOKIE_CHECKING | |
970 | chunk->magic = CHUNK_MAGIC; | |
971 | #endif | |
972 | chunk->type = 1; | |
973 | chunk->mark = 0; | |
974 | chunk->typecode = type; | |
975 | result = chunk->u.data; | |
976 | ||
977 | #ifdef ENABLE_GC_CHECKING | |
978 | /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the | |
979 | exact same semantics in presence of memory bugs, regardless of | |
980 | ENABLE_VALGRIND_CHECKING. We override this request below. Drop the | |
981 | handle to avoid handle leak. */ | |
982 | VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size)); | |
983 | ||
984 | /* `Poison' the entire allocated object. */ | |
985 | memset (result, 0xaf, size); | |
986 | #endif | |
987 | ||
988 | /* Tell Valgrind that the memory is there, but its content isn't | |
989 | defined. The bytes at the end of the object are still marked | |
990 | inaccessible. */ | |
991 | VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size)); | |
992 | ||
993 | /* Keep track of how many bytes are being allocated. This | |
994 | information is used in deciding when to collect. */ | |
995 | zone->allocated += size + CHUNK_OVERHEAD; | |
996 | ||
997 | if (GGC_DEBUG_LEVEL >= 3) | |
998 | fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n", | |
999 | (void *)chunk, (unsigned long) size, result); | |
1000 | ||
1001 | return result; | |
1002 | } | |
1003 | ||
1004 | /* Allocate a chunk of SIZE bytes of memory with type GTE, into an appropriate zone
1005 | for that type. */ |
1006 | ||
1007 | void * | |
1008 | ggc_alloc_typed (enum gt_types_enum gte, size_t size) | |
1009 | { | |
1010 | if (gte == gt_ggc_e_14lang_tree_node) | |
1011 | return ggc_alloc_zone_1 (size, tree_zone, gte); | |
1012 | else if (gte == gt_ggc_e_7rtx_def) | |
1013 | return ggc_alloc_zone_1 (size, rtl_zone, gte); | |
1014 | else if (gte == gt_ggc_e_9rtvec_def) | |
1015 | return ggc_alloc_zone_1 (size, rtl_zone, gte); | |
1016 | else | |
1017 | return ggc_alloc_zone_1 (size, &main_zone, gte); | |
1018 | } | |
1019 | ||
1020 | /* Normal ggc_alloc simply allocates into the main zone. */ | |
1021 | ||
1022 | void * | |
1023 | ggc_alloc (size_t size) | |
1024 | { | |
1025 | return ggc_alloc_zone_1 (size, &main_zone, -1); | |
1026 | } | |
1027 | ||
1028 | /* Zone allocation allocates into the specified zone. */ | |
1029 | ||
1030 | void * | |
1031 | ggc_alloc_zone (size_t size, struct alloc_zone *zone) | |
1032 | { | |
1033 | return ggc_alloc_zone_1 (size, zone, -1); | |
1034 | } | |
1035 | ||
1036 | /* If P is not marked, mark it and return false. Otherwise return true. | |
1037 | P must have been allocated by the GC allocator; it mustn't point to | |
1038 | static objects, stack variables, or memory allocated with malloc. */ | |
1039 | ||
1040 | int | |
1041 | ggc_set_mark (const void *p) | |
1042 | { | |
1043 | page_entry *entry; | |
1044 | struct alloc_chunk *chunk; | |
1045 | ||
1046 | #ifdef ENABLE_CHECKING | |
1047 | /* Look up the page on which the object is alloced. If the object | |
1048 | wasn't allocated by the collector, we'll probably die. */ | |
1049 | entry = lookup_page_table_entry (p); | |
1050 | if (entry == NULL) | |
1051 | abort (); | |
1052 | #endif | |
1053 | chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD); | |
1054 | #ifdef COOKIE_CHECKING | |
1055 | if (chunk->magic != CHUNK_MAGIC) | |
1056 | abort (); | |
1057 | #endif | |
1058 | if (chunk->mark) | |
1059 | return 1; | |
1060 | chunk->mark = 1; | |
1061 | ||
1062 | #ifndef ENABLE_CHECKING | |
1063 | entry = lookup_page_table_entry (p); | |
1064 | #endif | |
1065 | ||
1066 | /* Large pages are either completely full or completely empty. So if | |
1067 | they are marked, they are completely full. */ | |
1068 | if (entry->large_p) | |
1069 | entry->bytes_free = 0; | |
1070 | else | |
1071 | entry->bytes_free -= chunk->size + CHUNK_OVERHEAD; | |
1072 | ||
1073 | if (GGC_DEBUG_LEVEL >= 4) | |
1074 | fprintf (G.debug_file, "Marking %p\n", p); | |
1075 | ||
1076 | return 0; | |
1077 | } | |
1078 | ||
1079 | /* Return 1 if P has been marked, zero otherwise. | |
1080 | P must have been allocated by the GC allocator; it mustn't point to | |
1081 | static objects, stack variables, or memory allocated with malloc. */ | |
1082 | ||
1083 | int | |
1084 | ggc_marked_p (const void *p) | |
1085 | { | |
1086 | struct alloc_chunk *chunk; | |
1087 | ||
1088 | #ifdef ENABLE_CHECKING | |
1089 | { | |
1090 | page_entry *entry = lookup_page_table_entry (p); | |
1091 | if (entry == NULL) | |
1092 | abort (); | |
1093 | } | |
1094 | #endif | |
1095 | ||
1096 | chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD); | |
1097 | #ifdef COOKIE_CHECKING | |
1098 | if (chunk->magic != CHUNK_MAGIC) | |
1099 | abort (); | |
1100 | #endif | |
1101 | return chunk->mark; | |
1102 | } | |
1103 | ||
1104 | /* Return the size of the gc-able object P. */ | |
1105 | ||
1106 | size_t | |
1107 | ggc_get_size (const void *p) | |
1108 | { | |
1109 | struct alloc_chunk *chunk; | |
1110 | struct page_entry *entry; | |
1111 | ||
1112 | #ifdef ENABLE_CHECKING | |
1113 | entry = lookup_page_table_entry (p); | |
1114 | if (entry == NULL) | |
1115 | abort (); | |
1116 | #endif | |
1117 | ||
1118 | chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD); | |
1119 | #ifdef COOKIE_CHECKING | |
1120 | if (chunk->magic != CHUNK_MAGIC) | |
1121 | abort (); | |
1122 | #endif | |
1123 | if (chunk->size == LARGE_OBJECT_SIZE) | |
1124 | { | |
1125 | #ifndef ENABLE_CHECKING | |
1126 | entry = lookup_page_table_entry (p); | |
1127 | #endif | |
1128 | return entry->bytes; | |
1129 | } | |
1130 | ||
1131 | return chunk->size; | |
1132 | } | |
1133 | ||
1134 | /* Initialize the ggc-zone-mmap allocator. */ | |
1135 | void | |
1136 | init_ggc (void)
1137 | { |
1138 | /* Create the zones. */ | |
1139 | main_zone.name = "Main zone"; | |
1140 | G.zones = &main_zone; | |
1141 | ||
1142 | rtl_zone = xcalloc (1, sizeof (struct alloc_zone)); | |
1143 | rtl_zone->name = "RTL zone"; | |
1144 | /* The main zone's connected to the ... rtl_zone */ | |
1145 | G.zones->next_zone = rtl_zone; | |
1146 | ||
1147 | garbage_zone = xcalloc (1, sizeof (struct alloc_zone)); | |
1148 | garbage_zone->name = "Garbage zone"; | |
1149 | /* The rtl zone's connected to the ... garbage zone */ | |
1150 | rtl_zone->next_zone = garbage_zone; | |
1151 | ||
1152 | tree_zone = xcalloc (1, sizeof (struct alloc_zone)); | |
1153 | tree_zone->name = "Tree zone"; | |
1154 | /* The garbage zone's connected to ... the tree zone */ | |
1155 | garbage_zone->next_zone = tree_zone; | |
1156 | ||
1157 | G.pagesize = getpagesize(); | |
1158 | G.lg_pagesize = exact_log2 (G.pagesize); | |
1159 | #ifdef HAVE_MMAP_DEV_ZERO | |
1160 | G.dev_zero_fd = open ("/dev/zero", O_RDONLY); | |
1161 | if (G.dev_zero_fd == -1) | |
1162 | abort (); | |
1163 | #endif | |
1164 | ||
1165 | #if 0 | |
1166 | G.debug_file = fopen ("ggc-mmap.debug", "w"); | |
1167 | setlinebuf (G.debug_file); | |
1168 | #else | |
1169 | G.debug_file = stdout; | |
1170 | #endif | |
1171 | ||
1172 | #ifdef USING_MMAP | |
1173 | /* StunOS has an amazing off-by-one error for the first mmap allocation | |
1174 | after fiddling with RLIMIT_STACK. The result, as hard as it is to | |
1175 | believe, is an unaligned page allocation, which would cause us to | |
1176 | hork badly if we tried to use it. */ | |
1177 | { | |
1178 | char *p = alloc_anon (NULL, G.pagesize, &main_zone); | |
1179 | struct page_entry *e; | |
1180 | if ((size_t)p & (G.pagesize - 1)) | |
1181 | { | |
1182 | /* How losing. Discard this one and try another. If we still | |
1183 | can't get something useful, give up. */ | |
1184 | ||
1185 | p = alloc_anon (NULL, G.pagesize, &main_zone); | |
1186 | if ((size_t)p & (G.pagesize - 1)) | |
1187 | abort (); | |
1188 | } | |
1189 | ||
1190 | /* We have a good page, might as well hold onto it... */ | |
1191 | e = (struct page_entry *) xmalloc (sizeof (struct page_entry)); | |
1192 | e->bytes = G.pagesize; | |
1193 | e->page = p; | |
1194 | e->next = main_zone.free_pages; | |
1195 | main_zone.free_pages = e; | |
1196 | } | |
1197 | #endif | |
1198 | } | |
1199 | ||
1200 | /* Increment the `GC context'. Objects allocated in an outer context | |
1201 | are never freed, eliminating the need to register their roots. */ | |
1202 | ||
1203 | void | |
1204 | ggc_push_context (void)
1205 | { |
1206 | struct alloc_zone *zone; | |
1207 | for (zone = G.zones; zone; zone = zone->next_zone) | |
1208 | ++(zone->context_depth); | |
1209 | /* Die on wrap. */ | |
1210 | if (main_zone.context_depth >= HOST_BITS_PER_LONG) | |
1211 | abort (); | |
1212 | } | |
1213 | ||
1214 | /* Decrement the `GC context'. All objects allocated since the | |
1215 | previous ggc_push_context are migrated to the outer context. */ | |
1216 | ||
1217 | static void | |
1218 | ggc_pop_context_1 (struct alloc_zone *zone) | |
1219 | { | |
1220 | unsigned long omask; | |
1221 | unsigned depth; | |
1222 | page_entry *p; | |
1223 | ||
1224 | depth = --(zone->context_depth); | |
1225 | omask = (unsigned long)1 << (depth + 1); | |
1226 | ||
1227 | if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask)) | |
1228 | return; | |
1229 | ||
1230 | zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1; | |
1231 | zone->context_depth_allocations &= omask - 1; | |
1232 | zone->context_depth_collections &= omask - 1; | |
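| /* For example, popping from depth 3 to depth 2 gives OMASK == 1 << 3: if | |
| anything was allocated or collected at the popped depth, the shift above | |
| re-records the allocation bit at depth 2 and the masks then clear every | |
| bit at or above the popped depth. */ | |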
1233 | ||
1234 | /* Any remaining pages in the popped context are lowered to the new | |
1235 | current context; i.e. objects allocated in the popped context and | |
1236 | left over are imported into the previous context. */ | |
1237 | for (p = zone->pages; p != NULL; p = p->next) | |
1238 | if (p->context_depth > depth) | |
1239 | p->context_depth = depth; | |
1240 | } | |
1241 | ||
1242 | /* Pop all the zone contexts. */ | |
1243 | ||
1244 | void | |
1245 | ggc_pop_context (void)
1246 | { |
1247 | struct alloc_zone *zone; | |
1248 | for (zone = G.zones; zone; zone = zone->next_zone) | |
1249 | ggc_pop_context_1 (zone); | |
1250 | } | |
1251 | ||
1252 | ||
1253 | /* Poison the chunk. */ | |
1254 | #ifdef ENABLE_GC_CHECKING | |
1255 | #define poison_chunk(CHUNK, SIZE) \ | |
1256 | memset ((CHUNK)->u.data, 0xa5, (SIZE)) | |
1257 | #else | |
1258 | #define poison_chunk(CHUNK, SIZE) | |
1259 | #endif | |
1260 | ||
1261 | /* Free all empty pages and objects within a page for a given zone */ | |
1262 | ||
1263 | static void | |
1264 | sweep_pages (struct alloc_zone *zone) | |
1265 | { | |
1266 | page_entry **pp, *p, *next; | |
1267 | struct alloc_chunk *chunk, *last_free, *end; | |
1268 | size_t last_free_size, allocated = 0; | |
1269 | ||
1270 | /* First, reset the free_chunks lists, since we are going to | |
1271 | re-free free chunks in hopes of coalescing them into large chunks. */ | |
1272 | memset (zone->free_chunks, 0, sizeof (zone->free_chunks)); | |
1273 | pp = &zone->pages; | |
1274 | for (p = zone->pages; p ; p = next) | |
1275 | { | |
1276 | next = p->next; | |
1277 | ||
1278 | /* For empty pages, just free the page. */ | |
1279 | if (p->bytes_free == G.pagesize && p->context_depth == zone->context_depth) | |
1280 | { | |
1281 | *pp = next; | |
1282 | #ifdef ENABLE_GC_CHECKING | |
1283 | /* Poison the page. */ | |
1284 | memset (p->page, 0xb5, p->bytes); | |
1285 | #endif | |
1286 | free_page (p); | |
1287 | continue; | |
1288 | } | |
1289 | ||
1290 | /* Large pages are all or none affairs. Either they are | |
1291 | completely empty, or they are completely full.
1292 | Thus, if the above didn't catch it, we need not do anything |
1293 | except remove the mark and reset the bytes_free. | |
1294 | ||
1295 | XXX: Should we bother to increment allocated? */ | |
1296 | else if (p->large_p) | |
1297 | { | |
1298 | p->bytes_free = p->bytes; | |
1299 | ((struct alloc_chunk *)p->page)->mark = 0; | |
1300 | continue; | |
1301 | } | |
1302 | pp = &p->next; | |
1303 | ||
1304 | /* This page has now survived another collection. */ | |
1305 | p->survived++; | |
1306 | ||
1307 | /* Which leaves full and partial pages. Step through all chunks, | |
1308 | consolidate those that are free and insert them into the free | |
1309 | lists. Note that consolidation slows down collection | |
1310 | slightly. */ | |
1311 | ||
1312 | chunk = (struct alloc_chunk *)p->page; | |
1313 | end = (struct alloc_chunk *)(p->page + G.pagesize); | |
1314 | last_free = NULL; | |
1315 | last_free_size = 0; | |
1316 | ||
1317 | do | |
1318 | { | |
1319 | prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size)); | |
1320 | if (chunk->mark || p->context_depth < zone->context_depth) | |
1321 | { | |
1322 | if (last_free) | |
1323 | { | |
1324 | last_free->type = 0; | |
1325 | last_free->size = last_free_size; | |
1326 | last_free->mark = 0; | |
1327 | poison_chunk (last_free, last_free_size); | |
1328 | free_chunk (last_free, last_free_size, zone); | |
1329 | last_free = NULL; | |
1330 | } | |
1331 | if (chunk->mark) | |
1332 | { | |
1333 | allocated += chunk->size + CHUNK_OVERHEAD; | |
1334 | p->bytes_free += chunk->size + CHUNK_OVERHEAD; | |
1335 | } | |
1336 | chunk->mark = 0; | |
1337 | #ifdef ENABLE_CHECKING | |
1338 | if (p->bytes_free > p->bytes) | |
1339 | abort (); | |
1340 | #endif | |
1341 | } | |
1342 | else | |
1343 | { | |
1344 | if (last_free) | |
1345 | { | |
1346 | last_free_size += CHUNK_OVERHEAD + chunk->size; | |
1347 | } | |
1348 | else | |
1349 | { | |
1350 | last_free = chunk; | |
1351 | last_free_size = chunk->size; | |
1352 | } | |
1353 | } | |
1354 | ||
1355 | chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size); | |
1356 | } | |
1357 | while (chunk < end); | |
1358 | ||
1359 | if (last_free) | |
1360 | { | |
1361 | last_free->type = 0; | |
1362 | last_free->size = last_free_size; | |
1363 | last_free->mark = 0; | |
1364 | poison_chunk (last_free, last_free_size); | |
1365 | free_chunk (last_free, last_free_size, zone); | |
1366 | } | |
1367 | } | |
1368 | ||
1369 | zone->allocated = allocated; | |
1370 | } | |
1371 | ||
1372 | /* mark-and-sweep routine for collecting a single zone. NEED_MARKING | |
1373 | is true if we need to mark before sweeping, false if some other | |
1374 | zone collection has already performed marking for us. Returns true | |
1375 | if we collected, false otherwise. */ | |
1376 | ||
1377 | static bool | |
1378 | ggc_collect_1 (struct alloc_zone *zone, bool need_marking) | |
1379 | { | |
1380 | /* Avoid frequent unnecessary work by skipping collection if the | |
1381 | total allocations haven't expanded much since the last | |
1382 | collection. */ | |
1383 | float allocated_last_gc = | |
1384 | MAX (zone->allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024); | |
1385 | ||
1386 | float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100; | |
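| /* For example, with --param ggc-min-heapsize=4096 (KB) and | |
| --param ggc-min-expand=30, a zone that finished the last collection with | |
| 4 MB live is not considered again until its allocation passes roughly | |
| 5.2 MB. */ | |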
1387 | ||
1388 | if (zone->allocated < allocated_last_gc + min_expand) | |
1389 | return false; | |
1390 | ||
1391 | if (!quiet_flag) | |
1392 | fprintf (stderr, " {%s GC %luk -> ", zone->name, (unsigned long) zone->allocated / 1024); | |
1393 | ||
1394 | /* Zero the total allocated bytes. This will be recalculated in the | |
1395 | sweep phase. */ | |
1396 | zone->allocated = 0; | |
1397 | ||
1398 | /* Release the pages we freed the last time we collected, but didn't | |
1399 | reuse in the interim. */ | |
1400 | release_pages (zone); | |
1401 | ||
1402 | /* Indicate that we've seen collections at this context depth. */ | |
1403 | zone->context_depth_collections | |
1404 | = ((unsigned long)1 << (zone->context_depth + 1)) - 1; | |
1405 | if (need_marking) | |
1406 | ggc_mark_roots (); | |
1407 | sweep_pages (zone); | |
1408 | zone->was_collected = true; | |
1409 | zone->allocated_last_gc = zone->allocated; | |
1410 | ||
1411 | ||
1412 | if (!quiet_flag) | |
1413 | fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024); | |
1414 | return true; | |
1415 | } | |
1416 | ||
1417 | /* Calculate the average page survival rate in terms of number of | |
1418 | collections. */ | |
1419 | ||
1420 | static float | |
1421 | calculate_average_page_survival (struct alloc_zone *zone) | |
1422 | { | |
1423 | float count = 0.0; | |
1424 | float survival = 0.0; | |
1425 | page_entry *p; | |
1426 | for (p = zone->pages; p; p = p->next) | |
1427 | { | |
1428 | count += 1.0; | |
1429 | survival += p->survived; | |
1430 | } | |
1431 | return survival/count; | |
1432 | } | |
1433 | ||
1434 | /* Check the magic cookies all of the chunks contain, to make sure we | |
1435 | aren't doing anything stupid, like stomping on alloc_chunk | |
1436 | structures. */ | |
1437 | ||
1438 | static inline void | |
1439 | check_cookies (void)
1440 | { |
1441 | #ifdef COOKIE_CHECKING | |
1442 | page_entry *p; | |
1443 | struct alloc_zone *zone; |
1444 | ||
1445 | for (zone = G.zones; zone; zone = zone->next_zone) |
1446 | { | |
1447 | for (p = zone->pages; p; p = p->next) | |
1448 | { | |
1449 | if (!p->large_p) | |
1450 | { | |
1451 | struct alloc_chunk *chunk = (struct alloc_chunk *)p->page; | |
1452 | struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize); | |
1453 | do | |
1454 | { | |
1455 | if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC) | |
1456 | abort (); | |
1457 | chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size); | |
1458 | } | |
1459 | while (chunk < end); | |
1460 | } | |
1461 | } | |
1462 | } | |
1463 | #endif | |
1464 | } | |
1465 | ||
1466 | ||
1467 | /* Top level collection routine. */ | |
1468 | ||
1469 | void | |
1470 | ggc_collect (void)
1471 | { |
1472 | struct alloc_zone *zone; | |
1473 | bool marked = false; | |
1474 | float f; | |
1475 | ||
1476 | timevar_push (TV_GC); | |
1477 | check_cookies (); | |
1478 | /* Start by possibly collecting the main zone. */ | |
1479 | main_zone.was_collected = false; | |
1480 | marked |= ggc_collect_1 (&main_zone, true); | |
1481 | /* In order to keep the number of collections down, we don't | |
1482 | collect other zones unless we are collecting the main zone. This | |
1483 | gives us roughly the same number of collections as we used to | |
1484 | have with the old gc. The number of collections is important | |
1485 | because our main slowdown (according to profiling) is now in | |
1486 | marking. So if we mark twice as often as we used to, we'll be | |
1487 | twice as slow. Hopefully we'll avoid this cost when we mark | |
1488 | zone-at-a-time. */ | |
1489 | ||
1490 | if (main_zone.was_collected) | |
1491 | { | |
1492 | check_cookies (); | |
1493 | rtl_zone->was_collected = false; | |
1494 | marked |= ggc_collect_1 (rtl_zone, !marked); | |
1495 | check_cookies (); | |
1496 | tree_zone->was_collected = false; | |
1497 | marked |= ggc_collect_1 (tree_zone, !marked); | |
1498 | check_cookies (); | |
1499 | garbage_zone->was_collected = false; | |
1500 | marked |= ggc_collect_1 (garbage_zone, !marked); | |
1501 | } | |
1502 | ||
1503 | /* Print page survival stats, if someone wants them. */ | |
1504 | if (GGC_DEBUG_LEVEL >= 2) | |
1505 | { | |
1506 | if (rtl_zone->was_collected) | |
1507 | { | |
1508 | f = calculate_average_page_survival (rtl_zone); | |
1509 | printf ("Average RTL page survival is %f\n", f); | |
1510 | } | |
1511 | if (main_zone.was_collected) | |
1512 | { | |
1513 | f = calculate_average_page_survival (&main_zone); | |
1514 | printf ("Average main page survival is %f\n", f); | |
1515 | } | |
1516 | if (tree_zone->was_collected) | |
1517 | { | |
1518 | f = calculate_average_page_survival (tree_zone); | |
1519 | printf ("Average tree page survival is %f\n", f); | |
1520 | } | |
1521 | } | |
1522 | /* Since we don't mark one zone at a time right now, marking in any | |
1523 | zone means marking in every zone. So we have to clear all the | |
1524 | marks in all the zones that weren't collected already. */ | |
1525 | if (marked) | |
1526 | { | |
1527 | page_entry *p; | |
1528 | for (zone = G.zones; zone; zone = zone->next_zone) | |
1529 | { | |
1530 | if (zone->was_collected) | |
1531 | continue; | |
1532 | for (p = zone->pages; p; p = p->next) | |
1533 | { | |
1534 | if (!p->large_p) | |
1535 | { | |
1536 | struct alloc_chunk *chunk = (struct alloc_chunk *)p->page; | |
1537 | struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize); | |
1538 | do | |
1539 | { | |
1540 | prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size)); | |
1541 | if (chunk->mark || p->context_depth < zone->context_depth) | |
1542 | { | |
1543 | if (chunk->mark) | |
1544 | p->bytes_free += chunk->size + CHUNK_OVERHEAD; | |
1545 | #ifdef ENABLE_CHECKING | |
1546 | if (p->bytes_free > p->bytes) | |
1547 | abort (); | |
1548 | #endif | |
1549 | chunk->mark = 0; | |
1550 | } | |
1551 | chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size); | |
1552 | } | |
1553 | while (chunk < end); | |
1554 | } | |
1555 | else | |
1556 | { | |
1557 | p->bytes_free = p->bytes; | |
1558 | ((struct alloc_chunk *)p->page)->mark = 0; | |
1559 | } | |
1560 | } | |
1561 | } | |
1562 | } | |
1563 | timevar_pop (TV_GC); | |
1564 | } | |
1565 | ||
1566 | /* Print allocation statistics. */ | |
1567 | ||
1568 | void | |
1569 | ggc_print_statistics (void)
1570 | { |
1571 | } | |
1572 | ||
1573 | struct ggc_pch_data | |
1574 | { | |
1575 | struct ggc_pch_ondisk | |
1576 | { | |
1577 | unsigned total; | |
1578 | } d; | |
1579 | size_t base; | |
1580 | size_t written; | |
1581 | ||
1582 | }; | |
1583 | ||
1584 | /* Initialize the PCH datastructure. */ | |
1585 | ||
1586 | struct ggc_pch_data * | |
1587 | init_ggc_pch (void) | |
1588 | { | |
1589 | return xcalloc (sizeof (struct ggc_pch_data), 1); | |
1590 | } | |
1591 | ||
1592 | /* Add the size of object X to the size of the PCH data. */ | |
1593 | ||
1594 | void | |
1595 | ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, | |
1596 | size_t size, bool is_string) | |
1597 | { | |
1598 | if (!is_string) | |
1599 | { | |
1600 | d->d.total += size + CHUNK_OVERHEAD; | |
1601 | } | |
1602 | else | |
1603 | d->d.total += size; | |
1604 | } | |
1605 | ||
1606 | /* Return the total size of the PCH data. */ | |
1607 | ||
1608 | size_t | |
1609 | ggc_pch_total_size (struct ggc_pch_data *d) | |
1610 | { | |
1611 | return d->d.total; | |
1612 | } | |
1613 | ||
1614 | /* Set the base address for the objects in the PCH file. */ | |
1615 | ||
1616 | void | |
1617 | ggc_pch_this_base (struct ggc_pch_data *d, void *base) | |
1618 | { | |
1619 | d->base = (size_t) base; | |
1620 | } | |
1621 | ||
1622 | /* Allocate a place for object X of size SIZE in the PCH file. */ | |
1623 | ||
1624 | char * | |
1625 | ggc_pch_alloc_object (struct ggc_pch_data *d, void *x, | |
1626 | size_t size, bool is_string) | |
1627 | { | |
1628 | char *result; | |
1629 | result = (char *)d->base; | |
1630 | if (!is_string) | |
1631 | { | |
1632 | struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD); | |
1633 | if (chunk->size == LARGE_OBJECT_SIZE) | |
1634 | d->base += ggc_get_size (x) + CHUNK_OVERHEAD; | |
1635 | else | |
1636 | d->base += chunk->size + CHUNK_OVERHEAD; | |
1637 | return result + CHUNK_OVERHEAD; | |
1638 | } | |
1639 | else | |
1640 | { | |
1641 | d->base += size; | |
1642 | return result; | |
1643 | } | |
1644 | ||
1645 | } | |
1646 | ||
1647 | /* Prepare to write out the PCH data to file F. */ | |
1648 | ||
1649 | void | |
1650 | ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED, | |
1651 | FILE *f ATTRIBUTE_UNUSED) | |
1652 | { | |
1653 | /* Nothing to do. */ | |
1654 | } | |
1655 | ||
1656 | /* Write out object X of SIZE to file F. */ | |
1657 | ||
1658 | void | |
1659 | ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, | |
1660 | FILE *f, void *x, void *newx ATTRIBUTE_UNUSED, | |
1661 | size_t size, bool is_string) | |
1662 | { | |
1663 | if (!is_string) | |
1664 | { | |
1665 | struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD); | |
1666 | size = chunk->size; | |
1667 | if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1) | |
1668 | fatal_error ("can't write PCH file: %m"); | |
1669 | d->written += size + CHUNK_OVERHEAD; | |
1670 | } | |
1671 | else | |
1672 | { | |
1673 | if (fwrite (x, size, 1, f) != 1) | |
1674 | fatal_error ("can't write PCH file: %m"); | |
1675 | d->written += size; | |
1676 | } | |
1677 | if (d->written == d->d.total | |
1678 | && fseek (f, ROUND_UP_VALUE (d->d.total, G.pagesize), SEEK_CUR) != 0) | |
1679 | fatal_error ("can't write PCH file: %m"); | |
1680 | } | |
1681 | ||
1682 | void | |
1683 | ggc_pch_finish (struct ggc_pch_data *d, FILE *f) | |
1684 | { | |
1685 | if (fwrite (&d->d, sizeof (d->d), 1, f) != 1) | |
1686 | fatal_error ("can't write PCH file: %m"); | |
1687 | free (d); | |
1688 | } | |
1689 | ||
1690 | ||
1691 | void | |
1692 | ggc_pch_read (FILE *f, void *addr) | |
1693 | { | |
1694 | struct ggc_pch_ondisk d; | |
1695 | struct page_entry *entry; | |
1696 | char *pte; | |
1697 | if (fread (&d, sizeof (d), 1, f) != 1) | |
1698 | fatal_error ("can't read PCH file: %m"); | |
1699 | entry = xcalloc (1, sizeof (struct page_entry)); | |
1700 | entry->bytes = d.total; | |
1701 | entry->page = addr; | |
1702 | entry->context_depth = 0; | |
1703 | entry->zone = &main_zone; | |
1704 | for (pte = entry->page; | |
1705 | pte < entry->page + entry->bytes; | |
1706 | pte += G.pagesize) | |
1707 | set_page_table_entry (pte, entry); | |
1708 | ||
1709 | } |