/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#endif

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

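/* A minimal sketch (illustrative, not part of the original file) of the
   size-to-order rounding described above.  The real allocator uses the
   size_lookup table defined later instead of a loop, and init_ggc
   additionally redirects some sizes to the extra orders.  */
#if 0
static unsigned
example_size_to_order (size_t size)
{
  unsigned order = 3;        /* size_lookup starts at 8-byte objects.  */
  while (((size_t) 1 << order) < size)
    order++;
  return order;              /* E.g. a 20-byte request yields order 5 (32 bytes).  */
}
#endif
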
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
\f
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

\f
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |	 |
			 PAGE_L1_BITS	|
				 |	|
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))

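/* An illustrative decomposition (assuming 32-bit pointers and a
   4096-byte system page, so G.lg_pagesize == 12 and PAGE_L2_BITS == 12):

     p = 0xABCD1234
     LOOKUP_L1 (p) == 0xAB    -- top PAGE_L1_BITS index the first level
     LOOKUP_L2 (p) == 0xCD1   -- next PAGE_L2_BITS index the second level
     low 12 bits              -- offset within the page, ignored  */
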
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))

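/* A worked instance of the trick above, with assumed values: for object
   size Z = 24 = 2^3 * 3, compute_inverse below produces S = 3 and
   M = 0xAAAAAAAB, the inverse of 3 modulo 2^32 (3 * 0xAAAAAAAB == 1
   mod 2^32).  For O = 48, which is evenly divisible by 24:
   (48 * 0xAAAAAAAB) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24.  */
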
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct tree_decl),
  sizeof (struct tree_list),
  TREE_EXP_SIZE (2),
  RTL_SIZE (2),			/* MEM, PLUS, etc.  */
  RTL_SIZE (9),			/* INSN */
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    long double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

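/* Why the offsetof trick works (a note, not in the original): the
   compiler must pad the leading char so that the union that follows is
   aligned to the strictest of its members, so the offset of U equals
   that alignment.  E.g. if long double requires 16-byte alignment,
   MAX_ALIGNMENT evaluates to 16.  */
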
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

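/* For example (illustrative): ROUND_UP_VALUE (13, 8) == 7 - (7 + 13) % 8
   == 7 - 4 == 3, and 13 + 3 == 16 is indeed a multiple of 8; likewise
   ROUND_UP (13, 8) == CEIL (13, 8) * 8 == 2 * 8 == 16.  */
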
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This array is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object
  {
    void *object;
    struct free_object *next;
  } *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;

    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
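
/* For instance (illustrative, assuming 32-bit longs): a page holding 128
   objects needs BITMAP_SIZE (128 + 1) == CEIL (129, 32) * 4 == 20 bytes;
   the "+ 1" leaves room for the one-past-the-end sentry bit that
   alloc_page sets below.  */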
\f
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
struct alloc_zone *rtl_zone = NULL;
struct alloc_zone *tree_zone = NULL;
struct alloc_zone *garbage_zone = NULL;

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = xrealloc (G.by_depth,
			     G.by_depth_max * sizeof (page_entry *));
      G.save_in_use = xrealloc (G.save_in_use,
				G.by_depth_max * sizeof (unsigned long *));
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = xcalloc (1, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  if (tail_slop < sizeof (page_group))
	    abort ();
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = xcalloc (1, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];

      /* If they are at the same depth, put the top element into the
	 freed slot.  */
      if (entry->context_depth == top->context_depth)
	{
	  int i = entry->index_by_depth;
	  G.by_depth[i] = top;
	  G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
	  top->index_by_depth = i;
	}
      else
	{
	  /* We cannot free a page from a context deeper than the
	     current one.  */
	  abort ();
	}
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */

static unsigned char size_lookup[257] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};

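/* For example (illustrative): size_lookup[24] == 5, so a 24-byte request
   initially maps to order 5 (32-byte objects); init_ggc below then
   redirects some entries to the extra orders.  Requests larger than 256
   bytes bypass this table and search upward from order 9 in ggc_alloc.  */
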
/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
		      MEM_STAT_DECL)
{
  return ggc_alloc_stat (size PASS_MEM_STAT);
}

/* Zone allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED
		     MEM_STAT_DECL)
{
  return ggc_alloc_stat (size PASS_MEM_STAT);
}

/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    {
      order = size_lookup[size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 9;
      while (size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths; if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
	 the only entry, then we must update the PREV pointer of the
	 ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;
      else
	entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL PREV pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
	 The new page at the head of the list will have NULL in
	 its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }
#ifdef GATHER_STATISTICS
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size PASS_MEM_STAT);
#endif

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
					    object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
	G.stats.total_overhead_under32 += overhead;
	G.stats.total_allocated_under32 += object_size;
      }
    if (size <= 64)
      {
	G.stats.total_overhead_under64 += overhead;
	G.stats.total_allocated_under64 += object_size;
      }
    if (size <= 128)
      {
	G.stats.total_overhead_under128 += overhead;
	G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Freeing object, actual size=%lu, at %p on %p\n",
	     (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = xmalloc (sizeof (struct free_object));
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
	page_entry *p, *q;

	/* If the page is completely full, then it's supposed to
	   be after all pages that aren't.  Since we've freed one
	   object from a page that was full, we need to move the
	   page to the head of the list.

	   PE is the node we want to move.  Q is the previous node
	   and P is the next node in the list.  */
	q = pe->prev;
	if (q && q->num_free_objects == 0)
	  {
	    p = pe->next;

	    q->next = p;

	    /* If PE was at the end of the list, then Q becomes the
	       new end of the list.  If PE was not the end of the
	       list, then we need to update the PREV field for P.  */
	    if (!p)
	      G.page_tails[order] = q;
	    else
	      p->prev = q;

	    /* Move PE to the head of the list.  */
	    pe->next = G.pages[order];
	    pe->prev = NULL;
	    G.pages[order]->prev = pe;
	    G.pages[order] = pe;
	  }

	/* Reset the hint bit to point to the only free object.  */
	pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
\f
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}

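/* A side note (not in the original): the loop above is Newton's
   iteration for the inverse of an odd number modulo a power of two --
   if inv * size == 1 (mod 2^k), then inv * (2 - inv * size) is correct
   mod 2^(2k) -- so the number of correct low-order bits doubles on each
   pass.  Starting from inv = size works because every odd number is its
   own inverse modulo 8.  */
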
/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = xcalloc (1, sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      o = size_lookup[OBJECT_SIZE (order)];
      for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
	size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = xmalloc (G.depth_max * sizeof (unsigned int));

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
703ad42b KG |
1525 | G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *)); |
1526 | G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *)); | |
21341cfd AS |
1527 | } |
1528 | ||
47aeffac SB |
1529 | /* Start a new GGC zone. */ |
1530 | ||
1531 | struct alloc_zone * | |
1532 | new_ggc_zone (const char *name ATTRIBUTE_UNUSED) | |
1533 | { | |
1534 | return NULL; | |
1535 | } | |
1536 | ||
1537 | /* Destroy a GGC zone. */ | |
1538 | void | |
1539 | destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED) | |
1540 | { | |
1541 | } | |
1542 | ||
cb2ec151 RH |
1543 | /* Increment the `GC context'. Objects allocated in an outer context |
1544 | are never freed, eliminating the need to register their roots. */ | |
21341cfd AS |
1545 | |
1546 | void | |
20c1dc5e | 1547 | ggc_push_context (void) |
21341cfd AS |
1548 | { |
1549 | ++G.context_depth; | |
1550 | ||
1551 | /* Die on wrap. */ | |
52895e1a | 1552 | if (G.context_depth >= HOST_BITS_PER_LONG) |
21341cfd AS |
1553 | abort (); |
1554 | } | |
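
/* A hedged usage sketch (assuming the ggc_alloc entry point declared in
   ggc.h; `context_demo' itself is hypothetical): allocations made while
   the context is pushed survive any collection that runs before the
   matching ggc_pop_context, without having to be registered as roots.  */

static void
context_demo (void)
{
  void *scratch;

  ggc_push_context ();

  /* SCRATCH lives at the new, deeper context depth.  */
  scratch = ggc_alloc (128);

  /* ... work that may trigger ggc_collect ...  */

  /* SCRATCH migrates to the outer context and becomes collectable
     again once it is unreachable there.  */
  ggc_pop_context ();
  (void) scratch;
}
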
1555 | ||
4934cc53 MM |
1556 | /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P |
1557 | reflects reality. Recalculate NUM_FREE_OBJECTS as well. */ | |
1558 | ||
1559 | static void | |
20c1dc5e | 1560 | ggc_recalculate_in_use_p (page_entry *p) |
4934cc53 MM |
1561 | { |
1562 | unsigned int i; | |
1563 | size_t num_objects; | |
1564 | ||
589005ff | 1565 | /* Because the past-the-end bit in in_use_p is always set, we |
4934cc53 | 1566 | pretend there is one additional object. */ |
17211ab5 | 1567 | num_objects = OBJECTS_IN_PAGE (p) + 1; |
4934cc53 MM |
1568 | |
1569 | /* Reset the free object count. */ | |
1570 | p->num_free_objects = num_objects; | |
1571 | ||
1572 | /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */ | |
589005ff | 1573 | for (i = 0; |
2be510b8 MM |
1574 | i < CEIL (BITMAP_SIZE (num_objects), |
1575 | sizeof (*p->in_use_p)); | |
4934cc53 MM |
1576 | ++i) |
1577 | { | |
1578 | unsigned long j; | |
1579 | ||
1580 | /* Something is in use if it is marked, or if it was in use in a | |
1581 | context further down the context stack. */ | |
c4775f82 | 1582 | p->in_use_p[i] |= save_in_use_p (p)[i]; |
4934cc53 MM |
1583 | |
1584 | /* Decrement the free object count for every object allocated. */ | |
1585 | for (j = p->in_use_p[i]; j; j >>= 1) | |
1586 | p->num_free_objects -= (j & 1); | |
1587 | } | |
1588 | ||
1589 | if (p->num_free_objects >= num_objects) | |
1590 | abort (); | |
1591 | } | |
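
/* A standalone sketch of the merge above, reduced to a single bitmap
   word (demo code; the names are hypothetical): an object counts as
   allocated if it is marked now *or* was in use in an outer context.  */

static unsigned int
merged_free_count (unsigned long in_use, unsigned long save_in_use,
		   unsigned int num_objects)
{
  unsigned long merged = in_use | save_in_use;
  unsigned int free_count = num_objects;
  unsigned long j;

  for (j = merged; j; j >>= 1)
    free_count -= (j & 1);
  return free_count;
}
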
1592 | ||
589005ff | 1593 | /* Decrement the `GC context'. All objects allocated since the |
cb2ec151 | 1594 | previous ggc_push_context are migrated to the outer context. */ |
21341cfd AS |
1595 | |
1596 | void | |
20c1dc5e | 1597 | ggc_pop_context (void) |
21341cfd | 1598 | { |
52895e1a | 1599 | unsigned long omask; |
c4775f82 MS |
1600 | unsigned int depth, i, e; |
1601 | #ifdef ENABLE_CHECKING | |
1602 | unsigned int order; | |
1603 | #endif | |
21341cfd AS |
1604 | |
1605 | depth = --G.context_depth; | |
52895e1a RH |
1606 | omask = (unsigned long)1 << (depth + 1); |
1607 | ||
1608 | if (!((G.context_depth_allocations | G.context_depth_collections) & omask)) | |
1609 | return; | |
1610 | ||
1611 | G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1; | |
1612 | G.context_depth_allocations &= omask - 1; | |
1613 | G.context_depth_collections &= omask - 1; | |
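
  /* Worked example (hedged): popping from depth 2 back to depth 1 gives
     DEPTH == 1 and OMASK == 1UL << 2.  If bit 2 of
     context_depth_allocations was set -- i.e. something was allocated at
     the popped depth -- it is folded into bit 1, and bits 2 and above
     are cleared from both masks, since that depth no longer exists.  */
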
21341cfd | 1614 | |
a98ebe2e | 1615 | /* The G.depth array is shortened so that the last index is the |
c4775f82 MS |
1616 | context_depth of the top element of by_depth. */ |
1617 | if (depth+1 < G.depth_in_use) | |
1618 | e = G.depth[depth+1]; | |
1619 | else | |
1620 | e = G.by_depth_in_use; | |
1621 | ||
1622 | /* We might not have any PTEs of depth DEPTH. */ | |
1623 | if (depth < G.depth_in_use) | |
20c1dc5e | 1624 | { |
c4775f82 MS |
1625 | |
1626 | /* First we go through all the pages at depth DEPTH to | |
1627 | recalculate the in-use bits. */ | |
1628 | for (i = G.depth[depth]; i < e; ++i) | |
1629 | { | |
1630 | page_entry *p; | |
1631 | ||
1632 | #ifdef ENABLE_CHECKING | |
1633 | p = G.by_depth[i]; | |
1634 | ||
1635 | /* Check that all of the pages really are at the depth that | |
1636 | we expect. */ | |
1637 | if (p->context_depth != depth) | |
1638 | abort (); | |
1639 | if (p->index_by_depth != i) | |
1640 | abort (); | |
1641 | #endif | |
1642 | ||
1643 | prefetch (&save_in_use_p_i (i+8)); | |
1644 | prefetch (&save_in_use_p_i (i+16)); | |
1645 | if (save_in_use_p_i (i)) | |
1646 | { | |
1647 | p = G.by_depth[i]; | |
1648 | ggc_recalculate_in_use_p (p); | |
1649 | free (save_in_use_p_i (i)); | |
1650 | save_in_use_p_i (i) = 0; | |
1651 | } | |
1652 | } | |
1653 | } | |
1654 | ||
1655 | /* Then, we reset all page_entries with a depth greater than DEPTH | |
1656 | to be at depth DEPTH. */ | |
1657 | for (i = e; i < G.by_depth_in_use; ++i) | |
1658 | { | |
1659 | page_entry *p = G.by_depth[i]; | |
1660 | ||
1661 | /* Check that all of the pages really are at the depth we | |
1662 | expect. */ | |
1663 | #ifdef ENABLE_CHECKING | |
1664 | if (p->context_depth <= depth) | |
1665 | abort (); | |
1666 | if (p->index_by_depth != i) | |
1667 | abort (); | |
1668 | #endif | |
1669 | p->context_depth = depth; | |
1670 | } | |
1671 | ||
1672 | adjust_depth (); | |
1673 | ||
1674 | #ifdef ENABLE_CHECKING | |
2be510b8 | 1675 | for (order = 2; order < NUM_ORDERS; order++) |
21341cfd | 1676 | { |
21341cfd AS |
1677 | page_entry *p; |
1678 | ||
1679 | for (p = G.pages[order]; p != NULL; p = p->next) | |
1680 | { | |
1681 | if (p->context_depth > depth) | |
c4775f82 MS |
1682 | abort (); |
1683 | else if (p->context_depth == depth && save_in_use_p (p)) | |
1684 | abort (); | |
21341cfd AS |
1685 | } |
1686 | } | |
c4775f82 | 1687 | #endif |
21341cfd | 1688 | } |
21341cfd | 1689 | \f |
cb2ec151 RH |
1690 | /* Unmark all objects. */ |
1691 | ||
685fe032 | 1692 | static void |
20c1dc5e | 1693 | clear_marks (void) |
21341cfd AS |
1694 | { |
1695 | unsigned order; | |
1696 | ||
2be510b8 | 1697 | for (order = 2; order < NUM_ORDERS; order++) |
21341cfd | 1698 | { |
21341cfd AS |
1699 | page_entry *p; |
1700 | ||
1701 | for (p = G.pages[order]; p != NULL; p = p->next) | |
1702 | { | |
17211ab5 GK |
1703 | size_t num_objects = OBJECTS_IN_PAGE (p); |
1704 | size_t bitmap_size = BITMAP_SIZE (num_objects + 1); | |
1705 | ||
21341cfd AS |
1706 | #ifdef ENABLE_CHECKING |
1707 | /* The data should be page-aligned. */ | |
1708 | if ((size_t) p->page & (G.pagesize - 1)) | |
1709 | abort (); | |
1710 | #endif | |
1711 | ||
1712 | /* Pages that aren't in the topmost context are not collected; | |
1713 | nevertheless, we need their in-use bit vectors to store GC | |
1714 | marks. So, back them up first. */ | |
4934cc53 | 1715 | if (p->context_depth < G.context_depth) |
21341cfd | 1716 | { |
c4775f82 MS |
1717 | if (! save_in_use_p (p)) |
1718 | save_in_use_p (p) = xmalloc (bitmap_size); | |
1719 | memcpy (save_in_use_p (p), p->in_use_p, bitmap_size); | |
21341cfd AS |
1720 | } |
1721 | ||
1722 | /* Reset the number of free objects and clear the | |
1723 | in-use bits. These will be adjusted by mark_obj. */ | |
1724 | p->num_free_objects = num_objects; | |
1725 | memset (p->in_use_p, 0, bitmap_size); | |
1726 | ||
1727 | /* Make sure the one-past-the-end bit is always set. */ | |
589005ff | 1728 | p->in_use_p[num_objects / HOST_BITS_PER_LONG] |
21341cfd AS |
1729 | = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG)); |
1730 | } | |
1731 | } | |
1732 | } | |
1733 | ||
cb2ec151 RH |
1734 | /* Free all empty pages. Partially empty pages need no attention |
1735 | because the `mark' bit doubles as an `unused' bit. */ | |
1736 | ||
685fe032 | 1737 | static void |
20c1dc5e | 1738 | sweep_pages (void) |
21341cfd AS |
1739 | { |
1740 | unsigned order; | |
1741 | ||
2be510b8 | 1742 | for (order = 2; order < NUM_ORDERS; order++) |
21341cfd AS |
1743 | { |
1744 | /* The last page-entry to consider, regardless of entries | |
1745 | placed at the end of the list. */ | |
1746 | page_entry * const last = G.page_tails[order]; | |
1747 | ||
17211ab5 | 1748 | size_t num_objects; |
054f5e69 | 1749 | size_t live_objects; |
21341cfd AS |
1750 | page_entry *p, *previous; |
1751 | int done; | |
589005ff | 1752 | |
21341cfd AS |
1753 | p = G.pages[order]; |
1754 | if (p == NULL) | |
1755 | continue; | |
1756 | ||
1757 | previous = NULL; | |
1758 | do | |
1759 | { | |
1760 | page_entry *next = p->next; | |
1761 | ||
1762 | /* Loop until all entries have been examined. */ | |
1763 | done = (p == last); | |
20c1dc5e | 1764 | |
17211ab5 | 1765 | num_objects = OBJECTS_IN_PAGE (p); |
21341cfd | 1766 | |
054f5e69 ZW |
1767 | /* Add all live objects on this page to the count of |
1768 | allocated memory. */ | |
1769 | live_objects = num_objects - p->num_free_objects; | |
1770 | ||
2be510b8 | 1771 | G.allocated += OBJECT_SIZE (order) * live_objects; |
054f5e69 | 1772 | |
21341cfd AS |
1773 | /* Only objects on pages in the topmost context should get |
1774 | collected. */ | |
1775 | if (p->context_depth < G.context_depth) | |
1776 | ; | |
1777 | ||
1778 | /* Remove the page if it's empty. */ | |
054f5e69 | 1779 | else if (live_objects == 0) |
21341cfd | 1780 | { |
9bf793f9 JL |
1781 | /* If P was the first page in the list, then NEXT |
1782 | becomes the new first page in the list, otherwise | |
1783 | splice P out of the forward pointers. */ | |
21341cfd AS |
1784 | if (! previous) |
1785 | G.pages[order] = next; | |
1786 | else | |
1787 | previous->next = next; | |
9bf793f9 JL |
1788 | |
1789 | /* Splice P out of the back pointers too. */ | |
1790 | if (next) | |
1791 | next->prev = previous; | |
21341cfd AS |
1792 | |
1793 | /* Are we removing the last element? */ | |
1794 | if (p == G.page_tails[order]) | |
1795 | G.page_tails[order] = previous; | |
1796 | free_page (p); | |
1797 | p = previous; | |
1798 | } | |
1799 | ||
1800 | /* If the page is full, move it to the end. */ | |
1801 | else if (p->num_free_objects == 0) | |
1802 | { | |
1803 | /* Don't move it if it's already at the end. */ | |
1804 | if (p != G.page_tails[order]) | |
1805 | { | |
1806 | /* Move p to the end of the list. */ | |
1807 | p->next = NULL; | |
9bf793f9 | 1808 | p->prev = G.page_tails[order]; |
21341cfd AS |
1809 | G.page_tails[order]->next = p; |
1810 | ||
1811 | /* Update the tail pointer... */ | |
1812 | G.page_tails[order] = p; | |
1813 | ||
1814 | /* ... and the head pointer, if necessary. */ | |
1815 | if (! previous) | |
1816 | G.pages[order] = next; | |
1817 | else | |
1818 | previous->next = next; | |
9bf793f9 JL |
1819 | |
1820 | /* And update the backpointer in NEXT if necessary. */ | |
1821 | if (next) | |
1822 | next->prev = previous; | |
1823 | ||
21341cfd AS |
1824 | p = previous; |
1825 | } | |
1826 | } | |
1827 | ||
1828 | /* If we've fallen through to here, it's a page in the | |
1829 | topmost context that is neither full nor empty. Such a | |
1830 | page must precede pages at lesser context depth in the | |
1831 | list, so move it to the head. */ | |
1832 | else if (p != G.pages[order]) | |
1833 | { | |
1834 | previous->next = p->next; | |
9bf793f9 JL |
1835 | |
1836 | /* Update the backchain in the next node if it exists. */ | |
1837 | if (p->next) | |
1838 | p->next->prev = previous; | |
1839 | ||
1840 | /* Move P to the head of the list. */ | |
21341cfd | 1841 | p->next = G.pages[order]; |
9bf793f9 JL |
1842 | p->prev = NULL; |
1843 | G.pages[order]->prev = p; | |
1844 | ||
1845 | /* Update the head pointer. */ | |
21341cfd | 1846 | G.pages[order] = p; |
9bf793f9 | 1847 | |
21341cfd AS |
1848 | /* Are we moving the last element? */ |
1849 | if (G.page_tails[order] == p) | |
1850 | G.page_tails[order] = previous; | |
1851 | p = previous; | |
1852 | } | |
1853 | ||
1854 | previous = p; | |
1855 | p = next; | |
589005ff | 1856 | } |
21341cfd | 1857 | while (! done); |
4934cc53 MM |
1858 | |
1859 | /* Now, restore the in_use_p vectors for any pages from contexts | |
1860 | other than the current one. */ | |
1861 | for (p = G.pages[order]; p; p = p->next) | |
1862 | if (p->context_depth != G.context_depth) | |
1863 | ggc_recalculate_in_use_p (p); | |
21341cfd AS |
1864 | } |
1865 | } | |
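
/* A hypothetical debug-only sketch (not in the collector) of the list
   invariant sweep_pages maintains: within an order, no completely full
   page may precede a partially free page of the topmost context.  */

static void
check_page_order_invariant (unsigned order)
{
  page_entry *p;
  int seen_full = 0;

  for (p = G.pages[order]; p != NULL; p = p->next)
    {
      if (p->num_free_objects == 0)
	seen_full = 1;
      else if (seen_full && p->context_depth == G.context_depth)
	abort ();
    }
}
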
1866 | ||
3788cc17 | 1867 | #ifdef ENABLE_GC_CHECKING |
cb2ec151 RH |
1868 | /* Clobber all free objects. */ |
1869 | ||
685fe032 | 1870 | static void |
20c1dc5e | 1871 | poison_pages (void) |
21341cfd AS |
1872 | { |
1873 | unsigned order; | |
1874 | ||
2be510b8 | 1875 | for (order = 2; order < NUM_ORDERS; order++) |
21341cfd | 1876 | { |
2be510b8 | 1877 | size_t size = OBJECT_SIZE (order); |
21341cfd AS |
1878 | page_entry *p; |
1879 | ||
1880 | for (p = G.pages[order]; p != NULL; p = p->next) | |
1881 | { | |
17211ab5 | 1882 | size_t num_objects; |
21341cfd | 1883 | size_t i; |
c831fdea MM |
1884 | |
1885 | if (p->context_depth != G.context_depth) | |
1886 | /* Since we don't do any collection for pages in pushed | |
1887 | contexts, there's no need to do any poisoning. And | |
1888 | besides, the IN_USE_P array isn't valid until we pop | |
1889 | contexts. */ | |
1890 | continue; | |
1891 | ||
17211ab5 | 1892 | num_objects = OBJECTS_IN_PAGE (p); |
21341cfd AS |
1893 | for (i = 0; i < num_objects; i++) |
1894 | { | |
1895 | size_t word, bit; | |
1896 | word = i / HOST_BITS_PER_LONG; | |
1897 | bit = i % HOST_BITS_PER_LONG; | |
1898 | if (((p->in_use_p[word] >> bit) & 1) == 0) | |
9a0a7d5d HPN |
1899 | { |
1900 | char *object = p->page + i * size; | |
1901 | ||
1902 | /* Keep poison-by-write when we expect to use Valgrind, | |
1903 | so the exact same memory semantics are kept, in case | |
1904 | there are memory errors. We override this request | |
1905 | below. */ | |
1906 | VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size)); | |
1907 | memset (object, 0xa5, size); | |
1908 | ||
1909 | /* Drop the handle to avoid handle leak. */ | |
1910 | VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size)); | |
1911 | } | |
21341cfd AS |
1912 | } |
1913 | } | |
1914 | } | |
1915 | } | |
685fe032 RH |
1916 | #else |
1917 | #define poison_pages() | |
1918 | #endif | |
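
/* An illustrative checker (hypothetical, not in the collector): after a
   collection with poisoning enabled, a freed object should still carry
   the 0xa5 pattern; any other byte means something wrote through a
   stale pointer since the last collection.  */

static int
object_is_still_poisoned (const unsigned char *object, size_t size)
{
  size_t i;

  for (i = 0; i < size; i++)
    if (object[i] != 0xa5)
      return 0;
  return 1;
}
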
1919 | ||
1920 | #ifdef ENABLE_GC_ALWAYS_COLLECT | |
1921 | /* Validate that the reportedly free objects actually are. */ | |
1922 | ||
1923 | static void | |
1924 | validate_free_objects (void) | |
1925 | { | |
1926 | struct free_object *f, *next, *still_free = NULL; | |
1927 | ||
1928 | for (f = G.free_object_list; f ; f = next) | |
1929 | { | |
1930 | page_entry *pe = lookup_page_table_entry (f->object); | |
1931 | size_t bit, word; | |
1932 | ||
1933 | bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order); | |
1934 | word = bit / HOST_BITS_PER_LONG; | |
1935 | bit = bit % HOST_BITS_PER_LONG; | |
1936 | next = f->next; | |
1937 | ||
1938 | /* Make certain it isn't visible from any root. Notice that we | |
1939 | do this check before sweep_pages merges save_in_use_p. */ | |
1940 | if (pe->in_use_p[word] & (1UL << bit)) | |
1941 | abort (); | |
1942 | ||
1943 | /* If the object comes from an outer context, then retain the | |
1944 | free_object entry, so that we can verify that the address | |
1945 | isn't live on the stack in some outer context. */ | |
1946 | if (pe->context_depth != G.context_depth) | |
1947 | { | |
1948 | f->next = still_free; | |
1949 | still_free = f; | |
1950 | } | |
1951 | else | |
1952 | free (f); | |
1953 | } | |
1954 | ||
1955 | G.free_object_list = still_free; | |
1956 | } | |
1957 | #else | |
1958 | #define validate_free_objects() | |
21341cfd AS |
1959 | #endif |
1960 | ||
cb2ec151 RH |
1961 | /* Top level mark-and-sweep routine. */ |
1962 | ||
21341cfd | 1963 | void |
20c1dc5e | 1964 | ggc_collect (void) |
21341cfd | 1965 | { |
21341cfd AS |
1966 | /* Avoid frequent unnecessary work by skipping collection if the |
1967 | total allocations haven't expanded much since the last | |
1968 | collection. */ | |
19cc0dd4 | 1969 | float allocated_last_gc = |
3788cc17 ZW |
1970 | MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024); |
1971 | ||
19cc0dd4 | 1972 | float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100; |
3788cc17 ZW |
1973 | |
1974 | if (G.allocated < allocated_last_gc + min_expand) | |
21341cfd | 1975 | return; |
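
  /* Worked example (hedged, using the usual defaults of this era:
     GGC_MIN_HEAPSIZE == 4096 kB, GGC_MIN_EXPAND == 30): on the first
     call allocated_last_gc is max(0, 4 MB) == 4 MB and min_expand is
     1.2 MB, so nothing is collected until more than 5.2 MB is live.  */
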
21341cfd | 1976 | |
2a9a326b | 1977 | timevar_push (TV_GC); |
21341cfd | 1978 | if (!quiet_flag) |
b9bfacf0 | 1979 | fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024); |
685fe032 RH |
1980 | if (GGC_DEBUG_LEVEL >= 2) |
1981 | fprintf (G.debug_file, "BEGIN COLLECTING\n"); | |
21341cfd | 1982 | |
054f5e69 ZW |
1983 | /* Zero the total allocated bytes. This will be recalculated in the |
1984 | sweep phase. */ | |
21341cfd AS |
1985 | G.allocated = 0; |
1986 | ||
589005ff | 1987 | /* Release the pages we freed the last time we collected, but didn't |
21341cfd AS |
1988 | reuse in the interim. */ |
1989 | release_pages (); | |
1990 | ||
52895e1a RH |
1991 | /* Indicate that we've seen collections at this context depth. */ |
1992 | G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1; | |
1993 | ||
21341cfd AS |
1994 | clear_marks (); |
1995 | ggc_mark_roots (); | |
21341cfd | 1996 | poison_pages (); |
685fe032 | 1997 | validate_free_objects (); |
cb2ec151 RH |
1998 | sweep_pages (); |
1999 | ||
21341cfd AS |
2000 | G.allocated_last_gc = G.allocated; |
2001 | ||
2a9a326b | 2002 | timevar_pop (TV_GC); |
21341cfd | 2003 | |
21341cfd | 2004 | if (!quiet_flag) |
2a9a326b | 2005 | fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024); |
685fe032 RH |
2006 | if (GGC_DEBUG_LEVEL >= 2) |
2007 | fprintf (G.debug_file, "END COLLECTING\n"); | |
21341cfd | 2008 | } |
3277221c MM |
2009 | |
2010 | /* Print allocation statistics. */ | |
fba0bfd4 ZW |
2011 | #define SCALE(x) ((unsigned long) ((x) < 1024*10 \ |
2012 | ? (x) \ | |
2013 | : ((x) < 1024*1024*10 \ | |
2014 | ? (x) / 1024 \ | |
2015 | : (x) / (1024*1024)))) | |
2016 | #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M')) | |
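
/* For example: 5,000 bytes prints as "5000" with label ' ', 153,600
   bytes as "150" with label 'k', and 20,971,520 bytes as "20" with
   label 'M' -- values stay below five digits at the cost of rounding.  */
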
3277221c MM |
2017 | |
2018 | void | |
20c1dc5e | 2019 | ggc_print_statistics (void) |
3277221c MM |
2020 | { |
2021 | struct ggc_statistics stats; | |
4934cc53 | 2022 | unsigned int i; |
fba0bfd4 | 2023 | size_t total_overhead = 0; |
3277221c MM |
2024 | |
2025 | /* Clear the statistics. */ | |
d219c7f1 | 2026 | memset (&stats, 0, sizeof (stats)); |
589005ff | 2027 | |
3277221c MM |
2028 | /* Make sure collection will really occur. */ |
2029 | G.allocated_last_gc = 0; | |
2030 | ||
2031 | /* Collect and print the statistics common across collectors. */ | |
fba0bfd4 | 2032 | ggc_print_common_statistics (stderr, &stats); |
3277221c | 2033 | |
4934cc53 MM |
2034 | /* Release free pages so that we will not count the bytes allocated |
2035 | there as part of the total allocated memory. */ | |
2036 | release_pages (); | |
2037 | ||
589005ff | 2038 | /* Collect some information about the various sizes of |
3277221c | 2039 | allocation. */ |
439a7e54 DN |
2040 | fprintf (stderr, |
2041 | "Memory still allocated at the end of the compilation process\n"); | |
adc4adcd | 2042 | fprintf (stderr, "%-5s %10s %10s %10s\n", |
9fd51e67 | 2043 | "Size", "Allocated", "Used", "Overhead"); |
2be510b8 | 2044 | for (i = 0; i < NUM_ORDERS; ++i) |
3277221c MM |
2045 | { |
2046 | page_entry *p; | |
2047 | size_t allocated; | |
2048 | size_t in_use; | |
fba0bfd4 | 2049 | size_t overhead; |
3277221c MM |
2050 | |
2051 | /* Skip empty entries. */ | |
2052 | if (!G.pages[i]) | |
2053 | continue; | |
2054 | ||
fba0bfd4 | 2055 | overhead = allocated = in_use = 0; |
3277221c MM |
2056 | |
2057 | /* Figure out the total number of bytes allocated for objects of | |
fba0bfd4 ZW |
2058 | this size, and how many of them are actually in use. Also figure |
2059 | out how much memory the page table is using. */ | |
3277221c MM |
2060 | for (p = G.pages[i]; p; p = p->next) |
2061 | { | |
2062 | allocated += p->bytes; | |
20c1dc5e | 2063 | in_use += |
17211ab5 | 2064 | (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i); |
fba0bfd4 ZW |
2065 | |
2066 | overhead += (sizeof (page_entry) - sizeof (long) | |
17211ab5 | 2067 | + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1)); |
3277221c | 2068 | } |
8a951190 AJ |
2069 | fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n", |
2070 | (unsigned long) OBJECT_SIZE (i), | |
fba0bfd4 ZW |
2071 | SCALE (allocated), LABEL (allocated), |
2072 | SCALE (in_use), LABEL (in_use), | |
2073 | SCALE (overhead), LABEL (overhead)); | |
2074 | total_overhead += overhead; | |
3277221c | 2075 | } |
8a951190 | 2076 | fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total", |
fba0bfd4 ZW |
2077 | SCALE (G.bytes_mapped), LABEL (G.bytes_mapped), |
2078 | SCALE (G.allocated), LABEL(G.allocated), | |
2079 | SCALE (total_overhead), LABEL (total_overhead)); | |
adc4adcd GP |
2080 | |
2081 | #ifdef GATHER_STATISTICS | |
2082 | { | |
439a7e54 DN |
2083 | fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n"); |
2084 | ||
adc4adcd GP |
2085 | fprintf (stderr, "Total Overhead: %10lld\n", |
2086 | G.stats.total_overhead); | |
2087 | fprintf (stderr, "Total Allocated: %10lld\n", | |
2088 | G.stats.total_allocated); | |
2089 | ||
2090 | fprintf (stderr, "Total Overhead under 32B: %10lld\n", | |
2091 | G.stats.total_overhead_under32); | |
2092 | fprintf (stderr, "Total Allocated under 32B: %10lld\n", | |
2093 | G.stats.total_allocated_under32); | |
2094 | fprintf (stderr, "Total Overhead under 64B: %10lld\n", | |
2095 | G.stats.total_overhead_under64); | |
2096 | fprintf (stderr, "Total Allocated under 64B: %10lld\n", | |
2097 | G.stats.total_allocated_under64); | |
2098 | fprintf (stderr, "Total Overhead under 128B: %10lld\n", | |
2099 | G.stats.total_overhead_under128); | |
2100 | fprintf (stderr, "Total Allocated under 128B: %10lld\n", | |
2101 | G.stats.total_allocated_under128); | |
2102 | ||
2103 | for (i = 0; i < NUM_ORDERS; i++) | |
439a7e54 DN |
2104 | if (G.stats.total_allocated_per_order[i]) |
2105 | { | |
2106 | fprintf (stderr, "Total Overhead page size %7lu: %10lld\n", | |
2107 | (unsigned long) OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]); | |
2108 | fprintf (stderr, "Total Allocated page size %7lu: %10lld\n", | |
2109 | (unsigned long) OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]); | |
2110 | } | |
adc4adcd GP |
2111 | } |
2112 | #endif | |
3277221c | 2113 | } |
17211ab5 GK |
2114 | \f |
2115 | struct ggc_pch_data | |
2116 | { | |
20c1dc5e | 2117 | struct ggc_pch_ondisk |
17211ab5 GK |
2118 | { |
2119 | unsigned totals[NUM_ORDERS]; | |
2120 | } d; | |
2121 | size_t base[NUM_ORDERS]; | |
2122 | size_t written[NUM_ORDERS]; | |
2123 | }; | |
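
/* The size-to-order computation is repeated verbatim in
   ggc_pch_count_object, ggc_pch_alloc_object and ggc_pch_write_object
   below; a hedged sketch of the common helper (hypothetical name, not
   in this file) makes the shared logic explicit:  */

static unsigned
pch_order_for_size (size_t size)
{
  unsigned order;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }
  return order;
}
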
2124 | ||
2125 | struct ggc_pch_data * | |
20c1dc5e | 2126 | init_ggc_pch (void) |
17211ab5 GK |
2127 | { |
2128 | return xcalloc (sizeof (struct ggc_pch_data), 1); | |
2129 | } | |
2130 | ||
20c1dc5e AJ |
2131 | void |
2132 | ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, | |
b6f61163 | 2133 | size_t size, bool is_string ATTRIBUTE_UNUSED) |
17211ab5 GK |
2134 | { |
2135 | unsigned order; | |
2136 | ||
2137 | if (size <= 256) | |
2138 | order = size_lookup[size]; | |
2139 | else | |
2140 | { | |
2141 | order = 9; | |
2142 | while (size > OBJECT_SIZE (order)) | |
2143 | order++; | |
2144 | } | |
20c1dc5e | 2145 | |
17211ab5 GK |
2146 | d->d.totals[order]++; |
2147 | } | |
20c1dc5e | 2148 | |
17211ab5 | 2149 | size_t |
20c1dc5e | 2150 | ggc_pch_total_size (struct ggc_pch_data *d) |
17211ab5 GK |
2151 | { |
2152 | size_t a = 0; | |
2153 | unsigned i; | |
2154 | ||
2155 | for (i = 0; i < NUM_ORDERS; i++) | |
2156 | a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize); | |
2157 | return a; | |
2158 | } | |
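
/* Worked example (hypothetical numbers, 4096-byte pages): 1,000 objects
   of size 32 occupy ROUND_UP (32000, 4096) == 32768 bytes, i.e. eight
   whole pages, since each order is padded to a page boundary.  */
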
2159 | ||
2160 | void | |
20c1dc5e | 2161 | ggc_pch_this_base (struct ggc_pch_data *d, void *base) |
17211ab5 GK |
2162 | { |
2163 | size_t a = (size_t) base; | |
2164 | unsigned i; | |
20c1dc5e | 2165 | |
17211ab5 GK |
2166 | for (i = 0; i < NUM_ORDERS; i++) |
2167 | { | |
2168 | d->base[i] = a; | |
2169 | a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize); | |
2170 | } | |
2171 | } | |
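
/* Continuing the example above: if the 32-byte order holds 1,000
   objects and the next order is 64 bytes, then base[] for the 32-byte
   order is BASE itself and base[] for the 64-byte order starts exactly
   32768 bytes -- eight pages -- later.  */
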
2172 | ||
2173 | ||
2174 | char * | |
20c1dc5e | 2175 | ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, |
b6f61163 | 2176 | size_t size, bool is_string ATTRIBUTE_UNUSED) |
17211ab5 GK |
2177 | { |
2178 | unsigned order; | |
2179 | char *result; | |
20c1dc5e | 2180 | |
17211ab5 GK |
2181 | if (size <= 256) |
2182 | order = size_lookup[size]; | |
2183 | else | |
2184 | { | |
2185 | order = 9; | |
2186 | while (size > OBJECT_SIZE (order)) | |
2187 | order++; | |
2188 | } | |
2189 | ||
2190 | result = (char *) d->base[order]; | |
2191 | d->base[order] += OBJECT_SIZE (order); | |
2192 | return result; | |
2193 | } | |
2194 | ||
20c1dc5e AJ |
2195 | void |
2196 | ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED, | |
2197 | FILE *f ATTRIBUTE_UNUSED) | |
17211ab5 GK |
2198 | { |
2199 | /* Nothing to do. */ | |
2200 | } | |
2201 | ||
2202 | void | |
20c1dc5e AJ |
2203 | ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, |
2204 | FILE *f, void *x, void *newx ATTRIBUTE_UNUSED, | |
b6f61163 | 2205 | size_t size, bool is_string ATTRIBUTE_UNUSED) |
17211ab5 GK |
2206 | { |
2207 | unsigned order; | |
674c7ef1 | 2208 | static const char emptyBytes[256]; |
17211ab5 GK |
2209 | |
2210 | if (size <= 256) | |
2211 | order = size_lookup[size]; | |
2212 | else | |
2213 | { | |
2214 | order = 9; | |
2215 | while (size > OBJECT_SIZE (order)) | |
2216 | order++; | |
2217 | } | |
20c1dc5e | 2218 | |
17211ab5 | 2219 | if (fwrite (x, size, 1, f) != 1) |
fa6ef813 | 2220 | fatal_error ("can't write PCH file: %m"); |
17211ab5 | 2221 | |
674c7ef1 | 2222 | /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the |
0ee55ad8 | 2223 | object out to OBJECT_SIZE(order). This happens for strings. */ |
674c7ef1 RB |
2224 | |
2225 | if (size != OBJECT_SIZE (order)) | |
2226 | { | |
2227 | unsigned padding = OBJECT_SIZE(order) - size; | |
2228 | ||
2229 | /* To speed small writes, we use a nulled-out array that's larger | |
2230 | than most padding requests as the source for our null bytes. This | |
2231 | permits us to do the padding with fwrite() rather than fseek(), and | |
2232 | limits the chance that the OS may try to flush any outstanding | |
0ee55ad8 | 2233 | writes. */ |
674c7ef1 RB |
2234 | if (padding <= sizeof(emptyBytes)) |
2235 | { | |
2236 | if (fwrite (emptyBytes, 1, padding, f) != padding) | |
2237 | fatal_error ("can't write PCH file"); | |
2238 | } | |
2239 | else | |
2240 | { | |
0ee55ad8 | 2241 | /* Larger than our buffer? Just default to fseek. */ |
674c7ef1 RB |
2242 | if (fseek (f, padding, SEEK_CUR) != 0) |
2243 | fatal_error ("can't write PCH file"); | |
2244 | } | |
2245 | } | |
17211ab5 GK |
2246 | |
2247 | d->written[order]++; | |
2248 | if (d->written[order] == d->d.totals[order] | |
2249 | && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order), | |
2250 | G.pagesize), | |
2251 | SEEK_CUR) != 0) | |
fa6ef813 | 2252 | fatal_error ("can't write PCH file: %m"); |
17211ab5 GK |
2253 | } |
2254 | ||
2255 | void | |
20c1dc5e | 2256 | ggc_pch_finish (struct ggc_pch_data *d, FILE *f) |
17211ab5 GK |
2257 | { |
2258 | if (fwrite (&d->d, sizeof (d->d), 1, f) != 1) | |
fa6ef813 | 2259 | fatal_error ("can't write PCH file: %m"); |
17211ab5 GK |
2260 | free (d); |
2261 | } | |
2262 | ||
c4775f82 MS |
2263 | /* Move the PCH PTE entries just added to the end of by_depth, to the |
2264 | front. */ | |
2265 | ||
2266 | static void | |
20c1dc5e | 2267 | move_ptes_to_front (int count_old_page_tables, int count_new_page_tables) |
c4775f82 MS |
2268 | { |
2269 | unsigned i; | |
2270 | ||
2271 | /* First, we swap the new entries to the front of the varrays. */ | |
2272 | page_entry **new_by_depth; | |
2273 | unsigned long **new_save_in_use; | |
2274 | ||
703ad42b KG |
2275 | new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *)); |
2276 | new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *)); | |
c4775f82 MS |
2277 | |
2278 | memcpy (&new_by_depth[0], | |
2279 | &G.by_depth[count_old_page_tables], | |
2280 | count_new_page_tables * sizeof (void *)); | |
2281 | memcpy (&new_by_depth[count_new_page_tables], | |
2282 | &G.by_depth[0], | |
2283 | count_old_page_tables * sizeof (void *)); | |
2284 | memcpy (&new_save_in_use[0], | |
2285 | &G.save_in_use[count_old_page_tables], | |
2286 | count_new_page_tables * sizeof (void *)); | |
2287 | memcpy (&new_save_in_use[count_new_page_tables], | |
2288 | &G.save_in_use[0], | |
2289 | count_old_page_tables * sizeof (void *)); | |
2290 | ||
2291 | free (G.by_depth); | |
2292 | free (G.save_in_use); | |
20c1dc5e | 2293 | |
c4775f82 MS |
2294 | G.by_depth = new_by_depth; |
2295 | G.save_in_use = new_save_in_use; | |
2296 | ||
2297 | /* Now update all the index_by_depth fields. */ | |
2298 | for (i = G.by_depth_in_use; i > 0; --i) | |
2299 | { | |
2300 | page_entry *p = G.by_depth[i-1]; | |
2301 | p->index_by_depth = i-1; | |
2302 | } | |
2303 | ||
2304 | /* And last, we update the depth pointers in G.depth. The first | |
2305 | entry is already 0, and context 0 entries always start at index | |
2306 | 0, so there is nothing to update in the first slot. We need a | |
2307 | second slot only if we have old ptes, and if we do, they start | |
2308 | at index count_new_page_tables. */ | |
2309 | if (count_old_page_tables) | |
2310 | push_depth (count_new_page_tables); | |
2311 | } | |
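
/* Worked example (hedged): with three old PTEs O1 O2 O3 and two new
   ones N1 N2, by_depth goes from [O1 O2 O3 N1 N2] to [N1 N2 O1 O2 O3];
   the old entries, now at context depth 1, start at index 2, which is
   exactly what push_depth records.  */
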
2312 | ||
17211ab5 | 2313 | void |
20c1dc5e | 2314 | ggc_pch_read (FILE *f, void *addr) |
17211ab5 GK |
2315 | { |
2316 | struct ggc_pch_ondisk d; | |
2317 | unsigned i; | |
2318 | char *offs = addr; | |
c4775f82 MS |
2319 | unsigned long count_old_page_tables; |
2320 | unsigned long count_new_page_tables; | |
2321 | ||
2322 | count_old_page_tables = G.by_depth_in_use; | |
2323 | ||
2324 | /* We've just read in a PCH file. So, every object that used to be | |
2325 | allocated is now free. */ | |
17211ab5 | 2326 | clear_marks (); |
c5d6d04a | 2327 | #ifdef ENABLE_GC_CHECKING |
17211ab5 GK |
2328 | poison_pages (); |
2329 | #endif | |
2330 | ||
2331 | /* No object read from a PCH file should ever be freed. So, set the | |
2332 | context depth to 1, and set the depth of all the currently-allocated | |
2333 | pages to be 1 too. PCH pages will have depth 0. */ | |
2334 | if (G.context_depth != 0) | |
2335 | abort (); | |
2336 | G.context_depth = 1; | |
2337 | for (i = 0; i < NUM_ORDERS; i++) | |
2338 | { | |
2339 | page_entry *p; | |
2340 | for (p = G.pages[i]; p != NULL; p = p->next) | |
2341 | p->context_depth = G.context_depth; | |
2342 | } | |
2343 | ||
2344 | /* Allocate the appropriate page-table entries for the pages read from | |
2345 | the PCH file. */ | |
2346 | if (fread (&d, sizeof (d), 1, f) != 1) | |
fa6ef813 | 2347 | fatal_error ("can't read PCH file: %m"); |
20c1dc5e | 2348 | |
17211ab5 GK |
2349 | for (i = 0; i < NUM_ORDERS; i++) |
2350 | { | |
2351 | struct page_entry *entry; | |
2352 | char *pte; | |
2353 | size_t bytes; | |
2354 | size_t num_objs; | |
2355 | size_t j; | |
c4775f82 | 2356 | |
17211ab5 GK |
2357 | if (d.totals[i] == 0) |
2358 | continue; | |
c4775f82 | 2359 | |
17211ab5 GK |
2360 | bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize); |
2361 | num_objs = bytes / OBJECT_SIZE (i); | |
20c1dc5e | 2362 | entry = xcalloc (1, (sizeof (struct page_entry) |
17211ab5 GK |
2363 | - sizeof (long) |
2364 | + BITMAP_SIZE (num_objs + 1))); | |
2365 | entry->bytes = bytes; | |
2366 | entry->page = offs; | |
2367 | entry->context_depth = 0; | |
2368 | offs += bytes; | |
2369 | entry->num_free_objects = 0; | |
2370 | entry->order = i; | |
2371 | ||
20c1dc5e | 2372 | for (j = 0; |
17211ab5 GK |
2373 | j + HOST_BITS_PER_LONG <= num_objs + 1; |
2374 | j += HOST_BITS_PER_LONG) | |
2375 | entry->in_use_p[j / HOST_BITS_PER_LONG] = -1; | |
2376 | for (; j < num_objs + 1; j++) | |
20c1dc5e | 2377 | entry->in_use_p[j / HOST_BITS_PER_LONG] |
17211ab5 GK |
2378 | |= 1L << (j % HOST_BITS_PER_LONG); |
2379 | ||
20c1dc5e AJ |
2380 | for (pte = entry->page; |
2381 | pte < entry->page + entry->bytes; | |
17211ab5 GK |
2382 | pte += G.pagesize) |
2383 | set_page_table_entry (pte, entry); | |
2384 | ||
2385 | if (G.page_tails[i] != NULL) | |
2386 | G.page_tails[i]->next = entry; | |
2387 | else | |
2388 | G.pages[i] = entry; | |
2389 | G.page_tails[i] = entry; | |
c4775f82 MS |
2390 | |
2391 | /* We start off by just adding all the new information to the | |
2392 | end of the varrays; later, we will move the new information | |
2393 | to the front of the varrays, as the PCH page tables are at | |
2394 | context 0. */ | |
2395 | push_by_depth (entry, 0); | |
17211ab5 GK |
2396 | } |
2397 | ||
c4775f82 MS |
2398 | /* Now, we update the various data structures that speed page table |
2399 | handling. */ | |
2400 | count_new_page_tables = G.by_depth_in_use - count_old_page_tables; | |
2401 | ||
2402 | move_ptes_to_front (count_old_page_tables, count_new_page_tables); | |
2403 | ||
17211ab5 GK |
2404 | /* Update the statistics. */ |
2405 | G.allocated = G.allocated_last_gc = offs - (char *)addr; | |
2406 | } |