/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"

#ifdef HAVE_MMAP_ANYWHERE
#include <sys/mman.h>
#endif

#ifndef MAP_FAILED
#define MAP_FAILED -1
#endif

#if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */


/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

#ifdef ENABLE_GC_CHECKING
#define GGC_POISON
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
#define GGC_ALWAYS_COLLECT
#endif

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

/* The "" allocated string.  */
char *empty_string;

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                    HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                              |     |
                   PAGE_L1_BITS     |
                                    |
                         PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS (8)
#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))

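/* A worked example (the 4096-byte page size is an assumption for
   illustration only; the real value comes from getpagesize at run
   time, here giving G.lg_pagesize == 12 and PAGE_L2_BITS == 12):
   for the 32-bit pointer 0xbfb41234,

     LOOKUP_L1 (p) == (0xbfb41234 >> 24) & 0xff  == 0xbf
     LOOKUP_L2 (p) == (0xbfb41234 >> 12) & 0xfff == 0xb41

   so on a 32-bit host its page entry is found at
   G.lookup[0xbf][0xb41], while the low 12 bits (0x234) are the
   object's offset within its page and play no part in the lookup.  */
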
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;


#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[HOST_BITS_PER_PTR];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[HOST_BITS_PER_PTR];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;


/* Compute DIVIDEND / DIVISOR, rounded up.  */
#define DIV_ROUND_UP(Dividend, Divisor) \
  (((Dividend) + (Divisor) - 1) / (Divisor))

/* The number of objects per allocation page, for objects of size
   2^ORDER.  */
#define OBJECTS_PER_PAGE(Order) \
  ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t) 1 << (Order)))

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))

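/* For illustration (assuming, purely as an example, a 4096-byte page
   and a 32-bit long): order-5 objects are 32 bytes each, so
   OBJECTS_PER_PAGE (5) == 4096 / 32 == 128, and the bitmap covering
   those objects plus the one-past-the-end sentinel bit needs
   BITMAP_SIZE (128 + 1) == DIV_ROUND_UP (129, 32) * sizeof (long)
   == 5 * 4 == 20 bytes.  */
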
/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
   test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)

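/* Taken together, these two parameters mean that with a freshly
   initialized heap (allocated_last_gc floored at 4MB), ggc_collect
   returns immediately until G.allocated exceeds roughly
   1.3 * 4MB = 5.2MB; thereafter each collection requires another 30%
   of growth over the amount live at the end of the previous one.  */
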
/* Allocate pages in chunks of this size, to throttle calls to mmap.
   The first page is used, the rest go onto the free list.  */
#define GGC_QUIRE_SIZE 16

static int ggc_allocated_p PARAMS ((const void *));
static page_entry *lookup_page_table_entry PARAMS ((const void *));
static void set_page_table_entry PARAMS ((void *, page_entry *));
static char *alloc_anon PARAMS ((char *, size_t));
static struct page_entry *alloc_page PARAMS ((unsigned));
static void free_page PARAMS ((struct page_entry *));
static void release_pages PARAMS ((void));
static void clear_marks PARAMS ((void));
static void sweep_pages PARAMS ((void));
static void ggc_recalculate_in_use_p PARAMS ((page_entry *));

#ifdef GGC_POISON
static void poison_pages PARAMS ((void));
#endif

void debug_print_page_list PARAMS ((int));

/* Returns non-zero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof (*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
          (PTR) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
              p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  */

static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
  char *page;

#ifdef HAVE_MMAP_ANYWHERE
#ifdef MAP_ANONYMOUS
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  if (page == (char *) MAP_FAILED)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }
#else
#ifdef HAVE_VALLOC
  page = (char *) valloc (size);
  if (!page)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP_ANYWHERE */

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  return page;
}

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * (1 << order);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;
      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef HAVE_MMAP_ANYWHERE
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, entry_size * GGC_QUIRE_SIZE);
      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
        {
          e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
          e->bytes = entry_size;
          e->page = page + i * entry_size;
          e->next = f;
          f = e;
        }
      G.free_pages = f;
    }
#endif
  else
    page = alloc_anon (NULL, entry_size);

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%d, data %p-%p\n",
             (PTR) entry, 1 << order, page, page + entry_size - 1);

  return entry;
}

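/* To make the sizing above concrete (assuming, for the sake of the
   example, a 4096-byte page and a 32-bit long): a page of order-3
   (8-byte) objects has num_objects == 512, entry_size == 512 * 8
   == 4096 (exactly one system page), and bitmap_size ==
   BITMAP_SIZE (513) == 17 * 4 == 68 bytes; the sentry set above is
   bit 512, i.e. bit 0 of in_use_p[16].  */
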
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (PTR) entry,
             entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages ()
{
  page_entry *p, *next;

#ifdef HAVE_MMAP_ANYWHERE
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      G.bytes_mapped -= len;
    }
#else
#ifdef HAVE_VALLOC

  for (p = G.free_pages; p; p = next)
    {
      next = p->next;
      free (p->page);
      G.bytes_mapped -= p->bytes;
      free (p);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP_ANYWHERE */

  G.free_pages = NULL;
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is four bytes.  */

static unsigned char const size_lookup[257] =
{
  2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};

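/* For example, size_lookup[1] through size_lookup[4] are all 2, so a
   3-byte request is rounded up to a 4-byte (order 2) object, and
   size_lookup[33] is 6, so a 33-byte request becomes a 64-byte
   (order 6) object.  Requests larger than 256 bytes skip the table
   and compute the order with a loop in ggc_alloc below.  */
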
/* Allocate a chunk of memory of SIZE bytes.  Its contents are
   undefined.  */

void *
ggc_alloc (size)
     size_t size;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > ((size_t) 1 << order))
        order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint << order;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, 1 << order);
#endif

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += (size_t) 1 << order;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%d, actual=%d at %p on %p\n",
             (int) size, 1 << order, result, (PTR) entry);

  return result;
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = (((const char *) p) - entry->page) >> entry->order;
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

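/* To illustrate the index arithmetic above (the 64-bit long is an
   assumption for the example only): for the object at index 70 on a
   page of order-4 (16-byte) objects, p - entry->page == 70 * 16
   == 1120, so bit == 1120 >> 4 == 70, word == 70 / 64 == 1, and
   mask == 1UL << (70 % 64) == 1UL << 6; the mark therefore lives in
   bit 6 of entry->in_use_p[1].  */
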
/* Mark P, but check first that it was allocated by the collector.  */

void
ggc_mark_if_gcable (p)
     const void *p;
{
  if (p && ggc_allocated_p (p))
    ggc_set_mark (p);
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (p)
     const void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return 1 << pe->order;
}

/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

#ifdef HAVE_MMAP_ANYWHERE
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    if ((size_t) p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize);
        if ((size_t) p & (G.pagesize - 1))
          abort ();
      }
    munmap (p, G.pagesize);
  }
#endif

  empty_string = ggc_alloc_string ("", 0);
  ggc_add_string_root (&empty_string, 1);
}

/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (p)
     page_entry *p;
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_PER_PAGE (p->order) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < DIV_ROUND_UP (BITMAP_SIZE (num_objects),
                         sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= p->save_in_use_p[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  if (p->num_free_objects >= num_objects)
    abort ();
}

/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          if (p->context_depth > depth)
            p->context_depth = depth;

          /* If this page is now in the topmost context, and we'd
             saved its allocation state, restore it.  */
          else if (p->context_depth == depth && p->save_in_use_p)
            {
              ggc_recalculate_in_use_p (p);
              free (p->save_in_use_p);
              p->save_in_use_p = 0;
            }
        }
    }
}

/* Unmark all objects.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
#ifdef ENABLE_CHECKING
          /* The data should be page-aligned.  */
          if ((size_t) p->page & (G.pagesize - 1))
            abort ();
#endif

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! p->save_in_use_p)
                p->save_in_use_p = xmalloc (bitmap_size);
              memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}

/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          /* Add all live objects on this page to the count of
             allocated memory.  */
          live_objects = num_objects - p->num_free_objects;

          G.allocated += ((size_t) 1 << order) * live_objects;

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (live_objects == 0)
            {
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;
                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;
              p->next = G.pages[order];
              G.pages[order] = p;
              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
        if (p->context_depth != G.context_depth)
          ggc_recalculate_in_use_p (p);
    }
}

#ifdef GGC_POISON
/* Clobber all free objects.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = (size_t) 1 << order;
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                memset (p->page + i * size, 0xa5, size);
            }
        }
    }
}
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  sweep_pages ();

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
}

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

void
ggc_print_statistics ()
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr, "\n%-5s %10s %10s %10s\n",
           "Log", "Allocated", "Used", "Overhead");
  for (i = 0; i < HOST_BITS_PER_PTR; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_PER_PAGE (i) - p->num_free_objects) * (1 << i);

          overhead += (sizeof (page_entry) - sizeof (long)
                       + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
        }
      fprintf (stderr, "%-5d %10ld%c %10ld%c %10ld%c\n", i,
               SCALE (allocated), LABEL (allocated),
               SCALE (in_use), LABEL (in_use),
               SCALE (overhead), LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10ld%c %10ld%c %10ld%c\n", "Total",
           SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
           SCALE (G.allocated), LABEL (G.allocated),
           SCALE (total_overhead), LABEL (total_overhead));
}