/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#define DEBUG
#undef DEBUG
#include <stdio.h>
#include "gc_priv.h"

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */

# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
        /* largest block we will allocate starting on a black */
        /* listed block.  Must be >= HBLKSIZE.                */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list. */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a    */
        /* single free list.                                          */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single  */
        /* bin.                                                       */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                 + UNIQUE_THRESHOLD

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                        + UNIQUE_THRESHOLD;
}
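
/* For illustration, with the defaults above:                          */
/* N_HBLK_FLS = (256-32)/8 + 32 = 60, and the mapping works out to     */
/*      blocks_needed =   5  -->  index 5   (one list per size)        */
/*      blocks_needed =  40  -->  index (40-32)/8 + 32 = 33            */
/*      blocks_needed = 100  -->  index (100-32)/8 + 32 = 40           */
/*      blocks_needed = 300  -->  index 60  (shared huge-block list)   */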

# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */

# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
        h = GC_hblkfreelist[i];
        if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
        while (h != 0) {
            hhdr = HDR(h);
            sz = hhdr -> hb_sz;
            GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
            total_free += sz;
            if (GC_is_black_listed(h, HBLKSIZE) != 0) {
                GC_printf0("start black listed\n");
            } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
                GC_printf0("partially black listed\n");
            } else {
                GC_printf0("not black listed\n");
            }
            h = hhdr -> hb_next;
        }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
        h = GC_hblkfreelist[i];
        while (h != 0) {
            hhdr = HDR(h);
            if (hhdr == wanted) return i;
            h = hhdr -> hb_next;
        }
    }
    return -1;
}

void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
        while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
        }
        GC_printf2("***Section from 0x%lx to 0x%lx\n",
                   (unsigned long)start, (unsigned long)end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf1("Missing header 0x%lx!!\n", (unsigned long)hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               (unsigned long)correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               (unsigned long)actual_index,
                               (unsigned long)correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and */
/* kind of objects.                                              */
/* Return FALSE on failure.                                      */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers. */
    if (!GC_add_map_entry(sz)) return(FALSE);
    hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields. */
    hhdr -> hb_sz = sz;
    hhdr -> hb_obj_kind = kind;
    hhdr -> hb_flags = flags;
    descr = GC_obj_kinds[kind].ok_descriptor;
    if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
    hhdr -> hb_descr = descr;

    /* Clear mark bits. */
    GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
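
/* When ok_relocate_descr is set (as for the normal kind, which uses a  */
/* simple length descriptor), the stored descriptor works out to        */
/*      ok_descriptor + WORDS_TO_BYTES(sz)                              */
/* i.e. the kind's tag bits plus the object size in bytes, so the mark  */
/* routine can recover the object length directly from the header.      */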

#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    if (hhdr -> hb_prev == 0) {
        int index;
        if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
        } else {
            index = n;
        }
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        PHDR(hhdr) -> hb_next = hhdr -> hb_next;
    }
    if (0 != hhdr -> hb_next) {
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        NHDR(hhdr) -> hb_prev = hhdr -> hb_prev;
    }
}
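
/* E.g., for a list  A <-> B <-> C,  GC_remove_from_fl on B leaves      */
/* A <-> C:  A's hb_next and C's hb_prev are spliced around B, and the  */
/* list head is only touched when the head block itself is removed.     */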

/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr = HDR(p);

    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr && HBLK_IS_FREE(phdr)) return p;
    p = GC_prev_block(h - 1);
    if (0 != p) {
        phdr = HDR(p);
        if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
            return p;
        }
    }
    return 0;
}
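
/* Two attempts are made above: if the block just before h still has a  */
/* valid (possibly forwarded) header, it is followed back to the block  */
/* start directly; otherwise GC_prev_block scans backward, and the size */
/* check verifies that the candidate free block ends exactly at h.      */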

/*
 * Add hhdr to the appropriate free list.
 * New blocks are added at the head of the list; adjacency of free
 * blocks is detected via their headers rather than list order.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) HDR(second) -> hb_prev = h;
    GC_invalidate_map(hhdr);
}
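
/* Note the assertions above: a mapped free block should never sit next  */
/* to another mapped free block, since GC_freehblk coalesces eagerly;    */
/* only unmapped neighbors (USE_MUNMAP) may remain separate.             */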

#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only */
/* way blocks are ever unmapped.                                      */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
        for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
            hhdr = HDR(h);
            if (!IS_MAPPED(hhdr)) continue;
            threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
            last_rec = hhdr -> hb_last_reclaimed;
            if (last_rec > GC_gc_no
                || (last_rec < threshold && threshold < GC_gc_no)
                   /* not recently wrapped */) {
                sz = hhdr -> hb_sz;
                GC_unmap((ptr_t)h, sz);
                hhdr -> hb_flags |= WAS_UNMAPPED;
            }
        }
    }
}
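
/* The age test above must tolerate 16-bit wraparound of the GC count.  */
/* E.g., with GC_gc_no = 100 and UNMAP_THRESHOLD = 6, threshold = 94:   */
/* a block with hb_last_reclaimed = 90 (< 94) is unmapped, one with 97  */
/* is kept, and one with a stale count > 100 must have been written     */
/* before the counter wrapped, so it is treated as old and unmapped.    */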

/* Merge all unmapped blocks that are adjacent to other free    */
/* blocks.  This may involve remapping, since all blocks are    */
/* either fully mapped or fully unmapped.                       */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
        h = GC_hblkfreelist[i];
        while (h != 0) {
            hhdr = HDR(h);
            size = hhdr->hb_sz;
            next = (struct hblk *)((word)h + size);
            nexthdr = HDR(next);
            /* Coalesce with successor, if possible. */
            if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
                nextsize = nexthdr -> hb_sz;
                if (IS_MAPPED(hhdr)) {
                    GC_ASSERT(!IS_MAPPED(nexthdr));
                    /* Make both consistent, so that we can merge. */
                    if (size > nextsize) {
                        GC_remap((ptr_t)next, nextsize);
                    } else {
                        GC_unmap((ptr_t)h, size);
                        hhdr -> hb_flags |= WAS_UNMAPPED;
                    }
                } else if (IS_MAPPED(nexthdr)) {
                    GC_ASSERT(!IS_MAPPED(hhdr));
                    if (size > nextsize) {
                        GC_unmap((ptr_t)next, nextsize);
                    } else {
                        GC_remap((ptr_t)h, size);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                    }
                } else {
                    /* Both are unmapped: unmap any gap in the middle, */
                    /* then merge, leaving the result unmapped.        */
                    GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
                }
                GC_remove_from_fl(hhdr, i);
                GC_remove_from_fl(nexthdr, FL_UNKNOWN);
                hhdr -> hb_sz += nexthdr -> hb_sz;
                GC_remove_header(next);
                GC_add_to_fl(h, hhdr);
                /* Start over at the beginning of the list. */
                h = GC_hblkfreelist[i];
            } else /* not mergeable with successor */ {
                h = hhdr -> hb_next;
            }
        } /* while (h != 0) ... */
    } /* for ... */
}
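
/* Merging decisions above, summarized (h = this block, n = successor): */
/*      h mapped,   n unmapped:  remap n if h is larger, else unmap h   */
/*      h unmapped, n mapped:    unmap n if h is larger, else remap h   */
/*      h unmapped, n unmapped:  just unmap the page gap between them   */
/* i.e. the smaller block is converted to the larger one's state, so    */
/* the cheaper mmap/munmap call wins.                                   */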

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    if (!GC_install_header(rest)) return(0);
    rest_hdr = HDR(rest);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
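
/* E.g., carving a 2-block (2*HBLKSIZE-byte) request out of a 5-block   */
/* free area h: the area is unlinked from its list, the caller gets     */
/* blocks [h, h+2), and the 3-block remainder at h+2 receives a fresh   */
/* header (assuming header allocation succeeds) and is pushed onto the  */
/* free list for 3-block areas.                                         */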

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, and move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist. */
    nhdr -> hb_prev = prev;
    nhdr -> hb_next = next;
    nhdr -> hb_sz = total_size - h_size;
    nhdr -> hb_flags = 0;
    if (0 != prev) {
        HDR(prev) -> hb_next = n;
    } else {
        GC_hblkfreelist[index] = n;
    }
    if (0 != next) {
        HDR(next) -> hb_prev = n;
    }
#   ifdef GC_ASSERTIONS
      nhdr -> hb_map = 0;       /* Don't fail test for consecutive */
                                /* free blocks in GC_add_to_fl.    */
#   endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}

struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz words.
 *
 * NOTE: We set obj_map field in header correctly.
 * Caller is responsible for building an object freelist in block.
 *
 * We clear the block if it is destined for large objects, and if
 * kind requires that newly allocated objects be cleared.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
{
    int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
    int i;
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) return result;
    }
    return 0;
}
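
/* Worked example (illustrative only; word size, HDR_BYTES, and         */
/* HBLKSIZE are configuration dependent): with 4-byte words and         */
/* HBLKSIZE = 4096, a request for sz = 3000 words needs 12000 bytes     */
/* plus header overhead, so OBJ_SZ_TO_BLOCKS(sz) = 3 and the search     */
/* starts at free list index 3, proceeding to higher-indexed            */
/* (larger-block) lists until one succeeds.                             */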

/*
 * The same, but with search restricted to the nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;                /* Header corr. to hbp */
    register struct hblk *thishbp;
    register hdr * thishdr;             /* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* Search for a big enough block in the free list. */
    hbp = GC_hblkfreelist[n];
    hhdr = HDR(hbp);
    for(; 0 != hbp; hbp = hhdr -> hb_next, hhdr = HDR(hbp)) {
        size_avail = hhdr->hb_sz;
        if (size_avail < size_needed) continue;
#       ifdef PRESERVE_LAST
            if (size_avail != size_needed
                && !GC_incremental && GC_should_collect()) {
                continue;
            }
#       endif
        /* If the next heap block is obviously better, go on.       */
        /* This prevents us from disassembling a single large block */
        /* to get tiny blocks.                                      */
        {
            signed_word next_size;

            thishbp = hhdr -> hb_next;
            if (thishbp != 0) {
                thishdr = HDR(thishbp);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
            }
        }
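        /* The small unique-size lists hold blocks of a single size,  */
        /* so this heuristic matters mainly for the compressed bins   */
        /* and the shared huge-block list: e.g., a 300-block request  */
        /* positioned at a 400-block area skips ahead if the next     */
        /* entry is a 310-block area that is not blacklisted.         */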
        if (!IS_UNCOLLECTABLE(kind) &&
            (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
            struct hblk * lasthbp = hbp;
            ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
            signed_word orig_avail = size_avail;
            signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);

            /* Skip past the prefix of the block that is blacklisted. */
            while ((ptr_t)lasthbp <= search_end
                   && (thishbp = GC_is_black_listed(lasthbp,
                                                    (word)eff_size_needed))) {
                lasthbp = thishbp;
            }
            size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
            thishbp = lasthbp;
            if (size_avail >= size_needed) {
                if (thishbp != hbp && GC_install_header(thishbp)) {
                    /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                        if (!IS_MAPPED(hhdr)) {
                            GC_remap((ptr_t)hbp, size_avail);
                            hhdr -> hb_flags &= ~WAS_UNMAPPED;
                        }
#                   endif
                    /* Split the block at thishbp. */
                    thishdr = HDR(thishbp);
                    GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                    /* Advance to thishbp. */
                    hbp = thishbp;
                    hhdr = thishdr;
                    /* We must now allocate thishbp, since it may */
                    /* be on the wrong free list.                 */
                }
            } else if (size_needed > (signed_word)BL_LIMIT
                       && orig_avail - size_needed
                          > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                WARN("Needed to allocate blacklisted block at 0x%lx\n",
                     (word)hbp);
                size_avail = orig_avail;
            } else if (size_avail == 0 && size_needed == HBLKSIZE
                       && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                    static unsigned count = 0;

                    /* The block is completely blacklisted.  We need      */
                    /* to drop some such blocks, since otherwise we spend */
                    /* all our time traversing them if pointer-free       */
                    /* blocks are unpopular.                              */
                    /* A dropped block will be reconsidered at next GC.   */
                    if ((++count & 3) == 0) {
                        /* Allocate and drop the block in small chunks,   */
                        /* to maximize the chance that we will recover    */
                        /* some later.                                    */
                        word total_size = hhdr -> hb_sz;
                        struct hblk * limit = hbp + divHBLKSZ(total_size);
                        struct hblk * h;
                        struct hblk * prev = hhdr -> hb_prev;

                        GC_words_wasted += total_size;
                        GC_large_free_bytes -= total_size;
                        GC_remove_from_fl(hhdr, n);
                        for (h = hbp; h < limit; h++) {
                            if (h == hbp || GC_install_header(h)) {
                                hhdr = HDR(h);
                                (void) setup_header(
                                        hhdr,
                                        BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
                                        PTRFREE, 0); /* Can't fail. */
                                if (GC_debugging_started) {
                                    BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
                                }
                            }
                        }
                        /* Restore hbp to point at the free block. */
                        hbp = prev;
                        if (0 == hbp) {
                            return GC_allochblk_nth(sz, kind, flags, n);
                        }
                        hhdr = HDR(hbp);
                    }
                }
            }
        }
        if (size_avail >= size_needed) {
#           ifdef USE_MUNMAP
                if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, size_avail);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                }
#           endif
            /* hbp may be on the wrong freelist; the parameter n */
            /* is important.                                     */
            hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
            break;
        }
    }

    if (0 == hbp) return 0;

    /* Notify virtual dirty bit implementation that we are about to write. */
    GC_write_hint(hbp);

    /* Add it to map of valid blocks. */
    if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up the header. */
    if (!setup_header(hhdr, sz, kind, flags)) {
        GC_remove_counts(hbp, (word)size_needed);
        return(0); /* ditto */
    }

    /* Clear block if necessary. */
    if (GC_debugging_started
        || (sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init)) {
        BZERO(hbp + HDR_BYTES, size_needed - HDR_BYTES);
    }

    /* We just successfully allocated a block.  Restart count of */
    /* consecutive failures.                                     */
    {
        extern unsigned GC_fail_count;

        GC_fail_count = 0;
    }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}
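
/* Summary of a successful GC_allochblk_nth call: a fit is found on      */
/* list n; any blacklisted prefix is split off via GC_split_block; the   */
/* block is unlinked by GC_get_first_part, which returns any tail to     */
/* its proper list; counts and the header are installed; the block is    */
/* zeroed when its kind requires it; and GC_large_free_bytes is debited. */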

struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    hhdr = HDR(hbp);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case. */
    if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
    }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    nexthdr = HDR(next);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible. */
    if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
    }
    /* Coalesce with predecessor, if possible. */
    if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
            GC_remove_from_fl(prevhdr, FL_UNKNOWN);
            prevhdr -> hb_sz += hhdr -> hb_sz;
            GC_remove_header(hbp);
            hbp = prev;
            hhdr = prevhdr;
        }
    }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}

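/* Coalescing example: freeing a 2-block region whose successor is a    */
/* free 3-block region and whose predecessor is a free 1-block region   */
/* collapses all three into a single 6-block region headed at the       */
/* predecessor, with the two absorbed headers removed and the result    */
/* pushed onto the 6-block free list.                                   */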