]>
Commit | Line | Data |
---|---|---|
73ffefd0 TT |
1 | /* |
2 | * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers | |
3 | * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. | |
4 | * | |
5 | * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED | |
6 | * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. | |
7 | * | |
8 | * Permission is hereby granted to use or copy this program | |
9 | * for any purpose, provided the above notices are retained on all copies. | |
10 | * Permission to modify the code and to distribute modified code is granted, | |
11 | * provided the above notices are retained, and a notice that the code was | |
12 | * modified is included with the above copyright notice. | |
13 | * | |
14 | */ | |
15 | /* Boehm, February 16, 1996 2:26 pm PST */ | |
16 | ||
17 | ||
18 | # include "gc_priv.h" | |
19 | ||
20 | # include <stdio.h> | |
21 | # ifndef MACOS | |
22 | # include <signal.h> | |
23 | # include <sys/types.h> | |
24 | # endif | |
25 | ||
26 | /* | |
27 | * Separate free lists are maintained for different sized objects | |
28 | * up to MAXOBJSZ. | |
29 | * The call GC_allocobj(i,k) ensures that the freelist for | |
30 | * kind k objects of size i points to a non-empty | |
31 | * free list. It returns a pointer to the first entry on the free list. | |
32 | * In a single-threaded world, GC_allocobj may be called to allocate | |
33 | * an object of (small) size i as follows: | |
34 | * | |
35 | * opp = &(GC_objfreelist[i]); | |
36 | * if (*opp == 0) GC_allocobj(i, NORMAL); | |
37 | * ptr = *opp; | |
38 | * *opp = obj_link(ptr); | |
39 | * | |
40 | * Note that this is very fast if the free list is non-empty; it should | |
41 | * only involve the execution of 4 or 5 simple instructions. | |
42 | * All composite objects on freelists are cleared, except for | |
43 | * their first word. | |
44 | */ | |
45 | ||
46 | /* | |
47 | * The allocator uses GC_allochblk to allocate large chunks of objects. | |
48 | * These chunks all start on addresses which are multiples of | |
49 | * HBLKSZ. Each allocated chunk has an associated header, | |
50 | * which can be located quickly based on the address of the chunk. | |
51 | * (See headers.c for details.) | |
52 | * This makes it possible to check quickly whether an | |
53 | * arbitrary address corresponds to an object administered by the | |
54 | * allocator. | |
55 | */ | |
56 | ||
word GC_non_gc_bytes = 0;  /* Number of bytes not intended to be collected */

word GC_gc_no = 0;         /* Collection counter; incremented by           */
                           /* GC_stopped_mark on each successful mark.     */

int GC_incremental = 0;    /* By default, stop the world.                  */

int GC_full_freq = 4;      /* Every 5th collection is a full               */
                           /* collection.                                  */

char * GC_copyright[] =
{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
"Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. ",
"Copyright (c) 1996-1997 by Silicon Graphics. All rights reserved. ",
"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
" EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.",
"See source code for details." };

# include "version.h"

/* some more variables */

extern signed_word GC_mem_found;  /* Number of reclaimed longwords */
                                  /* after garbage collection      */

GC_bool GC_dont_expand = 0;       /* If set, GC_expand_hp is never called   */
                                  /* implicitly -- presumably; definition   */
                                  /* of the policy lives elsewhere. TODO    */
                                  /* confirm against users of this flag.    */

word GC_free_space_divisor = 4;   /* Larger values mean less free space is  */
                                  /* demanded between collections (see      */
                                  /* min_words_allocd).                     */

extern GC_bool GC_collection_in_progress();
87 | int GC_never_stop_func GC_PROTO((void)) { return(0); } | |
88 | ||
89 | CLOCK_TYPE GC_start_time; | |
90 | ||
91 | int GC_timeout_stop_func GC_PROTO((void)) | |
92 | { | |
93 | CLOCK_TYPE current_time; | |
94 | static unsigned count = 0; | |
95 | unsigned long time_diff; | |
96 | ||
97 | if ((count++ & 3) != 0) return(0); | |
98 | GET_TIME(current_time); | |
99 | time_diff = MS_TIME_DIFF(current_time,GC_start_time); | |
100 | if (time_diff >= TIME_LIMIT) { | |
101 | # ifdef PRINTSTATS | |
102 | GC_printf0("Abandoning stopped marking after "); | |
103 | GC_printf1("%lu msecs\n", (unsigned long)time_diff); | |
104 | # endif | |
105 | return(1); | |
106 | } | |
107 | return(0); | |
108 | } | |
109 | ||
110 | /* Return the minimum number of words that must be allocated between */ | |
111 | /* collections to amortize the collection cost. */ | |
112 | static word min_words_allocd() | |
113 | { | |
114 | # ifdef THREADS | |
115 | /* We punt, for now. */ | |
116 | register signed_word stack_size = 10000; | |
117 | # else | |
118 | int dummy; | |
119 | register signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom; | |
120 | # endif | |
121 | register word total_root_size; /* includes double stack size, */ | |
122 | /* since the stack is expensive */ | |
123 | /* to scan. */ | |
124 | ||
125 | if (stack_size < 0) stack_size = -stack_size; | |
126 | total_root_size = 2 * stack_size + GC_root_size; | |
127 | if (GC_incremental) { | |
128 | return(BYTES_TO_WORDS(GC_heapsize + total_root_size) | |
129 | / (2 * GC_free_space_divisor)); | |
130 | } else { | |
131 | return(BYTES_TO_WORDS(GC_heapsize + total_root_size) | |
132 | / GC_free_space_divisor); | |
133 | } | |
134 | } | |
135 | ||
136 | /* Return the number of words allocated, adjusted for explicit storage */ | |
137 | /* management, etc.. This number is used in deciding when to trigger */ | |
138 | /* collections. */ | |
139 | word GC_adj_words_allocd() | |
140 | { | |
141 | register signed_word result; | |
142 | register signed_word expl_managed = | |
143 | BYTES_TO_WORDS((long)GC_non_gc_bytes | |
144 | - (long)GC_non_gc_bytes_at_gc); | |
145 | ||
146 | /* Don't count what was explicitly freed, or newly allocated for */ | |
147 | /* explicit management. Note that deallocating an explicitly */ | |
148 | /* managed object should not alter result, assuming the client */ | |
149 | /* is playing by the rules. */ | |
150 | result = (signed_word)GC_words_allocd | |
151 | - (signed_word)GC_mem_freed - expl_managed; | |
152 | if (result > (signed_word)GC_words_allocd) { | |
153 | result = GC_words_allocd; | |
154 | /* probably client bug or unfortunate scheduling */ | |
155 | } | |
156 | result += GC_words_finalized; | |
157 | /* We count objects enqueued for finalization as though they */ | |
158 | /* had been reallocated this round. Finalization is user */ | |
159 | /* visible progress. And if we don't count this, we have */ | |
160 | /* stability problems for programs that finalize all objects. */ | |
161 | result += GC_words_wasted; | |
162 | /* This doesn't reflect useful work. But if there is lots of */ | |
163 | /* new fragmentation, the same is probably true of the heap, */ | |
164 | /* and the collection will be correspondingly cheaper. */ | |
165 | if (result < (signed_word)(GC_words_allocd >> 3)) { | |
166 | /* Always count at least 1/8 of the allocations. We don't want */ | |
167 | /* to collect too infrequently, since that would inhibit */ | |
168 | /* coalescing of free storage blocks. */ | |
169 | /* This also makes us partially robust against client bugs. */ | |
170 | return(GC_words_allocd >> 3); | |
171 | } else { | |
172 | return(result); | |
173 | } | |
174 | } | |
175 | ||
176 | ||
177 | /* Clear up a few frames worth of garbage left at the top of the stack. */ | |
178 | /* This is used to prevent us from accidentally treating garbade left */ | |
179 | /* on the stack by other parts of the collector as roots. This */ | |
180 | /* differs from the code in misc.c, which actually tries to keep the */ | |
181 | /* stack clear of long-lived, client-generated garbage. */ | |
182 | void GC_clear_a_few_frames() | |
183 | { | |
184 | # define NWORDS 64 | |
185 | word frames[NWORDS]; | |
186 | register int i; | |
187 | ||
188 | for (i = 0; i < NWORDS; i++) frames[i] = 0; | |
189 | } | |
190 | ||
191 | /* Have we allocated enough to amortize a collection? */ | |
192 | GC_bool GC_should_collect() | |
193 | { | |
194 | return(GC_adj_words_allocd() >= min_words_allocd()); | |
195 | } | |
196 | ||
197 | void GC_notify_full_gc() | |
198 | { | |
199 | if (GC_start_call_back != (void (*)())0) { | |
200 | (*GC_start_call_back)(); | |
201 | } | |
202 | } | |
203 | ||
/*
 * Initiate a garbage collection if appropriate.
 * Choose judiciously
 * between partial, full, and stop-world collections.
 * Assumes lock held, signals disabled.
 */
void GC_maybe_gc()
{
    static int n_partial_gcs = 0;       /* Partial collections since    */
                                        /* the last full mark phase.    */

    if (GC_should_collect()) {
        if (!GC_incremental) {
            /* Non-incremental mode: do a complete stop-world collection. */
            GC_notify_full_gc();
            GC_gcollect_inner();
            n_partial_gcs = 0;
            return;
        } else if (n_partial_gcs >= GC_full_freq) {
            /* Time for a full collection: discard current mark state   */
            /* so everything is re-marked from the roots.               */
#           ifdef PRINTSTATS
              GC_printf2(
                "***>Full mark for collection %lu after %ld allocd bytes\n",
                (unsigned long) GC_gc_no+1,
                (long)WORDS_TO_BYTES(GC_words_allocd));
#           endif
            GC_promote_black_lists();
            (void)GC_reclaim_all((GC_stop_func)0, TRUE);
            GC_clear_marks();
            n_partial_gcs = 0;
            GC_notify_full_gc();
        } else {
            n_partial_gcs++;
        }
        /* We try to mark with the world stopped.       */
        /* If we run out of time, this turns into       */
        /* incremental marking.                         */
        GET_TIME(GC_start_time);
        if (GC_stopped_mark(GC_timeout_stop_func)) {
#           ifdef SAVE_CALL_CHAIN
                GC_save_callers(GC_last_stack);
#           endif
            GC_finish_collection();
        }
    }
}
246 | ||
247 | ||
/*
 * Stop the world garbage collection.  Assumes lock held, signals disabled.
 * If stop_func is not GC_never_stop_func, then abort if stop_func returns TRUE.
 */
GC_bool GC_try_to_collect_inner(stop_func)
GC_stop_func stop_func;
{
    if (GC_collection_in_progress()) {
#       ifdef PRINTSTATS
            GC_printf0(
                "GC_try_to_collect_inner: finishing collection in progress\n");
#       endif /* PRINTSTATS */
        /* Just finish collection already in progress.  */
        while(GC_collection_in_progress()) {
            if (stop_func()) return(FALSE);
            GC_collect_a_little_inner(1);
        }
    }
#   ifdef PRINTSTATS
        GC_printf2(
          "Initiating full world-stop collection %lu after %ld allocd bytes\n",
          (unsigned long) GC_gc_no+1,
          (long)WORDS_TO_BYTES(GC_words_allocd));
#   endif
    GC_promote_black_lists();
    /* Make sure all blocks have been reclaimed, so sweep routines      */
    /* don't see cleared mark bits.                                     */
    /* If we're guaranteed to finish, then this is unnecessary.         */
    if (stop_func != GC_never_stop_func
        && !GC_reclaim_all(stop_func, FALSE)) {
        /* Aborted.  So far everything is still consistent. */
        return(FALSE);
    }
    GC_invalidate_mark_state();         /* Flush mark stack.    */
    GC_clear_marks();
#   ifdef SAVE_CALL_CHAIN
        GC_save_callers(GC_last_stack);
#   endif
    if (!GC_stopped_mark(stop_func)) {
        if (!GC_incremental) {
            /* We're partially done and have no way to complete or use  */
            /* current work.  Reestablish invariants as cheaply as      */
            /* possible.                                                */
            GC_invalidate_mark_state();
            GC_unpromote_black_lists();
        } /* else we claim the world is already still consistent.  We'll */
          /* finish incrementally.                                       */
        return(FALSE);
    }
    GC_finish_collection();
    return(TRUE);
}
300 | ||
301 | ||
302 | ||
/*
 * Perform n units of garbage collection work.  A unit is intended to touch
 * roughly a GC_RATE pages.  Every once in a while, we do more than that.
 */
# define GC_RATE 8

int GC_deficit = 0;     /* The number of extra calls to GC_mark_some    */
                        /* that we have made.  Set by GC_stopped_mark   */
                        /* when marking is abandoned, and paid back by  */
                        /* GC_collect_a_little_inner.                   */
                        /* Negative values are equivalent to 0.         */
312 | ||
/* Do n units of incremental collection work, if a collection is in     */
/* progress; otherwise consider starting one (GC_maybe_gc).             */
void GC_collect_a_little_inner(n)
int n;
{
    register int i;

    if (GC_collection_in_progress()) {
        /* Start at GC_deficit so that mark-work already performed      */
        /* while the world was stopped reduces the work done here.      */
        for (i = GC_deficit; i < GC_RATE*n; i++) {
            if (GC_mark_some()) {
                /* Need to finish a collection */
#               ifdef SAVE_CALL_CHAIN
                    GC_save_callers(GC_last_stack);
#               endif
                (void) GC_stopped_mark(GC_never_stop_func);
                GC_finish_collection();
                break;
            }
        }
        if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
    } else {
        GC_maybe_gc();
    }
}
335 | ||
336 | int GC_collect_a_little GC_PROTO(()) | |
337 | { | |
338 | int result; | |
339 | DCL_LOCK_STATE; | |
340 | ||
341 | DISABLE_SIGNALS(); | |
342 | LOCK(); | |
343 | GC_collect_a_little_inner(1); | |
344 | result = (int)GC_collection_in_progress(); | |
345 | UNLOCK(); | |
346 | ENABLE_SIGNALS(); | |
347 | return(result); | |
348 | } | |
349 | ||
350 | /* | |
351 | * Assumes lock is held, signals are disabled. | |
352 | * We stop the world. | |
353 | * If final is TRUE, then we finish the collection, no matter how long | |
354 | * it takes. | |
355 | * Otherwise we may fail and return FALSE if this takes too long. | |
356 | * Increment GC_gc_no if we succeed. | |
357 | */ | |
358 | GC_bool GC_stopped_mark(stop_func) | |
359 | GC_stop_func stop_func; | |
360 | { | |
361 | register int i; | |
362 | # ifdef PRINTSTATS | |
363 | CLOCK_TYPE start_time, current_time; | |
364 | # endif | |
365 | ||
366 | STOP_WORLD(); | |
367 | # ifdef PRINTSTATS | |
368 | GET_TIME(start_time); | |
369 | GC_printf1("--> Marking for collection %lu ", | |
370 | (unsigned long) GC_gc_no + 1); | |
371 | GC_printf2("after %lu allocd bytes + %lu wasted bytes\n", | |
372 | (unsigned long) WORDS_TO_BYTES(GC_words_allocd), | |
373 | (unsigned long) WORDS_TO_BYTES(GC_words_wasted)); | |
374 | # endif | |
375 | ||
376 | /* Mark from all roots. */ | |
377 | /* Minimize junk left in my registers and on the stack */ | |
378 | GC_clear_a_few_frames(); | |
379 | GC_noop(0,0,0,0,0,0); | |
380 | GC_initiate_gc(); | |
381 | for(i = 0;;i++) { | |
382 | if ((*stop_func)()) { | |
383 | # ifdef PRINTSTATS | |
384 | GC_printf0("Abandoned stopped marking after "); | |
385 | GC_printf1("%lu iterations\n", | |
386 | (unsigned long)i); | |
387 | # endif | |
388 | GC_deficit = i; /* Give the mutator a chance. */ | |
389 | START_WORLD(); | |
390 | return(FALSE); | |
391 | } | |
392 | if (GC_mark_some()) break; | |
393 | } | |
394 | ||
395 | GC_gc_no++; | |
396 | # ifdef PRINTSTATS | |
397 | GC_printf2("Collection %lu reclaimed %ld bytes", | |
398 | (unsigned long) GC_gc_no - 1, | |
399 | (long)WORDS_TO_BYTES(GC_mem_found)); | |
400 | GC_printf1(" ---> heapsize = %lu bytes\n", | |
401 | (unsigned long) GC_heapsize); | |
402 | /* Printf arguments may be pushed in funny places. Clear the */ | |
403 | /* space. */ | |
404 | GC_printf0(""); | |
405 | # endif | |
406 | ||
407 | /* Check all debugged objects for consistency */ | |
408 | if (GC_debugging_started) { | |
409 | (*GC_check_heap)(); | |
410 | } | |
411 | ||
412 | # ifdef PRINTTIMES | |
413 | GET_TIME(current_time); | |
414 | GC_printf1("World-stopped marking took %lu msecs\n", | |
415 | MS_TIME_DIFF(current_time,start_time)); | |
416 | # endif | |
417 | START_WORLD(); | |
418 | return(TRUE); | |
419 | } | |
420 | ||
421 | ||
/* Finish up a collection.  Assumes lock is held, signals are disabled, */
/* but the world is otherwise running.                                  */
void GC_finish_collection()
{
#   ifdef PRINTTIMES
        CLOCK_TYPE start_time;
        CLOCK_TYPE finalize_time;
        CLOCK_TYPE done_time;

        GET_TIME(start_time);
        finalize_time = start_time;
#   endif

#   ifdef GATHERSTATS
        GC_mem_found = 0;
#   endif
#   ifdef FIND_LEAK
      /* Leak-detection build: instead of reclaiming, mark all objects  */
      /* on the free lists; anything left unmarked afterwards leaked.   */
      /* Mark all objects on the free list.  All objects should be      */
      /* marked when we're done.                                        */
      {
        register word size;             /* current object size          */
        register ptr_t p;               /* pointer to current object    */
        register struct hblk * h;       /* pointer to block containing *p */
        register hdr * hhdr;
        register int word_no;           /* "index" of *p in *q          */
        int kind;

        for (kind = 0; kind < GC_n_kinds; kind++) {
          for (size = 1; size <= MAXOBJSZ; size++) {
            for (p= GC_obj_kinds[kind].ok_freelist[size];
                 p != 0; p=obj_link(p)){
              h = HBLKPTR(p);
              hhdr = HDR(h);
              word_no = (((word *)p) - ((word *)h));
              set_mark_bit_from_hdr(hhdr, word_no);
            }
          }
        }
      }
      /* Check that everything is marked */
      GC_start_reclaim(TRUE);
#   else

      GC_finalize();
#     ifdef STUBBORN_ALLOC
        GC_clean_changing_list();
#     endif

#     ifdef PRINTTIMES
        GET_TIME(finalize_time);
#     endif

      /* Clear free list mark bits, in case they got accidentally marked   */
      /* Note: HBLKPTR(p) == pointer to head of block containing *p        */
      /* Also subtract memory remaining from GC_mem_found count.           */
      /* Note that composite objects on free list are cleared.             */
      /* Thus accidentally marking a free list is not a problem;  only     */
      /* objects on the list itself will be marked, and that's fixed here. */
      {
        register word size;             /* current object size          */
        register ptr_t p;               /* pointer to current object    */
        register struct hblk * h;       /* pointer to block containing *p */
        register hdr * hhdr;
        register int word_no;           /* "index" of *p in *q          */
        int kind;

        for (kind = 0; kind < GC_n_kinds; kind++) {
          for (size = 1; size <= MAXOBJSZ; size++) {
            for (p= GC_obj_kinds[kind].ok_freelist[size];
                 p != 0; p=obj_link(p)){
              h = HBLKPTR(p);
              hhdr = HDR(h);
              word_no = (((word *)p) - ((word *)h));
              clear_mark_bit_from_hdr(hhdr, word_no);
#             ifdef GATHERSTATS
                GC_mem_found -= size;
#             endif
            }
          }
        }
      }


#     ifdef PRINTSTATS
        GC_printf1("Bytes recovered before sweep - f.l. count = %ld\n",
                   (long)WORDS_TO_BYTES(GC_mem_found));
#     endif

      /* Reconstruct free lists to contain everything not marked */
      GC_start_reclaim(FALSE);

#   endif /* !FIND_LEAK */

#   ifdef PRINTSTATS
        GC_printf2(
            "Immediately reclaimed %ld bytes in heap of size %lu bytes\n",
            (long)WORDS_TO_BYTES(GC_mem_found),
            (unsigned long)GC_heapsize);
        GC_printf2("%lu (atomic) + %lu (composite) collectable bytes in use\n",
                   (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
                   (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
#   endif

    /* Reset or increment counters for next cycle */
    GC_words_allocd_before_gc += GC_words_allocd;
    GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
    GC_words_allocd = 0;
    GC_words_wasted = 0;
    GC_mem_freed = 0;

#   ifdef PRINTTIMES
        GET_TIME(done_time);
        GC_printf2("Finalize + initiate sweep took %lu + %lu msecs\n",
                   MS_TIME_DIFF(finalize_time,start_time),
                   MS_TIME_DIFF(done_time,finalize_time));
#   endif
}
539 | ||
/* Externally callable routine to invoke full, stop-world collection */
# if defined(__STDC__) || defined(__cplusplus)
    int GC_try_to_collect(GC_stop_func stop_func)
# else
    int GC_try_to_collect(stop_func)
    GC_stop_func stop_func;
# endif
{
    int result;
    DCL_LOCK_STATE;

    /* Run any pending finalizers before acquiring the lock.            */
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    ENTER_GC();
    if (!GC_is_initialized) GC_init_inner();
    /* Minimize junk left in my registers */
      GC_noop(0,0,0,0,0,0);
    result = (int)GC_try_to_collect_inner(stop_func);
    EXIT_GC();
    UNLOCK();
    ENABLE_SIGNALS();
    /* On success, run finalizers made runnable by this collection.     */
    if(result) GC_INVOKE_FINALIZERS();
    return(result);
}
565 | ||
/* Externally callable full collection that never gives up              */
/* (uses GC_never_stop_func).                                           */
void GC_gcollect GC_PROTO(())
{
    GC_notify_full_gc();
    (void)GC_try_to_collect(GC_never_stop_func);
}
571 | ||
word GC_n_heap_sects = 0;       /* Number of sections currently in heap. */

/*
 * Use the chunk of memory starting at p of size bytes as part of the heap.
 * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
 */
void GC_add_to_heap(p, bytes)
struct hblk *p;
word bytes;
{
    word words;

    if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
        ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
    }
    if (!GC_install_header(p)) {
        /* This is extremely unlikely. Can't add it.  This will         */
        /* almost certainly result in a 0 return from the allocator,    */
        /* which is entirely appropriate.                               */
        return;
    }
    /* Record the section, then hand the whole chunk to the block       */
    /* free list.                                                       */
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    words = BYTES_TO_WORDS(bytes - HDR_BYTES);
    HDR(p) -> hb_sz = words;
    GC_freehblk(p);
    GC_heapsize += bytes;
    /* Widen the plausible-heap-address bounds to cover the new chunk.  */
    if ((ptr_t)p <= GC_least_plausible_heap_addr
        || GC_least_plausible_heap_addr == 0) {
        GC_least_plausible_heap_addr = (ptr_t)p - sizeof(word);
        /* Making it a little smaller than necessary prevents   */
        /* us from getting a false hit from the variable        */
        /* itself.  There's some unintentional reflection       */
        /* here.                                                */
    }
    if ((ptr_t)p + bytes >= GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (ptr_t)p + bytes;
    }
}
612 | ||
#ifdef PRESERVE_LAST
/* Does p lie within the most recently added heap section? */
GC_bool GC_in_last_heap_sect(p)
ptr_t p;
{
    struct HeapSect * sect = &(GC_heap_sects[GC_n_heap_sects-1]);
    ptr_t lo = sect -> hs_start;
    ptr_t hi = lo + sect -> hs_bytes;

    return(p >= lo && p < hi);
}
#endif
627 | ||
# if !defined(NO_DEBUGGING)
/* Debugging aid: print every heap section, with a count of how many    */
/* of its blocks are currently black-listed.                            */
void GC_print_heap_sects()
{
    register unsigned i;

    GC_printf1("Total heap size: %lu\n", (unsigned long) GC_heapsize);
    for (i = 0; i < GC_n_heap_sects; i++) {
        unsigned long start = (unsigned long) GC_heap_sects[i].hs_start;
        unsigned long len = (unsigned long) GC_heap_sects[i].hs_bytes;
        struct hblk *h;
        unsigned nbl = 0;       /* number of black-listed blocks */

        GC_printf3("Section %ld from 0x%lx to 0x%lx ", (unsigned long)i,
                   start, (unsigned long)(start + len));
        for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {
            if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
        }
        GC_printf2("%lu/%lu blacklisted\n", (unsigned long)nbl,
                   (unsigned long)(len/HBLKSIZE));
    }
}
# endif
650 | ||
/* Bounds used for fast rejection of candidate pointers.  The lower     */
/* bound starts at all-ones and is only lowered; the upper bound starts */
/* at 0 and is only raised (see GC_add_to_heap, GC_expand_hp_inner).    */
ptr_t GC_least_plausible_heap_addr = (ptr_t)ONES;
ptr_t GC_greatest_plausible_heap_addr = 0;
653 | ||
654 | ptr_t GC_max(x,y) | |
655 | ptr_t x, y; | |
656 | { | |
657 | return(x > y? x : y); | |
658 | } | |
659 | ||
660 | ptr_t GC_min(x,y) | |
661 | ptr_t x, y; | |
662 | { | |
663 | return(x < y? x : y); | |
664 | } | |
665 | ||
/* Set a self-imposed limit on the heap size; 0 means unlimited.        */
/* (Enforced by GC_expand_hp_inner before growing the heap.)            */
# if defined(__STDC__) || defined(__cplusplus)
    void GC_set_max_heap_size(GC_word n)
# else
    void GC_set_max_heap_size(n)
    GC_word n;
# endif
{
    GC_max_heapsize = n;
}
675 | ||
676 | GC_word GC_max_retries = 0; | |
677 | ||
678 | /* | |
679 | * this explicitly increases the size of the heap. It is used | |
680 | * internally, but may also be invoked from GC_expand_hp by the user. | |
681 | * The argument is in units of HBLKSIZE. | |
682 | * Tiny values of n are rounded up. | |
683 | * Returns FALSE on failure. | |
684 | */ | |
685 | GC_bool GC_expand_hp_inner(n) | |
686 | word n; | |
687 | { | |
688 | word bytes; | |
689 | struct hblk * space; | |
690 | word expansion_slop; /* Number of bytes by which we expect the */ | |
691 | /* heap to expand soon. */ | |
692 | ||
693 | if (n < MINHINCR) n = MINHINCR; | |
694 | bytes = n * HBLKSIZE; | |
695 | /* Make sure bytes is a multiple of GC_page_size */ | |
696 | { | |
697 | word mask = GC_page_size - 1; | |
698 | bytes += mask; | |
699 | bytes &= ~mask; | |
700 | } | |
701 | ||
702 | if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) { | |
703 | /* Exceeded self-imposed limit */ | |
704 | return(FALSE); | |
705 | } | |
706 | space = GET_MEM(bytes); | |
707 | if( space == 0 ) { | |
708 | return(FALSE); | |
709 | } | |
710 | # ifdef PRINTSTATS | |
711 | GC_printf2("Increasing heap size by %lu after %lu allocated bytes\n", | |
712 | (unsigned long)bytes, | |
713 | (unsigned long)WORDS_TO_BYTES(GC_words_allocd)); | |
714 | # ifdef UNDEFINED | |
715 | GC_printf1("Root size = %lu\n", GC_root_size); | |
716 | GC_print_block_list(); GC_print_hblkfreelist(); | |
717 | GC_printf0("\n"); | |
718 | # endif | |
719 | # endif | |
720 | expansion_slop = 8 * WORDS_TO_BYTES(min_words_allocd()); | |
721 | if (5 * HBLKSIZE * MAXHINCR > expansion_slop) { | |
722 | expansion_slop = 5 * HBLKSIZE * MAXHINCR; | |
723 | } | |
724 | if (GC_last_heap_addr == 0 && !((word)space & SIGNB) | |
725 | || GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space) { | |
726 | /* Assume the heap is growing up */ | |
727 | GC_greatest_plausible_heap_addr = | |
728 | GC_max(GC_greatest_plausible_heap_addr, | |
729 | (ptr_t)space + bytes + expansion_slop); | |
730 | } else { | |
731 | /* Heap is growing down */ | |
732 | GC_least_plausible_heap_addr = | |
733 | GC_min(GC_least_plausible_heap_addr, | |
734 | (ptr_t)space - expansion_slop); | |
735 | } | |
736 | GC_prev_heap_addr = GC_last_heap_addr; | |
737 | GC_last_heap_addr = (ptr_t)space; | |
738 | GC_add_to_heap(space, bytes); | |
739 | return(TRUE); | |
740 | } | |
741 | ||
/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* Arguments is in bytes.                                                */
# if defined(__STDC__) || defined(__cplusplus)
    int GC_expand_hp(size_t bytes)
# else
    int GC_expand_hp(bytes)
    size_t bytes;
# endif
{
    int result;
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Convert the byte count to whole heap blocks for the inner call.  */
    result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
    UNLOCK();
    ENABLE_SIGNALS();
    return(result);
}
762 | ||
unsigned GC_fail_count = 0;
                        /* How many consecutive GC/expansion failures?  */
                        /* Reset by GC_allochblk.                       */

/* Try to make room for needed_blocks heap blocks, by collecting if     */
/* appropriate, and otherwise by expanding the heap.  Returns FALSE     */
/* only after both expansion attempts fail and GC_max_retries           */
/* collection retries have been exhausted.                              */
GC_bool GC_collect_or_expand(needed_blocks, ignore_off_page)
word needed_blocks;
GC_bool ignore_off_page;
{

    if (!GC_incremental && !GC_dont_gc && GC_should_collect()) {
        GC_notify_full_gc();
        GC_gcollect_inner();
    } else {
        /* Grow by a fraction of the current heap, but at least enough  */
        /* for the request.                                             */
        word blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
                             + needed_blocks;

        if (blocks_to_get > MAXHINCR) {
            word slop;

            if (ignore_off_page) {
                slop = 4;
            } else {
                slop = 2*divHBLKSZ(BL_LIMIT);
                if (slop > needed_blocks) slop = needed_blocks;
            }
            if (needed_blocks + slop > MAXHINCR) {
                blocks_to_get = needed_blocks + slop;
            } else {
                blocks_to_get = MAXHINCR;
            }
        }
        /* Fall back to the bare request if the padded expansion fails. */
        if (!GC_expand_hp_inner(blocks_to_get)
            && !GC_expand_hp_inner(needed_blocks)) {
            if (GC_fail_count++ < GC_max_retries) {
                WARN("Out of Memory! Trying to continue ...\n", 0);
                GC_notify_full_gc();
                GC_gcollect_inner();
            } else {
                WARN("Out of Memory! Returning NIL!\n", 0);
                return(FALSE);
            }
        } else if (GC_fail_count) {
#           ifdef PRINTSTATS
                GC_printf0("Memory available again ...\n");
#           endif
        }
    }
    return(TRUE);
}
812 | ||
/*
 * Make sure the object free list for sz is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 * Assumes we hold the allocator lock and signals are disabled.
 *
 */
ptr_t GC_allocobj(sz, kind)
word sz;
int kind;
{
    register ptr_t * flh = &(GC_obj_kinds[kind].ok_freelist[sz]);

    if (sz == 0) return(0);

    /* Escalate until the free list is non-empty: incremental mark +    */
    /* sweep, then a fresh block, then collect-or-expand.               */
    while (*flh == 0) {
        ENTER_GC();
        /* Do our share of marking work */
        if(GC_incremental && !GC_dont_gc) GC_collect_a_little_inner(1);
        /* Sweep blocks for objects of this size */
        GC_continue_reclaim(sz, kind);
        EXIT_GC();
        if (*flh == 0) {
            GC_new_hblk(sz, kind);
        }
        if (*flh == 0) {
            ENTER_GC();
            if (!GC_collect_or_expand((word)1,FALSE)) {
                /* Truly out of memory: caller gets a 0 result. */
                EXIT_GC();
                return(0);
            }
            EXIT_GC();
        }
    }

    return(*flh);
}