]>
Commit | Line | Data |
---|---|---|
18a4bc4e TT |
1 | |
2 | /* | |
3 | * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers | |
4 | * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. | |
5 | * | |
6 | * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED | |
7 | * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. | |
8 | * | |
9 | * Permission is hereby granted to use or copy this program | |
10 | * for any purpose, provided the above notices are retained on all copies. | |
11 | * Permission to modify the code and to distribute modified code is granted, | |
12 | * provided the above notices are retained, and a notice that the code was | |
13 | * modified is included with the above copyright notice. | |
14 | * | |
15 | */ | |
16 | ||
17 | ||
18 | # include <stdio.h> | |
19 | # include "gc_priv.h" | |
20 | # include "gc_mark.h" | |
21 | ||
/* We put this here to minimize the risk of inlining. */
/*VARARGS*/
/* Deliberately empty: its address is taken / it is called so that    */
/* the compiler cannot optimize away the call sites that rely on it.  */
void GC_noop() {}
25 | ||
/* Single argument version, robust against whole program analysis.    */
/* The store to a volatile static forces the argument to be treated   */
/* as used, so even a whole-program optimizer cannot discard calls    */
/* whose only purpose is to keep a value live/visible.                */
void GC_noop1(x)
word x;
{
    static VOLATILE word sink;

    sink = x;
}
34 | ||
/* Table of client-registered mark procedures (referenced by DS_PROC  */
/* descriptors) and the count of slots currently in use.              */
mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0};
word GC_n_mark_procs = 0;
37 | ||
/* Initialize GC_obj_kinds properly and standard free lists properly. */
/* This must be done statically since they may be accessed before     */
/* GC_init is called.                                                 */
/* It's done here, since we need to deal with mark descriptors.       */
/* The entry count must agree with GC_n_kinds below.                  */
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
		0 | DS_LENGTH, FALSE, FALSE },
/* NORMAL  */ { &GC_objfreelist[0], 0,
#		if defined(ADD_BYTE_AT_END) && ALIGNMENT > DS_TAGS
		/* Shrink the descriptor so the extra byte appended   */
		/* to each object is not scanned.                     */
		(word)(-ALIGNMENT) | DS_LENGTH,
#		else
		0 | DS_LENGTH,
#		endif
		TRUE /* add length to descr */, TRUE },
/* UNCOLLECTABLE */
	      { &GC_uobjfreelist[0], 0,
		0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
   /* AUNCOLLECTABLE */
	      { &GC_auobjfreelist[0], 0,
		0 | DS_LENGTH, FALSE /* add length to descr */, FALSE },
# endif
# ifdef STUBBORN_ALLOC
/*STUBBORN*/ { &GC_sobjfreelist[0], 0,
		0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# endif
};
65 | ||
/* Number of object kinds in use; must match the number of entries    */
/* statically initialized in GC_obj_kinds above (3 base kinds, plus   */
/* one each for ATOMIC_UNCOLLECTABLE and STUBBORN_ALLOC).             */
# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 5;
#   else
      int GC_n_kinds = 4;
#   endif
# else
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 4;
#   else
      int GC_n_kinds = 3;
#   endif
# endif
79 | ||
80 | ||
# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
		/* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a	*/
		/* multiple of HBLKSIZE.				*/
# endif

/*
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack(incl.) and GC_mark_stack_top(incl.) still
 * need to be marked from.
 */

word GC_n_rescuing_pages;	/* Number of dirty pages we marked from */
				/* excludes ptrfree pages, etc.		*/

mse * GC_mark_stack;		/* Base of the mark stack.		*/

word GC_mark_stack_size = 0;	/* Current capacity, in entries.	*/

mse * GC_mark_stack_top;	/* Topmost valid entry; sits below	*/
				/* GC_mark_stack when the stack is	*/
				/* empty (see GC_mark_stack_empty).	*/

static struct hblk * scan_ptr;	/* Next heap block to scan during an	*/
				/* incremental mark pass; 0 once a	*/
				/* pass over the heap is complete.	*/

mark_state_t GC_mark_state = MS_NONE;

GC_bool GC_mark_stack_too_small = FALSE;
				/* When set, the mark stack is doubled	*/
				/* at the next opportunity		*/
				/* (see GC_mark_some).			*/

GC_bool GC_objects_are_marked = FALSE;	/* Are there collectable marked	*/
					/* objects in the heap?		*/
110 | ||
111 | GC_bool GC_collection_in_progress() | |
112 | { | |
113 | return(GC_mark_state != MS_NONE); | |
114 | } | |
115 | ||
/* clear all mark bits in the header */
/* The mark bit array occupies MARK_BITS_SZ words in the block        */
/* header; zero the whole array in one step.                          */
void GC_clear_hdr_marks(hhdr)
register hdr * hhdr;
{
    BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
}
122 | ||
123 | /* Set all mark bits in the header. Used for uncollectable blocks. */ | |
124 | void GC_set_hdr_marks(hhdr) | |
125 | register hdr * hhdr; | |
126 | { | |
127 | register int i; | |
128 | ||
129 | for (i = 0; i < MARK_BITS_SZ; ++i) { | |
130 | hhdr -> hb_marks[i] = ONES; | |
131 | } | |
132 | } | |
133 | ||
134 | /* | |
135 | * Clear all mark bits associated with block h. | |
136 | */ | |
137 | /*ARGSUSED*/ | |
138 | static void clear_marks_for_block(h, dummy) | |
139 | struct hblk *h; | |
140 | word dummy; | |
141 | { | |
142 | register hdr * hhdr = HDR(h); | |
143 | ||
144 | if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return; | |
145 | /* Mark bit for these is cleared only once the object is */ | |
146 | /* explicitly deallocated. This either frees the block, or */ | |
147 | /* the bit is cleared once the object is on the free list. */ | |
148 | GC_clear_hdr_marks(hhdr); | |
149 | } | |
150 | ||
151 | /* Slow but general routines for setting/clearing/asking about mark bits */ | |
152 | void GC_set_mark_bit(p) | |
153 | ptr_t p; | |
154 | { | |
155 | register struct hblk *h = HBLKPTR(p); | |
156 | register hdr * hhdr = HDR(h); | |
157 | register int word_no = (word *)p - (word *)h; | |
158 | ||
159 | set_mark_bit_from_hdr(hhdr, word_no); | |
160 | } | |
161 | ||
162 | void GC_clear_mark_bit(p) | |
163 | ptr_t p; | |
164 | { | |
165 | register struct hblk *h = HBLKPTR(p); | |
166 | register hdr * hhdr = HDR(h); | |
167 | register int word_no = (word *)p - (word *)h; | |
168 | ||
169 | clear_mark_bit_from_hdr(hhdr, word_no); | |
170 | } | |
171 | ||
172 | GC_bool GC_is_marked(p) | |
173 | ptr_t p; | |
174 | { | |
175 | register struct hblk *h = HBLKPTR(p); | |
176 | register hdr * hhdr = HDR(h); | |
177 | register int word_no = (word *)p - (word *)h; | |
178 | ||
179 | return(mark_bit_from_hdr(hhdr, word_no)); | |
180 | } | |
181 | ||
182 | ||
/*
 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
 */
void GC_clear_marks()
{
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;
    scan_ptr = 0;
#   ifdef GATHERSTATS
	/* Counters reflect currently marked objects: reset here */
        GC_composite_in_use = 0;
        GC_atomic_in_use = 0;
#   endif

}
201 | ||
/* Initiate a garbage collection.  Initiates a full collection if the */
/* mark state is invalid.                                             */
/* Reads dirty/changed page information first, then moves the mark    */
/* state machine to MS_PUSH_RESCUERS (incremental) or leaves it at    */
/* MS_INVALID (full collection).                                      */
/*ARGSUSED*/
void GC_initiate_gc()
{
    /* Snapshot virtual-dirty-bit state before marking begins. */
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
    	GC_read_changed();
#   endif
#   ifdef CHECKSUMS
	{
	    extern void GC_check_dirty();
	    
	    if (GC_dirty_maintained) GC_check_dirty();
	}
#   endif
#   ifdef GATHERSTATS
	GC_n_rescuing_pages = 0;
#   endif
    if (GC_mark_state == MS_NONE) {
        GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
    	ABORT("unexpected state");
    } /* else this is really a full collection, and mark	*/
      /* bits are invalid.					*/
    scan_ptr = 0;
}
229 | ||
230 | ||
231 | static void alloc_mark_stack(); | |
232 | ||
/* Perform a small amount of marking.			*/
/* We try to touch roughly a page of memory.		*/
/* Return TRUE if we just finished a mark phase.	*/
/* This is one step of a state machine driven by	*/
/* GC_mark_state; the caller invokes it repeatedly	*/
/* until it returns TRUE.				*/
GC_bool GC_mark_some()
{
    switch(GC_mark_state) {
    	case MS_NONE:
    	    /* No collection in progress: nothing to do. */
    	    return(FALSE);
    
    	case MS_PUSH_RESCUERS:
    	    if (GC_mark_stack_top
    	        >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
    	        /* Drain the mark stack first to keep it small. */
    	    	GC_mark_from_mark_stack();
    	    	return(FALSE);
    	    } else {
    	        /* Push contents of one more dirty block; once every  */
    	        /* block has been scanned, push roots and advance.    */
    	        scan_ptr = GC_push_next_marked_dirty(scan_ptr);
    	        if (scan_ptr == 0) {
#		    ifdef PRINTSTATS
			GC_printf1("Marked from %lu dirty pages\n",
				   (unsigned long)GC_n_rescuing_pages);
#		    endif
    	            GC_push_roots(FALSE);
    	            GC_objects_are_marked = TRUE;
    	            if (GC_mark_state != MS_INVALID) {
    	            	GC_mark_state = MS_ROOTS_PUSHED;
    	            }
    	        }
    	    }
    	    return(FALSE);
    
    	case MS_PUSH_UNCOLLECTABLE:
    	    /* Like MS_PUSH_RESCUERS, but scans uncollectable blocks  */
    	    /* and pushes all roots (full-collection path).           */
    	    if (GC_mark_stack_top
    	        >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
    	    	GC_mark_from_mark_stack();
    	    	return(FALSE);
    	    } else {
    	        scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
    	        if (scan_ptr == 0) {
    	            GC_push_roots(TRUE);
    	            GC_objects_are_marked = TRUE;
    	            if (GC_mark_state != MS_INVALID) {
    	            	GC_mark_state = MS_ROOTS_PUSHED;
    	            }
    	        }
    	    }
    	    return(FALSE);
    
    	case MS_ROOTS_PUSHED:
    	    if (GC_mark_stack_top >= GC_mark_stack) {
    	    	GC_mark_from_mark_stack();
    	    	return(FALSE);
    	    } else {
    	        /* Mark stack empty: the mark phase is complete.      */
    	        /* Grow the stack now if it overflowed this cycle.    */
    	        GC_mark_state = MS_NONE;
    	        if (GC_mark_stack_too_small) {
    	            alloc_mark_stack(2*GC_mark_stack_size);
    	        }
    	        return(TRUE);
    	    }
    	
    	case MS_INVALID:
    	case MS_PARTIALLY_INVALID:
	    if (!GC_objects_are_marked) {
		/* Nothing is marked yet: restart as a full mark. */
		GC_mark_state = MS_PUSH_UNCOLLECTABLE;
		return(FALSE);
	    }
	    if (GC_mark_stack_top >= GC_mark_stack) {
    	    	GC_mark_from_mark_stack();
    	    	return(FALSE);
    	    }
    	    if (scan_ptr == 0
    	        && (GC_mark_state == MS_INVALID || GC_mark_stack_too_small)) {
    	        /* The stack overflowed (or state is fully invalid):  */
    	        /* grow the stack and rescan from marked objects.     */
    	    	alloc_mark_stack(2*GC_mark_stack_size);
		GC_mark_state = MS_PARTIALLY_INVALID;
    	    }
    	    scan_ptr = GC_push_next_marked(scan_ptr);
    	    if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
    	    	GC_push_roots(TRUE);
    	    	GC_objects_are_marked = TRUE;
    	    	if (GC_mark_state != MS_INVALID) {
    	    	    GC_mark_state = MS_ROOTS_PUSHED;
    	    	}
    	    }
    	    return(FALSE);
    	default:
    	    ABORT("GC_mark_some: bad state");
    	    return(FALSE);
    }
}
321 | ||
322 | ||
323 | GC_bool GC_mark_stack_empty() | |
324 | { | |
325 | return(GC_mark_stack_top < GC_mark_stack); | |
326 | } | |
327 | ||
328 | #ifdef PROF_MARKER | |
329 | word GC_prof_array[10]; | |
330 | # define PROF(n) GC_prof_array[n]++ | |
331 | #else | |
332 | # define PROF(n) | |
333 | #endif | |
334 | ||
/* Given a pointer to someplace other than a small object page or the */
/* first page of a large object, return a pointer either to the	      */
/* start of the large object or NIL.				      */
/* In the latter case black list the address current.		      */
/* Returns NIL without black listing if current points to a block     */
/* with IGNORE_OFF_PAGE set.					      */
/*ARGSUSED*/
# ifdef PRINT_BLACK_LIST
  word GC_find_start(current, hhdr, source)
  word source;
# else
  word GC_find_start(current, hhdr)
# define source 0
# endif
register word current;
register hdr * hhdr;
{
#   ifdef ALL_INTERIOR_POINTERS
	if (hhdr != 0) {
	    register word orig = current;
	    
	    current = (word)HBLKPTR(current) + HDR_BYTES;
	    do {
	      /* A forwarding "header" appears to encode the number   */
	      /* of blocks to step back toward the object start --    */
	      /* NOTE(review): confirm against the definition of      */
	      /* IS_FORWARDING_ADDR_OR_NIL in gc_priv.h.              */
	      current = current - HBLKSIZE*(word)hhdr;
	      hhdr = HDR(current);
	    } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
	    /* current points to the start of the large object */
	    if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(0);
	    if ((word *)orig - (word *)current
	         >= (ptrdiff_t)(hhdr->hb_sz)) {
	        /* Pointer past the end of the block */
	        GC_ADD_TO_BLACK_LIST_NORMAL(orig, source);
	        return(0);
	    }
	    return(current);
	} else {
	    GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
	    return(0);
        }
#   else
        GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
        return(0);
#   endif
#   undef source
}
380 | ||
381 | void GC_invalidate_mark_state() | |
382 | { | |
383 | GC_mark_state = MS_INVALID; | |
384 | GC_mark_stack_top = GC_mark_stack-1; | |
385 | } | |
386 | ||
387 | mse * GC_signal_mark_stack_overflow(msp) | |
388 | mse * msp; | |
389 | { | |
390 | GC_mark_state = MS_INVALID; | |
391 | # ifdef PRINTSTATS | |
392 | GC_printf1("Mark stack overflow; current size = %lu entries\n", | |
393 | GC_mark_stack_size); | |
394 | # endif | |
395 | return(msp-INITIAL_MARK_STACK_SIZE/8); | |
396 | } | |
397 | ||
398 | ||
/*
 * Mark objects pointed to by the regions described by
 * mark stack entries between GC_mark_stack and GC_mark_stack_top,
 * inclusive.  Assumes the upper limit of a mark stack entry
 * is never 0.  A mark stack entry never has size 0.
 * We try to traverse on the order of a hblk of memory before we return.
 * Caller is responsible for calling this until the mark stack is empty.
 */
void GC_mark_from_mark_stack()
{
  /* Cache globals in locals for the duration of the loop.	*/
  mse * GC_mark_stack_reg = GC_mark_stack;
  mse * GC_mark_stack_top_reg = GC_mark_stack_top;
  mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);
  int credit = HBLKSIZE;	/* Remaining credit for marking work	*/
  register word * current_p;	/* Pointer to current candidate ptr.	*/
  register word current;	/* Candidate pointer.			*/
  register word * limit;	/* (Incl) limit of current candidate 	*/
  				/* range				*/
  register word descr;
  register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  register ptr_t least_ha = GC_least_plausible_heap_addr;
# define SPLIT_RANGE_WORDS 128  /* Must be power of 2.		*/

  GC_objects_are_marked = TRUE;
# ifdef OS2 /* Use untweaked version to circumvent compiler problem */
  while (GC_mark_stack_top_reg >= GC_mark_stack_reg && credit >= 0) {
# else
  /* Combined test: loop while the stack is nonempty AND credit is	*/
  /* nonnegative (either term going negative makes the OR negative).	*/
  while ((((ptr_t)GC_mark_stack_top_reg - (ptr_t)GC_mark_stack_reg) | credit)
  	>= 0) {
# endif
    current_p = GC_mark_stack_top_reg -> mse_start;
retry:
    descr = GC_mark_stack_top_reg -> mse_descr;
    if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | DS_TAGS)) {
      /* Tagged descriptor, or a length too large to scan in one step. */
      word tag = descr & DS_TAGS;
      
      switch(tag) {
        case DS_LENGTH:
          /* Large length.						*/
          /* Process part of the range to avoid pushing too much on the	*/
          /* stack.							*/
          GC_mark_stack_top_reg -> mse_start =
         	limit = current_p + SPLIT_RANGE_WORDS-1;
          GC_mark_stack_top_reg -> mse_descr -=
          		WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
          /* Make sure that pointers overlapping the two ranges are	*/
          /* considered. 						*/
          limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
          break;
        case DS_BITMAP:
          /* descr is a bitmap; the high bit corresponds to the word	*/
          /* at current_p.  Scan only the words whose bit is set.	*/
          GC_mark_stack_top_reg--;
          descr &= ~DS_TAGS;
          credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
          while (descr != 0) {
            if ((signed_word)descr < 0) {
              current = *current_p;
              if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
                PUSH_CONTENTS(current, GC_mark_stack_top_reg, mark_stack_limit,
                	      current_p, exit1);
              }
            }
            descr <<= 1;
            ++ current_p;
          }
          continue;
        case DS_PROC:
          /* Descriptor names a client-registered mark procedure;	*/
          /* let it push the object's contents itself.			*/
          GC_mark_stack_top_reg--;
          credit -= PROC_BYTES;
#ifdef GC_DEBUG
	  current_p = GC_debug_object_start(current_p);
#endif
          GC_mark_stack_top_reg =
              (*PROC(descr))
              	    (current_p, GC_mark_stack_top_reg,
              	     mark_stack_limit, ENV(descr));
          continue;
        case DS_PER_OBJECT:
          /* The real descriptor is stored inside the object itself,	*/
          /* at offset (descr - tag); fetch it and redo the dispatch.	*/
          GC_mark_stack_top_reg -> mse_descr =
          	*(word *)((ptr_t)current_p + descr - tag);
          goto retry;
      }
    } else {
      /* Small length: pop the entry and scan the whole range below.	*/
      GC_mark_stack_top_reg--;
      limit = (word *)(((ptr_t)current_p) + (word)descr);
    }
    /* The simple case in which we're scanning a range. */
    credit -= (ptr_t)limit - (ptr_t)current_p;
    limit -= 1;
    while (current_p <= limit) {
      current = *current_p;
      if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
        PUSH_CONTENTS(current, GC_mark_stack_top_reg,
        	      mark_stack_limit, current_p, exit2);
      }
      current_p = (word *)((char *)current_p + ALIGNMENT);
    }
  }
  GC_mark_stack_top = GC_mark_stack_top_reg;
}
498 | ||
/* Allocate or reallocate space for mark stack of size s words */
/* May silently fail.						*/
static void alloc_mark_stack(n)
word n;
{
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct ms_entry));
    
    GC_mark_stack_too_small = FALSE;
    if (GC_mark_stack_size != 0) {
        /* Growing an existing stack. */
        if (new_stack != 0) {
            word displ = (word)GC_mark_stack & (GC_page_size - 1);
            word size = GC_mark_stack_size * sizeof(struct ms_entry);
            
            /* Recycle old space: hand the page-aligned interior of	*/
            /* the old stack back to the heap allocator.		*/
            if (0 != displ) displ = GC_page_size - displ;
            size = (size - displ) & ~(GC_page_size - 1);
            GC_add_to_heap((struct hblk *)
            		   ((word)GC_mark_stack + displ), size);
            GC_mark_stack = new_stack;
            GC_mark_stack_size = n;
#	    ifdef PRINTSTATS
	        GC_printf1("Grew mark stack to %lu frames\n",
		    	   (unsigned long) GC_mark_stack_size);
#	    endif
        } else {
            /* Growth failed: keep the old stack and carry on. */
#	    ifdef PRINTSTATS
	        GC_printf1("Failed to grow mark stack to %lu frames\n",
		    	   (unsigned long) n);
#	    endif
        }
    } else {
        /* Initial allocation; without any mark stack we cannot run. */
        if (new_stack == 0) {
            GC_err_printf0("No space for mark stack\n");
            EXIT();
        }
        GC_mark_stack = new_stack;
        GC_mark_stack_size = n;
    }
    /* Empty stack: top sits one entry below the base. */
    GC_mark_stack_top = GC_mark_stack-1;
}
539 | ||
540 | void GC_mark_init() | |
541 | { | |
542 | alloc_mark_stack(INITIAL_MARK_STACK_SIZE); | |
543 | } | |
544 | ||
/*
 * Push all locations between b and t onto the mark stack.
 * b is the first location to be checked. t is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack
 * overflow.
 */
void GC_push_all(bottom, top)
ptr_t bottom;
ptr_t top;
{
    register word length;
    
    /* Align endpoints inward: locations outside the aligned range	*/
    /* cannot hold a properly aligned pointer.				*/
    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    GC_mark_stack_top++;
    if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
	ABORT("unexpected mark stack overflow");
    }
    length = top - bottom;
#   if DS_TAGS > ALIGNMENT - 1
	/* Round the length up so its low-order tag bits stay zero	*/
	/* (the length doubles as a DS_LENGTH descriptor).		*/
	length += DS_TAGS;
	length &= ~DS_TAGS;
#   endif
    GC_mark_stack_top -> mse_start = (word *)bottom;
    GC_mark_stack_top -> mse_descr = length;
}
573 | ||
/*
 * Analogous to the above, but push only those pages that may have been
 * dirtied.  A block h is assumed dirty if dirty_fn(h) != 0.
 * We use push_fn to actually push the block.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
 */
void GC_push_dirty(bottom, top, dirty_fn, push_fn)
ptr_t bottom;
ptr_t top;
int (*dirty_fn)(/* struct hblk * h */);
void (*push_fn)(/* ptr_t bottom, ptr_t top */);
{
    register struct hblk * h;

    bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));

    if (top == 0 || bottom == top) return;
    /* h = first block boundary strictly above bottom. */
    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
        /* The whole range lies within one block (h-1). */
    	if ((*dirty_fn)(h-1)) {
    	    (*push_fn)(bottom, top);
    	}
    	return;
    }
    /* Leading partial block. */
    if ((*dirty_fn)(h-1)) {
        (*push_fn)(bottom, (ptr_t)h);
    }
    /* Whole blocks in the middle, one entry each. */
    while ((ptr_t)(h+1) <= top) {
    	if ((*dirty_fn)(h)) {
    	    if ((word)(GC_mark_stack_top - GC_mark_stack)
    	        > 3 * GC_mark_stack_size / 4) {
    	 	/* Danger of mark stack overflow: push the remainder	*/
    	 	/* as one big entry and stop.				*/
    	 	(*push_fn)((ptr_t)h, top);
    	 	return;
    	    } else {
    	        (*push_fn)((ptr_t)h, (ptr_t)(h+1));
    	    }
    	}
    	h++;
    }
    /* Trailing partial block, if any. */
    if ((ptr_t)h != top) {
    	if ((*dirty_fn)(h)) {
            (*push_fn)((ptr_t)h, top);
    	}
    }
    if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
        ABORT("unexpected mark stack overflow");
    }
}
627 | ||
628 | # ifndef SMALL_CONFIG | |
/* Push the range [bottom, top).  If all is set, push everything;     */
/* otherwise push only pages the dirty-bit machinery reports dirty.   */
void GC_push_conditional(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    if (all) {
      if (GC_dirty_maintained) {
#	ifdef PROC_VDB
	    /* Pages that were never dirtied cannot contain pointers	*/
	    GC_push_dirty(bottom, top, GC_page_was_ever_dirty, GC_push_all);
#	else
	    GC_push_all(bottom, top);
#	endif
      } else {
      	GC_push_all(bottom, top);
      }
    } else {
	GC_push_dirty(bottom, top, GC_page_was_dirty, GC_push_all);
    }
}
649 | #endif | |
650 | ||
/* Consider a single word p as a candidate pointer found on a stack   */
/* and push it via GC_PUSH_ONE_STACK.  (__cdecl on Win32 so it can be */
/* called through a fixed calling convention.)                        */
# ifdef MSWIN32
  void __cdecl GC_push_one(p)
# else
  void GC_push_one(p)
# endif
word p;
{
    GC_PUSH_ONE_STACK(p);
}
660 | ||
661 | # ifdef __STDC__ | |
662 | # define BASE(p) (word)GC_base((void *)(p)) | |
663 | # else | |
664 | # define BASE(p) (word)GC_base((char *)(p)) | |
665 | # endif | |
666 | ||
/* As above, but argument passed preliminary test.                    */
/* p is a candidate pointer that already passed the plausible-heap-   */
/* address check.  Locate the object base, and if the object is       */
/* unmarked, mark it and push its contents.  Non-pointers are black   */
/* listed.  interior_ptrs selects whether pointers into the middle    */
/* of an object are honored.                                          */
# ifdef PRINT_BLACK_LIST
  void GC_push_one_checked(p, interior_ptrs, source)
  ptr_t source;
# else
  void GC_push_one_checked(p, interior_ptrs)
# define source 0
# endif
register word p;
register GC_bool interior_ptrs;
{
    register word r;
    register hdr * hhdr; 
    register int displ;
  
    GET_HDR(p, hhdr);
    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
        /* Not the first block of an object (or not in the heap).     */
        if (hhdr != 0 && interior_ptrs) {
            /* Fall back to the slow, fully general base lookup.      */
            r = BASE(p);
	    hhdr = HDR(r);
	    displ = BYTES_TO_WORDS(HBLKDISPL(r));
	} else {
	    hhdr = 0;
	}
    } else {
        register map_entry_type map_entry;
        
        displ = HBLKDISPL(p);
        map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
        if (map_entry == OBJ_INVALID) {
            /* p does not point at a valid object displacement.       */
            if (interior_ptrs) {
              r = BASE(p);
	      displ = BYTES_TO_WORDS(HBLKDISPL(r));
	      if (r == 0) hhdr = 0;
            } else {
              hhdr = 0;
            }
        } else {
            /* Back p up to the object start within the block.        */
            displ = BYTES_TO_WORDS(displ);
            displ -= map_entry;
            r = (word)((word *)(HBLKPTR(p)) + displ);
        }
    }
    /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
    /* displ is the word index within the block.		 */
    if (hhdr == 0) {
        /* Not a valid pointer: black list the address so future      */
        /* allocation avoids it.                                      */
    	if (interior_ptrs) {
#	    ifdef PRINT_BLACK_LIST
		GC_add_to_black_list_stack(p, source);
#	    else
		GC_add_to_black_list_stack(p);
#	    endif
	} else {
	    GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
#	    undef source  /* In case we had to define it. */
	}
    } else {
    	if (!mark_bit_from_hdr(hhdr, displ)) {
    	    set_mark_bit_from_hdr(hhdr, displ);
    	    PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
    	             &(GC_mark_stack[GC_mark_stack_size]));
    	}
    }
}
731 | ||
# ifdef TRACE_BUF

# define TRACE_ENTRIES 1000

/* Circular buffer of recent events, for post-mortem debugging. */
struct trace_entry {
    char * kind;		/* Tag string; only the pointer is	*/
				/* stored, so it must have static	*/
				/* lifetime.				*/
    word gc_no;			/* GC_gc_no at the time of the entry.	*/
    word words_allocd;
    word arg1;			/* Event arguments, stored XORed with	*/
    word arg2;			/* 0x80000000 (undone when printed).	*/
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;	/* Next slot to overwrite. */

/* Append one event to the circular trace buffer. */
void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
    /* Arguments are XORed with 0x80000000 before storing; the print	*/
    /* routine XORs again to recover them (rationale not recorded	*/
    /* here -- presumably to make stored values distinguishable).	*/
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    GC_trace_buf_ptr++;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}
756 | ||
757 | void GC_print_trace(word gc_no, GC_bool lock) | |
758 | { | |
759 | int i; | |
760 | struct trace_entry *p; | |
761 | ||
762 | if (lock) LOCK(); | |
763 | for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) { | |
764 | if (i < 0) i = TRACE_ENTRIES-1; | |
765 | p = GC_trace_buf + i; | |
766 | if (p -> gc_no < gc_no || p -> kind == 0) return; | |
767 | printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n", | |
768 | p -> kind, p -> gc_no, p -> words_allocd, | |
769 | (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000); | |
770 | } | |
771 | printf("Trace incomplete\n"); | |
772 | if (lock) UNLOCK(); | |
773 | } | |
774 | ||
775 | # endif /* TRACE_BUF */ | |
776 | ||
/*
 * A version of GC_push_all that treats all interior pointers as valid
 */
void GC_push_all_stack(bottom, top)
ptr_t bottom;
ptr_t top;
{
# ifdef ALL_INTERIOR_POINTERS
    /* Interior pointers are valid everywhere: the plain range push	*/
    /* suffices.							*/
    GC_push_all(bottom, top);
#   ifdef TRACE_BUF
        GC_add_trace_entry("GC_push_all_stack", bottom, top);
#   endif
# else
    word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
    register word *p;
    register word q;
    register word *lim;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    /* Redirect the globals referenced by GC_PUSH_ONE_STACK to the	*/
    /* cached local copies above.					*/
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    if (top == 0) return;
    /* check all pointers in range and put in push if they appear */
    /* to be valid.						  */
    lim = t - 1 /* longword */;
    for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
	q = *p;
	GC_PUSH_ONE_STACK(q);
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
# endif
}
812 | ||
813 | #ifndef SMALL_CONFIG | |
/* Push all objects reachable from marked objects in the given block */
/* of size 1 objects.						     */
void GC_push_marked1(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    /* Redirect the globals referenced by GC_PUSH_ONE_HEAP to the	*/
    /* cached local copies above.					*/
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha
    
    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
	while( p < plim )  {
	    mark_word = *mark_word_addr++;
	    i = 0;
	    /* Each set bit marks a one-word object at p[i]. */
	    while(mark_word != 0) {
	      if (mark_word & 1) {
	          q = p[i];
	          GC_PUSH_ONE_HEAP(q);
	      }
	      i++;
	      mark_word >>= 1;
	    }
	    p += WORDSZ;
	}
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
851 | ||
852 | ||
853 | #ifndef UNALIGNED | |
854 | ||
/* Push all objects reachable from marked objects in the given block */
/* of size 2 objects.						     */
void GC_push_marked2(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    /* Redirect the globals referenced by GC_PUSH_ONE_HEAP to the	*/
    /* cached local copies above.					*/
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha
    
    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
	while( p < plim )  {
	    mark_word = *mark_word_addr++;
	    i = 0;
	    /* Each set bit marks a two-word object at p[i]; push	*/
	    /* both of its words.					*/
	    while(mark_word != 0) {
	      if (mark_word & 1) {
	          q = p[i];
	          GC_PUSH_ONE_HEAP(q);
	          q = p[i+1];
	          GC_PUSH_ONE_HEAP(q);
	      }
	      i += 2;
	      mark_word >>= 2;
	    }
	    p += WORDSZ;
	}
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
894 | ||
/* Push all objects reachable from marked objects in the given block */
/* of size 4 objects.						     */
/* There is a risk of mark stack overflow here.  But we handle that. */
/* And only unmarked objects get pushed, so it's not very likely.    */
void GC_push_marked4(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    /* Redirect the globals referenced by GC_PUSH_ONE_HEAP to the	*/
    /* cached local copies above.					*/
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha
    
    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
	while( p < plim )  {
	    mark_word = *mark_word_addr++;
	    i = 0;
	    /* Each set bit marks a four-word object at p[i]; push	*/
	    /* all four of its words.					*/
	    while(mark_word != 0) {
	      if (mark_word & 1) {
	          q = p[i];
	          GC_PUSH_ONE_HEAP(q);
	          q = p[i+1];
	          GC_PUSH_ONE_HEAP(q);
	          q = p[i+2];
	          GC_PUSH_ONE_HEAP(q);
	          q = p[i+3];
	          GC_PUSH_ONE_HEAP(q);
	      }
	      i += 4;
	      mark_word >>= 4;
	    }
	    p += WORDSZ;
	}
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
940 | ||
941 | #endif /* UNALIGNED */ | |
942 | ||
943 | #endif /* SMALL_CONFIG */ | |
944 | ||
/* Push all objects reachable from marked objects in the given block. */
/* Dispatches to the specialized GC_push_markedN routines for small   */
/* object sizes when they are compiled in; otherwise walks the mark   */
/* bits directly and pushes whole objects via PUSH_OBJ.               */
void GC_push_marked(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;        /* object size in words   */
    register word * p;
    register int word_no;
    register word * lim;
    register mse * GC_mark_stack_top_reg;
    register mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);

    /* Some quick shortcuts: */
    {
        struct obj_kind *ok = &(GC_obj_kinds[hhdr -> hb_obj_kind]);
        /* A zero length descriptor with no relocation (e.g. the      */
        /* PTRFREE kind) means the objects contain no pointers, so    */
        /* there is nothing to push from this block.                  */
        if ((0 | DS_LENGTH) == ok -> ok_descriptor
            && FALSE == ok -> ok_relocate_descr)
            return;
    }
    if (GC_block_empty(hhdr)/* nothing marked */) return;
#   ifdef GATHERSTATS
        GC_n_rescuing_pages++;
#   endif
    GC_objects_are_marked = TRUE;
    /* lim is the last word at which an object of size sz can start.  */
    /* For a large (multi-block) object, scan the single object that  */
    /* begins in this block.                                          */
    if (sz > MAXOBJSZ) {
        lim = (word *)(h + 1);
    } else {
        lim = (word *)(h + 1) - sz;
    }

    switch(sz) {
#   if !defined(SMALL_CONFIG)
     case 1:
        GC_push_marked1(h, hhdr);
        break;
#   endif
#   if !defined(SMALL_CONFIG) && !defined(UNALIGNED)
     case 2:
        GC_push_marked2(h, hhdr);
        break;
     case 4:
        GC_push_marked4(h, hhdr);
        break;
#   endif
     default:
        /* Cache the mark stack top in a register for the loop, and   */
        /* write it back once at the end.                             */
        GC_mark_stack_top_reg = GC_mark_stack_top;
        for (p = (word *)h + HDR_WORDS, word_no = HDR_WORDS; p <= lim;
             p += sz, word_no += sz) {
            /* This ignores user specified mark procs.  This currently */
            /* doesn't matter, since marking from the whole object     */
            /* is always sufficient, and we will eventually use the    */
            /* user mark proc to avoid any bogus pointers.             */
            if (mark_bit_from_hdr(hhdr, word_no)) {
                /* Mark from fields inside the object */
                PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
#               ifdef GATHERSTATS
                    /* Subtract this object from total, since it was  */
                    /* added in twice.                                */
                    GC_composite_in_use -= sz;
#               endif
            }
        }
        GC_mark_stack_top = GC_mark_stack_top_reg;
    }
}
1010 | ||
1011 | #ifndef SMALL_CONFIG | |
1012 | /* Test whether any page in the given block is dirty */ | |
1013 | GC_bool GC_block_was_dirty(h, hhdr) | |
1014 | struct hblk *h; | |
1015 | register hdr * hhdr; | |
1016 | { | |
1017 | register int sz = hhdr -> hb_sz; | |
1018 | ||
1019 | if (sz < MAXOBJSZ) { | |
1020 | return(GC_page_was_dirty(h)); | |
1021 | } else { | |
1022 | register ptr_t p = (ptr_t)h; | |
1023 | sz += HDR_WORDS; | |
1024 | sz = WORDS_TO_BYTES(sz); | |
1025 | while (p < (ptr_t)h + sz) { | |
1026 | if (GC_page_was_dirty((struct hblk *)p)) return(TRUE); | |
1027 | p += HBLKSIZE; | |
1028 | } | |
1029 | return(FALSE); | |
1030 | } | |
1031 | } | |
1032 | #endif /* SMALL_CONFIG */ | |
1033 | ||
1034 | /* Similar to GC_push_next_marked, but return address of next block */ | |
1035 | struct hblk * GC_push_next_marked(h) | |
1036 | struct hblk *h; | |
1037 | { | |
1038 | register hdr * hhdr; | |
1039 | ||
1040 | h = GC_next_block(h); | |
1041 | if (h == 0) return(0); | |
1042 | hhdr = HDR(h); | |
1043 | GC_push_marked(h, hhdr); | |
1044 | return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz)); | |
1045 | } | |
1046 | ||
1047 | #ifndef SMALL_CONFIG | |
1048 | /* Identical to above, but mark only from dirty pages */ | |
1049 | struct hblk * GC_push_next_marked_dirty(h) | |
1050 | struct hblk *h; | |
1051 | { | |
1052 | register hdr * hhdr = HDR(h); | |
1053 | ||
1054 | if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); } | |
1055 | for (;;) { | |
1056 | h = GC_next_block(h); | |
1057 | if (h == 0) return(0); | |
1058 | hhdr = HDR(h); | |
1059 | # ifdef STUBBORN_ALLOC | |
1060 | if (hhdr -> hb_obj_kind == STUBBORN) { | |
1061 | if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) { | |
1062 | break; | |
1063 | } | |
1064 | } else { | |
1065 | if (GC_block_was_dirty(h, hhdr)) break; | |
1066 | } | |
1067 | # else | |
1068 | if (GC_block_was_dirty(h, hhdr)) break; | |
1069 | # endif | |
1070 | h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz); | |
1071 | } | |
1072 | GC_push_marked(h, hhdr); | |
1073 | return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz)); | |
1074 | } | |
1075 | #endif | |
1076 | ||
1077 | /* Similar to above, but for uncollectable pages. Needed since we */ | |
1078 | /* do not clear marks for such pages, even for full collections. */ | |
1079 | struct hblk * GC_push_next_marked_uncollectable(h) | |
1080 | struct hblk *h; | |
1081 | { | |
1082 | register hdr * hhdr = HDR(h); | |
1083 | ||
1084 | for (;;) { | |
1085 | h = GC_next_block(h); | |
1086 | if (h == 0) return(0); | |
1087 | hhdr = HDR(h); | |
1088 | if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break; | |
1089 | h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz); | |
1090 | } | |
1091 | GC_push_marked(h, hhdr); | |
1092 | return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz)); | |
1093 | } | |
1094 | ||
1095 |