/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads
 * (i.e. properties not guaranteed by the Pthread standard):
 *
 * - the function GC_linux_thread_top_of_stack(void)
 *   relies on the way LinuxThreads lays out thread stacks
 *   in the address space.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and irix_threads.c; any changes made here may need to be reflected
 * there too.
 */

/* #define DEBUG_THREADS 1 */

/* ANSI C requires that a compilation unit contains something */
# include "gc_priv.h"

# if defined(LINUX_THREADS)

# include <pthread.h>
# include <sched.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <semaphore.h>
# include <signal.h>

#ifdef USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
#else
#   define WRAP_FUNC(f) GC_##f
#   define REAL_FUNC(f) f
#   undef pthread_create
#   undef pthread_sigmask
#   undef pthread_join
#endif
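
#if 0
/* Illustrative sketch, not part of the original code: with USE_LD_WRAP
 * the wrappers below rely on the GNU ld --wrap feature, so a hypothetical
 * client program needs no source changes.  Link roughly as:
 *
 *   gcc ... -Wl,--wrap=pthread_create -Wl,--wrap=pthread_join \
 *           -Wl,--wrap=pthread_sigmask
 *
 * ld then routes pthread_create calls to __wrap_pthread_create here,
 * which reaches the C library through __real_pthread_create.  Without
 * USE_LD_WRAP, clients are expected to reach the GC_ variants instead
 * (typically via macros in gc.h, which this file #undefs above).
 */
#include "gc.h"
#include <pthread.h>

static void * demo(void * arg)      /* hypothetical thread body */
{
    return GC_malloc(8);
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, demo, NULL);  /* resolved to __wrap_pthread_create */
    pthread_join(t, NULL);                 /* resolved to __wrap_pthread_join   */
    return 0;
}
#endif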

void GC_thr_init();

#if 0
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ",(long) i); }
    }
    GC_printf0("\n");
}
#endif

/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and     */
/* joins.  We never actually create detached threads.  We allocate all */
/* new thread stacks ourselves.  These allow us to maintain this       */
/* data structure.                                                     */
/* Protected by the allocation lock.                                   */
/* Some of this should be declared volatile, but that's inconsistent   */
/* with some library routine declarations.                             */
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads    */
                                  /* with a given pthread id come       */
                                  /* first.  (All but the first are     */
                                  /* guaranteed to be dead, but we may  */
                                  /* not yet have registered the join.) */
    pthread_t id;
    word flags;
#   define FINISHED 1       /* Thread has exited.                   */
#   define DETACHED 2       /* Thread is intended to be detached.   */
#   define MAIN_THREAD 4    /* True for the original thread only.   */

    ptr_t stack_end;        /* Cold end of the stack.               */
    ptr_t stack_ptr;        /* Valid only when stopped.             */
#   ifdef IA64
        ptr_t backing_store_end;
        ptr_t backing_store_ptr;
#   endif
    int signal;
    void * status;          /* The value returned from the thread.  */
                            /* Used only to avoid premature         */
                            /* reclamation of any data it might     */
                            /* reference.                           */
} * GC_thread;

GC_thread GC_lookup_thread(pthread_t id);

/*
 * The only way to suspend threads given the pthread interface is to send
 * signals.  We can't use SIGSTOP directly, because we need to get the
 * thread to save its stack pointer in the GC thread table before
 * suspending.  So we have to reserve a signal of our own for this.
 * This means we have to intercept client calls to change the signal mask.
 * The linuxthreads package already uses SIGUSR1 and SIGUSR2,
 * so we need to use something else.  I chose SIGPWR.
 * (Perhaps SIGUNUSED would be a better choice.)
 */
#define SIG_SUSPEND SIGPWR

#define SIG_RESTART SIGXCPU

sem_t GC_suspend_ack_sem;
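
/* Informal sketch of the stop/start handshake implemented below (added
 * commentary, not from the original authors):
 *
 *   stopper (GC_stop_world)           target thread
 *   ------------------------          --------------------------------
 *   pthread_kill(t, SIG_SUSPEND) -->  GC_suspend_handler: record sp,
 *                                     sem_post(&GC_suspend_ack_sem),
 *   sem_wait(&GC_suspend_ack_sem) <-  then sigsuspend() with only
 *                                     SIG_RESTART unmasked
 *   ... collection ...
 *   pthread_kill(t, SIG_RESTART) -->  GC_restart_handler sets me->signal,
 *                                     sigsuspend() returns, thread resumes
 */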

/*
GC_linux_thread_top_of_stack() relies on implementation details of
LinuxThreads, namely that thread stacks are allocated on 2M boundaries
and grow to no more than 2M.
To make sure that we're using LinuxThreads and not some other thread
package, we generate a dummy reference to `pthread_kill_other_threads_np'
(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;

#define LINUX_THREADS_STACK_SIZE  (2 * 1024 * 1024)

static inline ptr_t GC_linux_thread_top_of_stack(void)
{
  char *sp = GC_approx_sp();
  ptr_t tos = (ptr_t) (((unsigned long)sp | (LINUX_THREADS_STACK_SIZE - 1)) + 1);
#if DEBUG_THREADS
  GC_printf1("SP = %lx\n", (unsigned long)sp);
  GC_printf1("TOS = %lx\n", (unsigned long)tos);
#endif
  return tos;
}
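
/* Worked example of the rounding above (added commentary): with 2M-aligned
 * stacks, LINUX_THREADS_STACK_SIZE - 1 == 0x1fffff.  For sp == 0x40123456,
 * sp | 0x1fffff == 0x401fffff, and adding 1 yields 0x40200000, i.e. sp
 * rounded up to the next 2M boundary, the top of this thread's stack.   */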

#if defined(SPARC) || defined(IA64)
  extern word GC_save_regs_in_stack();
#endif

void GC_suspend_handler(int sig)
{
    int dummy;
    pthread_t my_thread = pthread_self();
    GC_thread me;
    sigset_t all_sigs;
    sigset_t old_sigs;
    int i;
    sigset_t mask;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

#if DEBUG_THREADS
    GC_printf1("Suspending 0x%x\n", my_thread);
#endif

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf  */
    /* of a thread which holds the allocation lock in order     */
    /* to stop the world.  Thus concurrent modification of the  */
    /* data structure is impossible.                            */
#   ifdef SPARC
        me -> stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
        me -> stack_ptr = (ptr_t)(&dummy);
#   endif
#   ifdef IA64
        me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack();
#   endif

    /* Tell the thread that wants to stop the world that this   */
    /* thread has been stopped.  Note that sem_post() is        */
    /* the only async-signal-safe primitive in LinuxThreads.    */
    sem_post(&GC_suspend_ack_sem);

    /* Wait until that thread tells us to restart by sending    */
    /* this thread a SIG_RESTART signal.                        */
    /* SIG_RESTART should be masked at this point.  Thus there  */
    /* is no race.                                              */
    if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
    if (sigdelset(&mask, SIG_RESTART) != 0) ABORT("sigdelset() failed");
#   ifdef NO_SIGNALS
        if (sigdelset(&mask, SIGINT) != 0) ABORT("sigdelset() failed");
        if (sigdelset(&mask, SIGQUIT) != 0) ABORT("sigdelset() failed");
        if (sigdelset(&mask, SIGTERM) != 0) ABORT("sigdelset() failed");
#   endif
    do {
        me->signal = 0;
        sigsuspend(&mask);             /* Wait for signal */
    } while (me->signal != SIG_RESTART);

#if DEBUG_THREADS
    GC_printf1("Continuing 0x%x\n", my_thread);
#endif
}

void GC_restart_handler(int sig)
{
    GC_thread me;

    if (sig != SIG_RESTART) ABORT("Bad signal in restart_handler");

    /* Let the GC_suspend_handler() know that we got a SIG_RESTART. */
    /* The lookup here is safe, since I'm doing this on behalf      */
    /* of a thread which holds the allocation lock in order         */
    /* to stop the world.  Thus concurrent modification of the      */
    /* data structure is impossible.                                */
    me = GC_lookup_thread(pthread_self());
    me->signal = SIG_RESTART;

    /*
    ** Note: even if we didn't do anything useful here,
    ** it would still be necessary to have a signal handler,
    ** rather than ignoring the signals, otherwise
    ** the signals will not be delivered at all, and
    ** will thus not interrupt the sigsuspend() above.
    */

#if DEBUG_THREADS
    GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
#endif
}

GC_bool GC_thr_initialized = FALSE;

# define THREAD_TABLE_SZ 128    /* Must be power of 2 */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
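
/* Added commentary: GC_threads is a hash table of chains, keyed by
 * ((word)id) % THREAD_TABLE_SZ; keeping THREAD_TABLE_SZ a power of 2
 * makes that modulus a cheap mask.  GC_new_thread() pushes new entries
 * at the head of a chain, which is what lets GC_lookup_thread() return
 * the most recent (live) entry when a dead thread with the same pthread
 * id is still awaiting GC_delete_gc_thread().                          */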

/* Add a thread to GC_threads.  We assume it wasn't already there. */
/* Caller holds allocation lock.                                   */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
        /* Don't acquire allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> flags = 0; */
    return(result);
}

/* Delete a thread from GC_threads.  We assume it is there. */
/* (The code intentionally traps if it wasn't.)             */
/* Caller holds allocation lock.                            */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}

/* If a thread has been joined, but we have not yet        */
/* been notified, then there may be more than one thread   */
/* in the table with the same pthread id.                  */
/* This is OK, but we need a way to delete a specific one. */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}

/* Return a GC_thread corresponding to a given thread_t.  */
/* Returns 0 if it's not there.                           */
/* Caller holds allocation lock or otherwise inhibits     */
/* updates.                                               */
/* If there is more than one thread with the given id we  */
/* return the most recent one.                            */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}

/* Caller holds allocation lock. */
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            n_live_threads++;
            #if DEBUG_THREADS
              GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
            #endif
            result = pthread_kill(p -> id, SIG_SUSPEND);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    for (i = 0; i < n_live_threads; i++) {
        sem_wait(&GC_suspend_ack_sem);
    }
    #if DEBUG_THREADS
      GC_printf1("World stopped 0x%x\n", pthread_self());
    #endif
}
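
/* Added commentary: the sem_wait loop above waits once per signal that
 * was actually delivered, so a thread that vanished (ESRCH) is not
 * waited for.  Each suspended thread posts GC_suspend_ack_sem exactly
 * once from GC_suspend_handler, so when the loop finishes all
 * n_live_threads are known to be parked in sigsuspend().              */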

/* Caller holds allocation lock. */
void GC_start_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

#   if DEBUG_THREADS
      GC_printf0("World starting\n");
#   endif

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            n_live_threads++;
            #if DEBUG_THREADS
              GC_printf1("Sending restart signal to 0x%x\n", p -> id);
            #endif
            result = pthread_kill(p -> id, SIG_RESTART);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    #if DEBUG_THREADS
      GC_printf0("World started\n");
    #endif
}

# ifdef IA64
#   define IF_IA64(x) x
# else
#   define IF_IA64(x)
# endif
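
/* Added commentary: IF_IA64 lets a single source line carry the extra
 * register-backing-store bookkeeping.  For example,
 *     IF_IA64(bs_hi = p -> backing_store_ptr;)
 * expands to the assignment on IA64 and to nothing at all elsewhere.  */
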
/* We hold allocation lock.  Should do exactly the right thing if the */
/* world is stopped.  Should not fail if it isn't.                    */
void GC_push_all_stacks()
{
    int i;
    GC_thread p;
    ptr_t sp = GC_approx_sp();
    ptr_t lo, hi;
    /* On IA64, we also need to scan the register backing store. */
    IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
    #if DEBUG_THREADS
        GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
    #endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
#           ifdef SPARC
                lo = (ptr_t)GC_save_regs_in_stack();
#           else
                lo = GC_approx_sp();
#           endif
            IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
        } else {
            lo = p -> stack_ptr;
            IF_IA64(bs_hi = p -> backing_store_ptr;)
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            hi = p -> stack_end;
            IF_IA64(bs_lo = p -> backing_store_end;)
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
            IF_IA64(bs_lo = BACKING_STORE_BASE;)
        }
        #if DEBUG_THREADS
            GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
                       (unsigned long) p -> id,
                       (unsigned long) lo, (unsigned long) hi);
        #endif
        if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
        GC_push_all_stack(lo, hi);
#       ifdef IA64
            if (pthread_equal(p -> id, me)) {
                GC_push_all_eager(bs_lo, bs_hi);
            } else {
                GC_push_all_stack(bs_lo, bs_hi);
            }
#       endif
      }
    }
}


/* We hold the allocation lock. */
void GC_thr_init()
{
    int dummy;
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
        ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    if (sigfillset(&act.sa_mask) != 0) {
        ABORT("sigfillset() failed");
    }

#   ifdef NO_SIGNALS
        if (sigdelset(&act.sa_mask, SIGINT) != 0
            || sigdelset(&act.sa_mask, SIGQUIT) != 0
            || sigdelset(&act.sa_mask, SIGTERM) != 0) {
            ABORT("sigdelset() failed");
        }
#   endif

    /* SIG_RESTART is unmasked by the handler when necessary. */
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
        ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_RESTART, &act, NULL) != 0) {
        ABORT("Cannot set SIG_RESTART handler");
    }

    /* Add the initial thread, so we can stop it. */
    t = GC_new_thread(pthread_self());
    t -> stack_ptr = (ptr_t)(&dummy);
    t -> flags = DETACHED | MAIN_THREAD;
}

int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
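
#if 0
/* Illustrative sketch (not from the original source): even if a client
 * tries to block every signal, the wrapper above quietly keeps
 * SIG_SUSPEND deliverable, so the collector can still stop this thread. */
static void client_blocks_everything(void)
{
    sigset_t all;

    sigfillset(&all);
    pthread_sigmask(SIG_SETMASK, &all, NULL);
    /* With the wrapper in effect, the resulting mask is "everything   */
    /* except SIG_SUSPEND"; a GC stop-the-world still works.           */
}
#endif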

struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;       /* 1 ==> in our thread table, but   */
                            /* parent hasn't yet noticed.       */
};
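
/* Added commentary on the start_info lifecycle: the parent allocates si
 * in the pthread_create wrapper (from the GC heap, so the collector can
 * see it), the child posts si->registered from GC_start_routine once it
 * is in the thread table, and the parent sem_waits on that before
 * destroying the semaphore and returning.  This both publishes the child
 * and keeps si live for exactly as long as the child needs it.          */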

void GC_thread_exit_proc(void *arg)
{
    GC_thread me;
    struct start_info * si = arg;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
    if (GC_incremental && GC_collection_in_progress()) {
        int old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark stack, */
        /* since it's about to be unmapped.                                */
        while (GC_incremental && GC_collection_in_progress()
               && old_gc_no == GC_gc_no) {
            ENTER_GC();
            GC_collect_a_little_inner(1);
            EXIT_GC();
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
    UNLOCK();
}

int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id */
    /* can't have been recycled by pthreads.                          */
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
    LOCK();
    /* Here the pthread thread id may have been recycled. */
    GC_delete_gc_thread(thread, thread_gc_id);
    UNLOCK();
    return result;
}

void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
        GC_printf1("Starting thread 0x%lx\n", my_pthread);
        GC_printf1("pid = %ld\n", (long) getpid());
        GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack_ptr = 0;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)     */
    /* doesn't work because the stack base in /proc/self/stat is the     */
    /* one for the main thread.  There is a strong argument that that's  */
    /* a kernel bug, but a pervasive one.                                */
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
                                & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end - 0x10;
        /* Needs to be plausible, since an asynchronous stack mark  */
        /* should not crash.                                        */
#   else
      me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.    */
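    /* Worked example of the alignment above (added commentary): with    */
    /* GC_page_size == 0x1000 and &dummy == 0xbf8012f4, the downward-    */
    /* growing case computes (0xbf8012f4 + 0xfff) & ~0xfff == 0xbf802000,*/
    /* i.e. the cold (high) end rounded up to the next page boundary.    */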
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
                        (GC_save_regs_in_stack() & ~(GC_page_size - 1));
        /* This is also < 100% convincing.  We should also read this */
        /* from /proc, but the hook to do so isn't there yet.        */
#   endif /* IA64 */
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
        GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    start_arg = si -> arg;
    sem_post(&(si -> registered));
    pthread_cleanup_push(GC_thread_exit_proc, si);
    result = (*start)(start_arg);
#if DEBUG_THREADS
    GC_printf1("Finishing thread 0x%x\n", pthread_self());
#endif
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
        /* Cleanup acquires lock, ensuring that we can't exit          */
        /* while a collection that thinks we're alive is trying to stop */
        /* us.                                                          */
    return(result);
}

int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                  const pthread_attr_t *attr,
                  void *(*start_routine)(void *), void *arg)
{
    int result;
    GC_thread t;
    pthread_t my_new_thread;
    void * stack;
    size_t stacksize;
    pthread_attr_t new_attr;
    int detachstate;
    word my_flags = 0;
    struct start_info * si = GC_malloc(sizeof(struct start_info));
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    if (NULL == attr) {
        stack = 0;
        (void) pthread_attr_init(&new_attr);
    } else {
        new_attr = *attr;
    }
    pthread_attr_getdetachstate(&new_attr, &detachstate);
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
        GC_printf1("About to start new thread from thread 0x%X\n",
                   pthread_self());
#   endif
    result = REAL_FUNC(pthread_create)(new_thread, &new_attr, GC_start_routine, si);
#   ifdef DEBUG_THREADS
        GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.           */
    /* This also ensures that we hold onto si until the child is done */
    /* with it.  Thus it doesn't matter whether it is otherwise       */
    /* visible to the collector.                                      */
    if (0 != sem_wait(&(si -> registered))) ABORT("sem_wait failed");
    sem_destroy(&(si -> registered));
    /* pthread_attr_destroy(&new_attr); */
    return(result);
}

#if defined(USE_SPIN_LOCK)

VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and */
                        /* holding the allocation lock for an     */
                        /* extended period.                       */

/* Reasonably fast spin locks.  Basically the same implementation */
/* as STL alloc.h.  This isn't really the right way to do this,   */
/* but until the POSIX scheduling mess gets straightened out ...  */

volatile unsigned int GC_allocate_lock = 0;


void GC_lock()
{
#   define low_spin_max 30  /* spin cycles if we suspect uniprocessor */
#   define high_spin_max 1000 /* spin cycles for multiprocessor */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    volatile unsigned junk;
#   define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    junk = 0;
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            PAUSE;
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
            /* nanosleep(<= 2ms) just spins under Linux.  We */
            /* want to be careful to avoid that behavior.    */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 26) i = 26;
                /* Don't wait for more than about 60msecs, even */
                /* under extreme contention.                    */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
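                /* Added note: the exponential backoff caps at    */
                /* 1 << 26 ns == 67,108,864 ns, i.e. roughly the  */
                /* 60 msec mentioned above.                       */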
            nanosleep(&ts, 0);
        }
    }
}

#endif /* USE_SPIN_LOCK */

# endif /* LINUX_THREADS */
