Lines 14-49

 
 # define DEBUG_CYGWIN_THREADS 0
 
-GC_bool GC_thr_initialized = FALSE;
 void * GC_start_routine(void * arg);
 void GC_thread_exit_proc(void *arg);
 
 #endif
 
+/* The type of the first argument to InterlockedExchange.    */
+/* Documented to be LONG volatile *, but at least gcc likes  */
+/* this better.                                               */
+typedef LONG * IE_t;
+
 #ifndef MAX_THREADS
-# define MAX_THREADS 64
+# define MAX_THREADS 256
+    /* FIXME:                                                   */
+    /* Things may get quite slow for large numbers of threads, */
+    /* since we look them up with sequential search.           */
 #endif
 
-struct thread_entry {
-  LONG in_use;
+GC_bool GC_thr_initialized = FALSE;
+
+DWORD GC_main_thread = 0;
+
+struct GC_thread_Rep {
+  LONG in_use;        /* Updated without lock.                 */
+                      /* We assert that unused                 */
+                      /* entries have invalid ids of           */
+                      /* zero and zero stack fields.           */
   DWORD id;
   HANDLE handle;
-  void *stack;        /* The cold end of the stack.            */
+  ptr_t stack_base;   /* The cold end of the stack.            */
                       /* 0 ==> entry not valid.                */
-                      /* !in_use ==> stack == 0                */
-  CONTEXT context;
+                      /* !in_use ==> stack_base == 0           */
   GC_bool suspended;
 
 # ifdef CYGWIN32
     void *status; /* hold exit value until join in case it's a pointer */
     pthread_t pthread_id;
+    short flags;            /* Protected by GC lock.                */
+#     define FINISHED 1     /* Thread has exited.                   */
+#     define DETACHED 2     /* Thread is intended to be detached.   */
 # endif
-
 };
 
+typedef volatile struct GC_thread_Rep * GC_thread;
+
+/*
+ * We generally assume that volatile ==> memory ordering, at least among
+ * volatiles.
+ */
+
 volatile GC_bool GC_please_stop = FALSE;
 
-volatile struct thread_entry thread_table[MAX_THREADS];
+volatile struct GC_thread_Rep thread_table[MAX_THREADS];
+
+volatile LONG GC_max_thread_index = 0; /* Largest index in thread_table */
+                                       /* that was ever used.           */
+
+extern LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info);
+
+/*
+ * This may be called from DllMain, and hence operates under unusual
+ * constraints.
+ */
+static GC_thread GC_new_thread(void) {
+  int i;
+  /* It appears to be unsafe to acquire a lock here, since this     */
+  /* code is apparently not preeemptible on some systems.           */
+  /* (This is based on complaints, not on Microsoft's official      */
+  /* documentation, which says this should perform "only simple     */
+  /* initialization tasks".)                                        */
+  /* Hence we make do with nonblocking synchronization.             */
+
+  /* The following should be a noop according to the win32          */
+  /* documentation.  There is empirical evidence that it            */
+  /* isn't.          - HB                                           */
+# if defined(MPROTECT_VDB)
+    if (GC_incremental) SetUnhandledExceptionFilter(GC_write_fault_handler);
+# endif
+              /* cast away volatile qualifier */
+  for (i = 0; InterlockedExchange((IE_t)&thread_table[i].in_use,1) != 0; i++) {
+    /* Compare-and-swap would make this cleaner, but that's not     */
+    /* supported before Windows 98 and NT 4.0.  In Windows 2000,    */
+    /* InterlockedExchange is supposed to be replaced by            */
+    /* InterlockedExchangePointer, but that's not really what I     */
+    /* want here.                                                   */
+    if (i == MAX_THREADS - 1)
+      ABORT("too many threads");
+  }
+  /* Update GC_max_thread_index if necessary.  The following is safe, */
+  /* and unlike CompareExchange-based solutions seems to work on all  */
+  /* Windows95 and later platforms.                                   */
+  /* Unfortunately, GC_max_thread_index may be temporarily out of     */
+  /* bounds, so readers have to compensate.                           */
+  while (i > GC_max_thread_index) {
+    InterlockedIncrement((IE_t)&GC_max_thread_index);
+  }
+  if (GC_max_thread_index >= MAX_THREADS) {
+    /* We overshot due to simultaneous increments.  */
+    /* Setting it to MAX_THREADS-1 is always safe.  */
+    GC_max_thread_index = MAX_THREADS - 1;
+  }
+
+# ifdef CYGWIN32
+    thread_table[i].pthread_id = pthread_self();
+# endif
+  if (!DuplicateHandle(GetCurrentProcess(),
+                       GetCurrentThread(),
+                       GetCurrentProcess(),
+                       (HANDLE*)&thread_table[i].handle,
+                       0,
+                       0,
+                       DUPLICATE_SAME_ACCESS)) {
+    DWORD last_error = GetLastError();
+    GC_printf1("Last error code: %lx\n", last_error);
+    ABORT("DuplicateHandle failed");
+  }
+  thread_table[i].stack_base = GC_get_stack_base();
+  /* Up until this point, GC_psuh_all_stacks considers this thread  */
+  /* invalid.                                                       */
+  if (thread_table[i].stack_base == NULL)
+    ABORT("Failed to find stack base in GC_new_thread");
+  /* Up until this point, this entry is viewed as reserved but invalid */
+  /* by GC_delete_thread.                                              */
+  thread_table[i].id = GetCurrentThreadId();
+  /* If this thread is being created while we are trying to stop    */
+  /* the world, wait here.  Hopefully this can't happen on any      */
+  /* systems that don't allow us to block here.                     */
+  while (GC_please_stop) Sleep(20);
+  return thread_table + i;
+}
+
+/*
+ * GC_max_thread_index may temporarily be larger than MAX_THREADS.
+ * To avoid subscript errors, we check on access.
+ */
+#ifdef __GNUC__
+__inline__
+#endif
+LONG GC_get_max_thread_index()
+{
+  LONG my_max = GC_max_thread_index;
+
+  if (my_max >= MAX_THREADS) return MAX_THREADS-1;
+  return my_max;
+}
+
+/* This is intended to be lock-free, though that             */
+/* assumes that the CloseHandle becomes visible before the   */
+/* in_use assignment.                                        */
+static void GC_delete_gc_thread(GC_thread thr)
+{
+  CloseHandle(thr->handle);
+      /* cast away volatile qualifier */
+  thr->stack_base = 0;
+  thr->id = 0;
+# ifdef CYGWIN32
+    thr->pthread_id = 0;
+# endif /* CYGWIN32 */
+  thr->in_use = FALSE;
+}
+
+static void GC_delete_thread(DWORD thread_id) {
+  int i;
+  LONG my_max = GC_get_max_thread_index();
+
+  for (i = 0;
+       i <= my_max &&
+       (!thread_table[i].in_use || thread_table[i].id != thread_id);
+       /* Must still be in_use, since nobody else can store our thread_id. */
+       i++) {}
+  if (i > my_max) {
+    WARN("Removing nonexisiting thread %ld\n", (GC_word)thread_id);
+  } else {
+    GC_delete_gc_thread(thread_table+i);
+  }
+}
+
+
+#ifdef CYGWIN32
+
+/* Return a GC_thread corresponding to a given pthread_t.    */
+/* Returns 0 if it's not there.                              */
+/* We assume that this is only called for pthread ids that   */
+/* have not yet terminated or are still joinable.            */
+static GC_thread GC_lookup_thread(pthread_t id)
+{
+  int i;
+  LONG my_max = GC_get_max_thread_index();
+
+  for (i = 0;
+       i <= my_max &&
+       (!thread_table[i].in_use || thread_table[i].pthread_id != id
+        || !thread_table[i].in_use);
+       /* Must still be in_use, since nobody else can store our thread_id. */
+       i++);
+  if (i > my_max) return 0;
+  return thread_table + i;
+}
+
+#endif /* CYGWIN32 */
 
 void GC_push_thread_structures GC_PROTO((void))
 {
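For illustration only, a minimal standalone sketch of the slot-claim pattern that the new GC_new_thread relies on: an entry is grabbed with InterlockedExchange and the high-water mark is advanced with InterlockedIncrement, so readers must clamp it to MAX_THREADS - 1 in case of a transient overshoot. The names claim_slot, in_use and max_index are invented for the example; this is not the collector's code.

#include <windows.h>
#include <stdio.h>

#define MAX_THREADS 256

static volatile LONG in_use[MAX_THREADS];   /* one flag per table entry   */
static volatile LONG max_index = 0;         /* highest slot ever claimed  */

static int claim_slot(void)
{
    int i;
    /* Grab the first free entry without taking a lock. */
    for (i = 0; InterlockedExchange(&in_use[i], 1) != 0; i++) {
        if (i == MAX_THREADS - 1) return -1;    /* table full */
    }
    /* Advance the high-water mark; concurrent claimers may overshoot. */
    while (i > max_index) InterlockedIncrement(&max_index);
    if (max_index >= MAX_THREADS) max_index = MAX_THREADS - 1;
    return i;
}

int main(void)
{
    printf("claimed slot %d, max index now %ld\n", claim_slot(), (long)max_index);
    return 0;
}

The clamp matters because, per the patch's own comments, GC_get_max_thread_index is read without a lock and must never be used as an out-of-bounds subscript.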
Lines 52-59

     /* no private structures we need to preserve.                     */
 # ifdef CYGWIN32
   { int i; /* pthreads may keep a pointer in the thread exit value */
-    for (i = 0; i < MAX_THREADS; i++)
-      if (thread_table[i].in_use) GC_push_all((ptr_t)&(thread_table[i].status),(ptr_t)(&(thread_table[i].status)+1));
+    LONG my_max = GC_get_max_thread_index();
+
+    for (i = 0; i <= my_max; i++)
+      if (thread_table[i].in_use)
+        GC_push_all((ptr_t)&(thread_table[i].status),
+                    (ptr_t)(&(thread_table[i].status)+1));
   }
 # endif
 }
Lines 63-75

   DWORD thread_id = GetCurrentThreadId();
   int i;
 
-#ifdef CYGWIN32
   if (!GC_thr_initialized) ABORT("GC_stop_world() called before GC_thr_init()");
-#endif
 
   GC_please_stop = TRUE;
-  for (i = 0; i < MAX_THREADS; i++)
-    if (thread_table[i].stack != 0
+  for (i = 0; i <= GC_get_max_thread_index(); i++)
+    if (thread_table[i].stack_base != 0
         && thread_table[i].id != thread_id) {
 #     ifdef MSWINCE
         /* SuspendThread will fail if thread is running kernel code */
Lines 84-96

         DWORD exitCode;
         if (GetExitCodeThread(thread_table[i].handle,&exitCode) &&
             exitCode != STILL_ACTIVE) {
-          thread_table[i].stack = 0; /* prevent stack from being pushed */
+          thread_table[i].stack_base = 0; /* prevent stack from being pushed */
 #         ifndef CYGWIN32
             /* this breaks pthread_join on Cygwin, which is guaranteed to */
             /* only see user pthreads                                     */
             thread_table[i].in_use = FALSE;
             CloseHandle(thread_table[i].handle);
-            BZERO((void *)(&thread_table[i].context), sizeof(CONTEXT));
 #         endif
           continue;
         }
Lines 105-112

 {
   DWORD thread_id = GetCurrentThreadId();
   int i;
-  for (i = 0; i < MAX_THREADS; i++)
-    if (thread_table[i].stack != 0 && thread_table[i].suspended
+  LONG my_max = GC_get_max_thread_index();
+
+  for (i = 0; i <= my_max; i++)
+    if (thread_table[i].stack_base != 0 && thread_table[i].suspended
         && thread_table[i].id != thread_id) {
       if (ResumeThread(thread_table[i].handle) == (DWORD)-1)
         ABORT("ResumeThread failed");
Lines 122-130

 {
   DWORD thread_id = GetCurrentThreadId();
   int i;
-  for (i = 0; i < MAX_THREADS; i++)
-    if (thread_table[i].stack && thread_table[i].id == thread_id)
-      return thread_table[i].stack;
+  LONG my_max = GC_get_max_thread_index();
+
+  for (i = 0; i <= my_max; i++)
+    if (thread_table[i].stack_base && thread_table[i].id == thread_id)
+      return thread_table[i].stack_base;
   ABORT("no thread table entry for current thread");
 }
 # ifdef _MSC_VER
Lines 135-144

   /* The VirtualQuery calls below won't work properly on WinCE, but   */
   /* since each stack is restricted to an aligned 64K region of       */
   /* virtual memory we can just take the next lowest multiple of 64K. */
-# define GC_get_lo_stack_addr(s) \
+# define GC_get_stack_min(s) \
     ((ptr_t)(((DWORD)(s) - 1) & 0xFFFF0000))
 # else
-  static ptr_t GC_get_lo_stack_addr(ptr_t s)
+  static ptr_t GC_get_stack_min(ptr_t s)
   {
     ptr_t bottom;
     MEMORY_BASIC_INFORMATION info;
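For illustration only, the WinCE branch of the renamed GC_get_stack_min relies on nothing more than each stack living in an aligned 64K region. A tiny self-contained sketch of that arithmetic follows; the macro name and sample address are invented, not part of the patch.

#include <stdio.h>

/* Round an address just below the cold end of the stack down to the   */
/* start of its 64K region, as the WinCE #define above does.           */
#define STACK_MIN_64K(s) ((unsigned long)((s) - 1) & 0xFFFF0000UL)

int main(void)
{
    unsigned long stack_base = 0x0012F000UL;   /* hypothetical cold end */
    printf("cold end 0x%08lx -> lowest pushable address 0x%08lx\n",
           stack_base, STACK_MIN_64K(stack_base));
    return 0;
}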
Lines 155-351

 void GC_push_all_stacks()
 {
   DWORD thread_id = GetCurrentThreadId();
+  GC_bool found_me = FALSE;
   int i;
-  for (i = 0; i < MAX_THREADS; i++)
-    if (thread_table[i].stack) {
-      ptr_t bottom = GC_get_lo_stack_addr(thread_table[i].stack);
-      if (thread_table[i].id == thread_id)
-        GC_push_all_stack((ptr_t)&i, thread_table[i].stack);
-      else {
-        thread_table[i].context.ContextFlags
-            = (CONTEXT_INTEGER|CONTEXT_CONTROL);
-        if (!GetThreadContext(thread_table[i].handle,
-                              /* cast away volatile qualifier */
-                              (LPCONTEXT)&thread_table[i].context))
+  int dummy;
+  ptr_t sp, stack_min;
+  GC_thread thread;
+  LONG my_max = GC_get_max_thread_index();
+
+  for (i = 0; i <= my_max; i++) {
+    thread = thread_table + i;
+    if (thread -> in_use && thread -> stack_base) {
+      if (thread -> id == thread_id) {
+        sp = (ptr_t) &dummy;
+        found_me = TRUE;
+      } else {
+        CONTEXT context;
+        context.ContextFlags = CONTEXT_INTEGER|CONTEXT_CONTROL;
+        if (!GetThreadContext(thread_table[i].handle, &context))
           ABORT("GetThreadContext failed");
-#       ifdef I386
-          GC_push_one ((word) thread_table[i].context.Edi);
-          GC_push_one ((word) thread_table[i].context.Esi);
-          GC_push_one ((word) thread_table[i].context.Ebp);
-          GC_push_one ((word) thread_table[i].context.Ebx);
-          GC_push_one ((word) thread_table[i].context.Edx);
-          GC_push_one ((word) thread_table[i].context.Ecx);
-          GC_push_one ((word) thread_table[i].context.Eax);
-          if (thread_table[i].context.Esp >= (DWORD)thread_table[i].stack
-              || thread_table[i].context.Esp < (DWORD)bottom) {
-            WARN("Thread stack pointer 0x%lx out of range, pushing everything",
-                 thread_table[i].context.Esp);
-            GC_push_all_stack((char *) bottom, thread_table[i].stack);
-          } else {
-            GC_push_all_stack((char *) thread_table[i].context.Esp,
-                              thread_table[i].stack);
-          }
-#       else
-#       ifdef ARM32
-          if (thread_table[i].context.Sp >= (DWORD)thread_table[i].stack
-              || thread_table[i].context.Sp < (DWORD)bottom)
-            ABORT("Thread stack pointer out of range");
-          GC_push_one ((word) thread_table[i].context.R0);
-          GC_push_one ((word) thread_table[i].context.R1);
-          GC_push_one ((word) thread_table[i].context.R2);
-          GC_push_one ((word) thread_table[i].context.R3);
-          GC_push_one ((word) thread_table[i].context.R4);
-          GC_push_one ((word) thread_table[i].context.R5);
-          GC_push_one ((word) thread_table[i].context.R6);
-          GC_push_one ((word) thread_table[i].context.R7);
-          GC_push_one ((word) thread_table[i].context.R8);
-          GC_push_one ((word) thread_table[i].context.R9);
-          GC_push_one ((word) thread_table[i].context.R10);
-          GC_push_one ((word) thread_table[i].context.R11);
-          GC_push_one ((word) thread_table[i].context.R12);
-          GC_push_all_stack((char *) thread_table[i].context.Sp,
-                            thread_table[i].stack);
-#       else
-#       ifdef SHx
-          if (thread_table[i].context.R15 >= (DWORD)thread_table[i].stack
-              || thread_table[i].context.R15 < (DWORD)bottom)
-            ABORT("Thread stack pointer out of range");
-          GC_push_one ((word) thread_table[i].context.R0);
-          GC_push_one ((word) thread_table[i].context.R1);
-          GC_push_one ((word) thread_table[i].context.R2);
-          GC_push_one ((word) thread_table[i].context.R3);
-          GC_push_one ((word) thread_table[i].context.R4);
-          GC_push_one ((word) thread_table[i].context.R5);
-          GC_push_one ((word) thread_table[i].context.R6);
-          GC_push_one ((word) thread_table[i].context.R7);
-          GC_push_one ((word) thread_table[i].context.R8);
-          GC_push_one ((word) thread_table[i].context.R9);
-          GC_push_one ((word) thread_table[i].context.R10);
-          GC_push_one ((word) thread_table[i].context.R11);
-          GC_push_one ((word) thread_table[i].context.R12);
-          GC_push_one ((word) thread_table[i].context.R13);
-          GC_push_one ((word) thread_table[i].context.R14);
-          GC_push_all_stack((char *) thread_table[i].context.R15,
-                            thread_table[i].stack);
-#       else
-#       ifdef MIPS
-          if (thread_table[i].context.IntSp >= (DWORD)thread_table[i].stack
-              || thread_table[i].context.IntSp < (DWORD)bottom)
-            ABORT("Thread stack pointer out of range");
-          GC_push_one ((word) thread_table[i].context.IntAt);
-          GC_push_one ((word) thread_table[i].context.IntV0);
-          GC_push_one ((word) thread_table[i].context.IntV1);
-          GC_push_one ((word) thread_table[i].context.IntA0);
-          GC_push_one ((word) thread_table[i].context.IntA1);
-          GC_push_one ((word) thread_table[i].context.IntA2);
-          GC_push_one ((word) thread_table[i].context.IntA3);
-          GC_push_one ((word) thread_table[i].context.IntT0);
-          GC_push_one ((word) thread_table[i].context.IntT1);
-          GC_push_one ((word) thread_table[i].context.IntT2);
-          GC_push_one ((word) thread_table[i].context.IntT3);
-          GC_push_one ((word) thread_table[i].context.IntT4);
-          GC_push_one ((word) thread_table[i].context.IntT5);
-          GC_push_one ((word) thread_table[i].context.IntT6);
-          GC_push_one ((word) thread_table[i].context.IntT7);
-          GC_push_one ((word) thread_table[i].context.IntS0);
-          GC_push_one ((word) thread_table[i].context.IntS1);
-          GC_push_one ((word) thread_table[i].context.IntS2);
-          GC_push_one ((word) thread_table[i].context.IntS3);
-          GC_push_one ((word) thread_table[i].context.IntS4);
-          GC_push_one ((word) thread_table[i].context.IntS5);
-          GC_push_one ((word) thread_table[i].context.IntS6);
-          GC_push_one ((word) thread_table[i].context.IntS7);
-          GC_push_one ((word) thread_table[i].context.IntT8);
-          GC_push_one ((word) thread_table[i].context.IntT9);
-          GC_push_one ((word) thread_table[i].context.IntK0);
-          GC_push_one ((word) thread_table[i].context.IntK1);
-          GC_push_one ((word) thread_table[i].context.IntS8);
-          GC_push_all_stack((char *) thread_table[i].context.IntSp,
-                            thread_table[i].stack);
-#       else
-#       ifdef PPC
-          if (thread_table[i].context.Gpr1 >= (DWORD)thread_table[i].stack
-              || thread_table[i].context.Gpr1 < (DWORD)bottom)
-            ABORT("Thread stack pointer out of range");
-          GC_push_one ((word) thread_table[i].context.Gpr0);
-          /* Gpr1 is stack pointer */
-          /* Gpr2 is global pointer */
-          GC_push_one ((word) thread_table[i].context.Gpr3);
-          GC_push_one ((word) thread_table[i].context.Gpr4);
-          GC_push_one ((word) thread_table[i].context.Gpr5);
-          GC_push_one ((word) thread_table[i].context.Gpr6);
-          GC_push_one ((word) thread_table[i].context.Gpr7);
-          GC_push_one ((word) thread_table[i].context.Gpr8);
-          GC_push_one ((word) thread_table[i].context.Gpr9);
-          GC_push_one ((word) thread_table[i].context.Gpr10);
-          GC_push_one ((word) thread_table[i].context.Gpr11);
-          GC_push_one ((word) thread_table[i].context.Gpr12);
-          /* Gpr13 is reserved for the kernel */
-          GC_push_one ((word) thread_table[i].context.Gpr14);
-          GC_push_one ((word) thread_table[i].context.Gpr15);
-          GC_push_one ((word) thread_table[i].context.Gpr16);
-          GC_push_one ((word) thread_table[i].context.Gpr17);
-          GC_push_one ((word) thread_table[i].context.Gpr18);
-          GC_push_one ((word) thread_table[i].context.Gpr19);
-          GC_push_one ((word) thread_table[i].context.Gpr20);
-          GC_push_one ((word) thread_table[i].context.Gpr21);
-          GC_push_one ((word) thread_table[i].context.Gpr22);
-          GC_push_one ((word) thread_table[i].context.Gpr23);
-          GC_push_one ((word) thread_table[i].context.Gpr24);
-          GC_push_one ((word) thread_table[i].context.Gpr25);
-          GC_push_one ((word) thread_table[i].context.Gpr26);
-          GC_push_one ((word) thread_table[i].context.Gpr27);
-          GC_push_one ((word) thread_table[i].context.Gpr28);
-          GC_push_one ((word) thread_table[i].context.Gpr29);
-          GC_push_one ((word) thread_table[i].context.Gpr30);
-          GC_push_one ((word) thread_table[i].context.Gpr31);
-          GC_push_all_stack((char *) thread_table[i].context.Gpr1,
-                            thread_table[i].stack);
-#       else
-#       ifdef ALPHA
-          if (thread_table[i].context.IntSp >= (DWORD)thread_table[i].stack
-              || thread_table[i].context.IntSp < (DWORD)bottom)
-            ABORT("Thread stack pointer out of range");
-          GC_push_one ((word) thread_table[i].context.IntV0);
-          GC_push_one ((word) thread_table[i].context.IntT0);
-          GC_push_one ((word) thread_table[i].context.IntT1);
-          GC_push_one ((word) thread_table[i].context.IntT2);
-          GC_push_one ((word) thread_table[i].context.IntT3);
-          GC_push_one ((word) thread_table[i].context.IntT4);
-          GC_push_one ((word) thread_table[i].context.IntT5);
-          GC_push_one ((word) thread_table[i].context.IntT6);
-          GC_push_one ((word) thread_table[i].context.IntT7);
-          GC_push_one ((word) thread_table[i].context.IntS0);
-          GC_push_one ((word) thread_table[i].context.IntS1);
-          GC_push_one ((word) thread_table[i].context.IntS2);
-          GC_push_one ((word) thread_table[i].context.IntS3);
-          GC_push_one ((word) thread_table[i].context.IntS4);
-          GC_push_one ((word) thread_table[i].context.IntS5);
-          GC_push_one ((word) thread_table[i].context.IntFp);
-          GC_push_one ((word) thread_table[i].context.IntA0);
-          GC_push_one ((word) thread_table[i].context.IntA1);
-          GC_push_one ((word) thread_table[i].context.IntA2);
-          GC_push_one ((word) thread_table[i].context.IntA3);
-          GC_push_one ((word) thread_table[i].context.IntA4);
-          GC_push_one ((word) thread_table[i].context.IntA5);
-          GC_push_one ((word) thread_table[i].context.IntT8);
-          GC_push_one ((word) thread_table[i].context.IntT9);
-          GC_push_one ((word) thread_table[i].context.IntT10);
-          GC_push_one ((word) thread_table[i].context.IntT11);
-          GC_push_one ((word) thread_table[i].context.IntT12);
-          GC_push_one ((word) thread_table[i].context.IntAt);
-          GC_push_all_stack((char *) thread_table[i].context.IntSp,
-                            thread_table[i].stack);
-#       else
-          --> architecture not supported
-#       endif /* !ALPHA */
-#       endif /* !PPC */
-#       endif /* !MIPS */
-#       endif /* !SHx */
-#       endif /* !ARM32 */
-#       endif /* !I386 */
+
+        /* Push all registers that might point into the heap. Frame   */
+        /* pointer registers are included in case client code was     */
+        /* compiled with the 'omit frame pointer' optimisation.       */
+#       define PUSH1(reg) GC_push_one((word)context.reg)
+#       define PUSH2(r1,r2) PUSH1(r1), PUSH1(r2)
+#       define PUSH4(r1,r2,r3,r4) PUSH2(r1,r2), PUSH2(r3,r4)
+#       if defined(I386)
+          PUSH4(Edi,Esi,Ebx,Edx), PUSH2(Ecx,Eax), PUSH1(Ebp);
+          sp = (ptr_t)context.Esp;
+#       elif defined(ARM32)
+          PUSH4(R0,R1,R2,R3),PUSH4(R4,R5,R6,R7),PUSH4(R8,R9,R10,R11),PUSH1(R12);
+          sp = (ptr_t)context.Sp;
+#       elif defined(SHx)
+          PUSH4(R0,R1,R2,R3), PUSH4(R4,R5,R6,R7), PUSH4(R8,R9,R10,R11);
+          PUSH2(R12,R13), PUSH1(R14);
+          sp = (ptr_t)context.R15;
+#       elif defined(MIPS)
+          PUSH4(IntAt,IntV0,IntV1,IntA0), PUSH4(IntA1,IntA2,IntA3,IntT0);
+          PUSH4(IntT1,IntT2,IntT3,IntT4), PUSH4(IntT5,IntT6,IntT7,IntS0);
+          PUSH4(IntS1,IntS2,IntS3,IntS4), PUSH4(IntS5,IntS6,IntS7,IntT8);
+          PUSH4(IntT9,IntK0,IntK1,IntS8);
+          sp = (ptr_t)context.IntSp;
+#       elif defined(PPC)
+          PUSH4(Gpr0, Gpr3, Gpr4, Gpr5), PUSH4(Gpr6, Gpr7, Gpr8, Gpr9);
+          PUSH4(Gpr10,Gpr11,Gpr12,Gpr14), PUSH4(Gpr15,Gpr16,Gpr17,Gpr18);
+          PUSH4(Gpr19,Gpr20,Gpr21,Gpr22), PUSH4(Gpr23,Gpr24,Gpr25,Gpr26);
+          PUSH4(Gpr27,Gpr28,Gpr29,Gpr30), PUSH1(Gpr31);
+          sp = (ptr_t)context.Gpr1;
+#       elif defined(ALPHA)
+          PUSH4(IntV0,IntT0,IntT1,IntT2), PUSH4(IntT3,IntT4,IntT5,IntT6);
+          PUSH4(IntT7,IntS0,IntS1,IntS2), PUSH4(IntS3,IntS4,IntS5,IntFp);
+          PUSH4(IntA0,IntA1,IntA2,IntA3), PUSH4(IntA4,IntA5,IntT8,IntT9);
+          PUSH4(IntT10,IntT11,IntT12,IntAt);
+          sp = (ptr_t)context.IntSp;
+#       else
+#         error "architecture is not supported"
+#       endif
+      }
+
+      stack_min = GC_get_stack_min(thread->stack_base);
+
+      if (sp >= stack_min && sp < thread->stack_base)
+        GC_push_all_stack(sp, thread->stack_base);
+      else {
+        WARN("Thread stack pointer 0x%lx out of range, pushing everything\n",
+             (unsigned long)sp);
+        GC_push_all_stack(stack_min, thread->stack_base);
       }
     }
+  }
+  if (!found_me) ABORT("Collecting from unknown thread.");
 }
 
 void GC_get_next_stack(char *start, char **lo, char **hi)
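For illustration only, the PUSH1/PUSH2/PUSH4 helpers introduced in the new GC_push_all_stacks are plain nested comma expressions. A standalone sketch of how they expand, with GC_push_one stubbed out and an invented fake_context struct so it compiles outside the collector:

#include <stdio.h>

typedef unsigned long word;

static void GC_push_one(word p) { printf("push 0x%lx\n", p); }

/* A stand-in for the Win32 CONTEXT record, holding only the x86  */
/* integer registers the collector cares about.                   */
struct fake_context { word Edi, Esi, Ebx, Edx, Ecx, Eax, Ebp; };

#define PUSH1(reg)          GC_push_one((word)context.reg)
#define PUSH2(r1,r2)        PUSH1(r1), PUSH1(r2)
#define PUSH4(r1,r2,r3,r4)  PUSH2(r1,r2), PUSH2(r3,r4)

int main(void)
{
    struct fake_context context = { 1, 2, 3, 4, 5, 6, 7 };
    /* Same shape as the I386 branch of the patched GC_push_all_stacks. */
    PUSH4(Edi,Esi,Ebx,Edx), PUSH2(Ecx,Eax), PUSH1(Ebp);
    return 0;
}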
Lines 353-361

     int i;
 #   define ADDR_LIMIT (char *)(-1L)
     char * current_min = ADDR_LIMIT;
-
-    for (i = 0; i < MAX_THREADS; i++) {
-        char * s = (char *)thread_table[i].stack;
+    LONG my_max = GC_get_max_thread_index();
+
+    for (i = 0; i <= my_max; i++) {
+        char * s = (char *)thread_table[i].stack_base;
 
         if (0 != s && s > start && s < current_min) {
             current_min = s;
Lines 366-372

         *lo = ADDR_LIMIT;
         return;
     }
-    *lo = GC_get_lo_stack_addr(current_min);
+    *lo = GC_get_stack_min(current_min);
     if (*lo < start) *lo = start;
 }
 
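For illustration only, GC_get_next_stack as modified above just selects the smallest recorded stack base strictly above start, falling back to the ADDR_LIMIT sentinel. A self-contained sketch of that selection over an invented array of bases (next_stack_base and the sample addresses are made up for the example):

#include <stdio.h>

#define ADDR_LIMIT ((char *)(-1L))

/* Smallest base in bases[] strictly above start, or ADDR_LIMIT if none; */
/* mirrors the loop in the patched GC_get_next_stack, including its      */
/* loose comparison against the ADDR_LIMIT sentinel.                     */
static char *next_stack_base(char *start, char *bases[], int n)
{
    char *current_min = ADDR_LIMIT;
    int i;
    for (i = 0; i < n; i++) {
        char *s = bases[i];
        if (s != 0 && s > start && s < current_min) current_min = s;
    }
    return current_min;
}

int main(void)
{
    static char block[300];                 /* pretend address space */
    char *bases[] = { block + 50, block + 200, block + 120, 0 };
    char *next = next_stack_base(block + 60, bases, 4);
    printf("next stack base above block+60 is block+%d\n", (int)(next - block));
    return 0;
}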
Lines 391-398

 /* must properly intercept thread creation.                          */
 
 typedef struct {
-    HANDLE child_ready_h, parent_ready_h;
-    volatile struct thread_entry * entry;
     LPTHREAD_START_ROUTINE start;
     LPVOID param;
 } thread_args;
Lines 405-484

     LPVOID lpParameter, DWORD dwCreationFlags, LPDWORD lpThreadId )
 {
     HANDLE thread_h = NULL;
-    HANDLE child_ready_h, parent_ready_h;
 
-    int i;
-    thread_args args;
+    thread_args *args;
 
-    /* allocate thread slot */
-    LOCK();
-    for (i = 0; i != MAX_THREADS && thread_table[i].in_use; i++)
-        ;
-    if (i != MAX_THREADS) {
-        thread_table[i].in_use = TRUE;
+    if (!GC_is_initialized) GC_init();
+                /* make sure GC is initialized (i.e. main thread is attached) */
+
+    args = GC_malloc_uncollectable(sizeof(thread_args));
+        /* Handed off to and deallocated by child thread.      */
+    if (0 == args) {
+        SetLastError(ERROR_NOT_ENOUGH_MEMORY);
+        return NULL;
     }
-    UNLOCK();
-
-    if (i != MAX_THREADS) {
-
-        /* create unnamed unsignalled events */
-        if (child_ready_h = CreateEvent(NULL, FALSE, FALSE, NULL)) {
-            if (parent_ready_h = CreateEvent(NULL, FALSE, FALSE, NULL)) {
-
-                /* set up thread arguments */
-                args.child_ready_h = child_ready_h;
-                args.parent_ready_h = parent_ready_h;
-                args.entry = &thread_table[i];
-                args.start = lpStartAddress;
-                args.param = lpParameter;
-
-                thread_h = CreateThread(lpThreadAttributes,
-                                        dwStackSize, thread_start,
-                                        &args,
-                                        dwCreationFlags & ~CREATE_SUSPENDED,
-                                        lpThreadId);
-
-                if (thread_h) {
-
-                    /* fill in ID and handle; tell child this is done */
-                    thread_table[i].id = *lpThreadId;
-                    if (!DuplicateHandle(GetCurrentProcess(),
-                                         thread_h,
-                                         GetCurrentProcess(),
-                                         (PHANDLE) &thread_table[i].handle,
-                                         0,
-                                         0,
-                                         DUPLICATE_SAME_ACCESS)) {
-                        DWORD last_error = GetLastError();
-                        GC_printf1("Last error code: %lx\n", last_error);
-                        ABORT("DuplicateHandle failed");
-                    }
-                    SetEvent (parent_ready_h);
-
-                    /* wait for child to fill in stack and copy args */
-                    WaitForSingleObject (child_ready_h, INFINITE);
-
-                    /* suspend the child if requested */
-                    if (dwCreationFlags & CREATE_SUSPENDED)
-                        SuspendThread (thread_h);
-
-                    /* let child call given function now (or when resumed) */
-                    SetEvent (parent_ready_h);
-
-                } else {
-                    CloseHandle (parent_ready_h);
-                }
-            }
-        }
-
-        CloseHandle (child_ready_h);
-
-        if (thread_h == NULL)
-            thread_table[i].in_use = FALSE;
-
-    } else { /* no thread slot found */
-        SetLastError (ERROR_TOO_MANY_TCBS);
-    }
+
+    /* set up thread arguments */
+        args -> start = lpStartAddress;
+        args -> param = lpParameter;
+
+    thread_h = CreateThread(lpThreadAttributes,
+                            dwStackSize, thread_start,
+                            args, dwCreationFlags,
+                            lpThreadId);
 
     return thread_h;
 }
Lines 486-504

 static DWORD WINAPI thread_start(LPVOID arg)
 {
     DWORD ret = 0;
-    thread_args args = *(thread_args *)arg;
+    thread_args *args = (thread_args *)arg;
 
-    /* wait for parent to fill in ID and handle */
-    WaitForSingleObject (args.parent_ready_h, INFINITE);
-    ResetEvent (args.parent_ready_h);
-
-    /* fill in stack; tell parent this is done */
-    args.entry->stack = GC_get_stack_base();
-    SetEvent (args.child_ready_h);
-
-    /* wait for parent to tell us to go (in case it needs to suspend us) */
-    WaitForSingleObject (args.parent_ready_h, INFINITE);
-    CloseHandle (args.parent_ready_h);
+    GC_new_thread();
 
     /* Clear the thread entry even if we exit with an exception.   */
     /* This is probably pointless, since an uncaught exception is  */
Lines 506-521

 #ifndef __GNUC__
     __try {
 #endif /* __GNUC__ */
-        ret = args.start (args.param);
+        ret = args->start (args->param);
 #ifndef __GNUC__
     } __finally {
 #endif /* __GNUC__ */
-        LOCK();
-        args.entry->stack = 0;
-        args.entry->in_use = FALSE;
-              /* cast away volatile qualifier */
-        BZERO((void *) &args.entry->context, sizeof(CONTEXT));
-        UNLOCK();
+        GC_free(args);
+        GC_delete_thread(GetCurrentThreadId());
 #ifndef __GNUC__
     }
 #endif /* __GNUC__ */
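For illustration only, a minimal sketch of the simplified creation protocol used by the new GC_CreateThread/thread_start pair above: the parent heap-allocates the start routine and its argument, the child registers itself and frees the block, so the old event handshake is no longer needed. Plain malloc/free stand in for GC_malloc_uncollectable/GC_free, the registration step is reduced to a comment, and work is an invented example routine; this is not the collector's code.

#include <windows.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    LPTHREAD_START_ROUTINE start;   /* real start routine */
    LPVOID param;                   /* its argument       */
} thread_args;

/* Wrapper that every new thread runs first. */
static DWORD WINAPI thread_start(LPVOID arg)
{
    thread_args *args = (thread_args *)arg;
    DWORD ret;
    /* ...this is where GC_new_thread() would register the thread... */
    ret = args->start(args->param);
    free(args);                     /* block was handed off by the parent */
    return ret;
}

static DWORD WINAPI work(LPVOID p) { printf("hello from %s\n", (char *)p); return 0; }

int main(void)
{
    DWORD tid;
    HANDLE h;
    thread_args *args = malloc(sizeof *args);
    if (args == NULL) return 1;
    args->start = work;
    args->param = (LPVOID)"child";
    h = CreateThread(NULL, 0, thread_start, args, 0, &tid);
    if (h == NULL) { free(args); return 1; }
    WaitForSingleObject(h, INFINITE);
    CloseHandle(h);
    return 0;
}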
Lines 549-555

     DWORD thread_id;
 
     /* initialize everything */
-    InitializeCriticalSection(&GC_allocate_ml);
     GC_init();
 
     /* start the main thread */
Lines 579-704

 
 # else /* !MSWINCE */
 
-LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info);
-
-/* threadAttach/threadDetach routines used by both CYGWIN and DLL
- * implementation, since both recieve explicit notification on thread
- * creation/destruction.
- */
-static void threadAttach() {
-  int i;
-  /* It appears to be unsafe to acquire a lock here, since this     */
-  /* code is apparently not preeemptible on some systems.           */
-  /* (This is based on complaints, not on Microsoft's official      */
-  /* documentation, which says this should perform "only simple     */
-  /* inititalization tasks".)                                       */
-  /* Hence we make do with nonblocking synchronization.             */
-
-  /* The following should be a noop according to the win32          */
-  /* documentation.  There is empirical evidence that it            */
-  /* isn't.          - HB                                           */
-# if defined(MPROTECT_VDB)
-    if (GC_incremental) SetUnhandledExceptionFilter(GC_write_fault_handler);
-# endif
-              /* cast away volatile qualifier */
-  for (i = 0; InterlockedExchange((LONG*)&thread_table[i].in_use,1) != 0; i++) {
-    /* Compare-and-swap would make this cleaner, but that's not     */
-    /* supported before Windows 98 and NT 4.0.  In Windows 2000,    */
-    /* InterlockedExchange is supposed to be replaced by            */
-    /* InterlockedExchangePointer, but that's not really what I     */
-    /* want here.                                                   */
-    if (i == MAX_THREADS - 1)
-      ABORT("too many threads");
-  }
-  thread_table[i].id = GetCurrentThreadId();
-# ifdef CYGWIN32
-  thread_table[i].pthread_id = pthread_self();
-# endif
-  if (!DuplicateHandle(GetCurrentProcess(),
-                       GetCurrentThread(),
-                       GetCurrentProcess(),
-                       (HANDLE*)&thread_table[i].handle,
-                       0,
-                       0,
-                       DUPLICATE_SAME_ACCESS)) {
-    DWORD last_error = GetLastError();
-    GC_printf1("Last error code: %lx\n", last_error);
-    ABORT("DuplicateHandle failed");
-  }
-  thread_table[i].stack = GC_get_stack_base();
-  if (thread_table[i].stack == NULL)
-    ABORT("Failed to find stack base in threadAttach");
-  /* If this thread is being created while we are trying to stop    */
-  /* the world, wait here.  Hopefully this can't happen on any      */
-  /* systems that don't allow us to block here.                     */
-  while (GC_please_stop) Sleep(20);
-}
-
-static void threadDetach(DWORD thread_id) {
-  int i;
-
-  LOCK();
-  for (i = 0;
-       i < MAX_THREADS &&
-       (!thread_table[i].in_use || thread_table[i].id != thread_id);
-       i++) {}
-  if (i >= MAX_THREADS ) {
-    WARN("thread %ld not found on detach", (GC_word)thread_id);
-  } else {
-    thread_table[i].stack = 0;
-    thread_table[i].in_use = FALSE;
-    CloseHandle(thread_table[i].handle);
-    /* cast away volatile qualifier */
-    BZERO((void *)&thread_table[i].context, sizeof(CONTEXT));
-  }
-  UNLOCK();
-}
-
-#ifdef CYGWIN32
-
 /* Called by GC_init() - we hold the allocation lock.   */
 void GC_thr_init() {
     if (GC_thr_initialized) return;
+    GC_main_thread = GetCurrentThreadId();
     GC_thr_initialized = TRUE;
 
-#if 0
-    /* this might already be handled in GC_init... */
-    InitializeCriticalSection(&GC_allocate_ml);
-#endif
-
     /* Add the initial thread, so we can stop it.       */
-    threadAttach();
+    GC_new_thread();
 }
 
+#ifdef CYGWIN32
+
 struct start_info {
     void *(*start_routine)(void *);
     void *arg;
+    GC_bool detached;
 };
 
 int GC_pthread_join(pthread_t pthread_id, void **retval) {
     int result;
     int i;
+    GC_thread me;
 
 #   if DEBUG_CYGWIN_THREADS
-    GC_printf3("thread 0x%x(0x%x) is joining thread 0x%x.\n",(int)pthread_self(),
-               GetCurrentThreadId(), (int)pthread_id);
+    GC_printf3("thread 0x%x(0x%x) is joining thread 0x%x.\n",
+               (int)pthread_self(), GetCurrentThreadId(), (int)pthread_id);
 #   endif
 
-    /* Can't do any table lookups here, because thread being joined
-       might not have registered itself yet */
+    /* Thread being joined might not have registered itself yet. */
+    /* After the join,thread id may have been recycled.          */
+    /* FIXME: It would be better if this worked more like        */
+    /* pthread_support.c.                                        */
+
+    while ((me = GC_lookup_thread(pthread_id)) == 0) Sleep(10);
 
     result = pthread_join(pthread_id, retval);
 
-    LOCK();
-    for (i = 0; !thread_table[i].in_use || thread_table[i].pthread_id != pthread_id;
-         i++) {
-      if (i == MAX_THREADS - 1) {
-        GC_printf1("Failed to find thread 0x%x in pthread_join()\n", pthread_id);
-        ABORT("thread not found on detach");
-      }
-    }
-    UNLOCK();
-    threadDetach(thread_table[i].id);
+    GC_delete_gc_thread(me);
 
 #   if DEBUG_CYGWIN_THREADS
     GC_printf3("thread 0x%x(0x%x) completed join with thread 0x%x.\n",
Lines 729-738

 
     si -> start_routine = start_routine;
     si -> arg = arg;
+    if (attr != 0 &&
+        pthread_attr_getdetachstate(attr, &si->detached)
+        == PTHREAD_CREATE_DETACHED) {
+      si->detached = TRUE;
+    }
 
 #   if DEBUG_CYGWIN_THREADS
-    GC_printf2("About to create a thread from 0x%x(0x%x)\n",(int)pthread_self(),
-               GetCurrentThreadId);
+    GC_printf2("About to create a thread from 0x%x(0x%x)\n",
               (int)pthread_self(), GetCurrentThreadId);
 #   endif
     result = pthread_create(new_thread, attr, GC_start_routine, si);
 
Lines 750-755

     void *(*start)(void *);
     void *start_arg;
     pthread_t pthread_id;
+    GC_thread me;
+    GC_bool detached;
     int i;
 
 #   if DEBUG_CYGWIN_THREADS
Lines 764-780

     LOCK();
     /* We register the thread here instead of in the parent, so that */
     /* we don't need to hold the allocation lock during pthread_create. */
-    threadAttach();
+    me = GC_new_thread();
     UNLOCK();
 
     start = si -> start_routine;
     start_arg = si -> arg;
-    pthread_id = pthread_self();
+    if (si-> detached) me -> flags |= DETACHED;
+    me -> pthread_id = pthread_id = pthread_self();
 
     GC_free(si); /* was allocated uncollectable */
 
-    pthread_cleanup_push(GC_thread_exit_proc, pthread_id);
+    pthread_cleanup_push(GC_thread_exit_proc, (void *)me);
     result = (*start)(start_arg);
+    me -> status = result;
     pthread_cleanup_pop(0);
 
 #   if DEBUG_CYGWIN_THREADS
Lines 782-801

                (int)pthread_self(),GetCurrentThreadId());
 #   endif
 
-    LOCK();
-    for (i = 0; thread_table[i].pthread_id != pthread_id; i++) {
-      if (i == MAX_THREADS - 1)
-        ABORT("thread not found on exit");
-    }
-    thread_table[i].status = result;
-    UNLOCK();
-
     return(result);
 }
 
 void GC_thread_exit_proc(void *arg)
 {
-    pthread_t pthread_id = (pthread_t)arg;
+    GC_thread me = (GC_thread)arg;
     int i;
 
 #   if DEBUG_CYGWIN_THREADS
Lines 804-828

 #   endif
 
     LOCK();
-    for (i = 0; thread_table[i].pthread_id != pthread_id; i++) {
-      if (i == MAX_THREADS - 1)
-        ABORT("thread not found on exit");
+    if (me -> flags & DETACHED) {
+      GC_delete_thread(GetCurrentThreadId());
+    } else {
+      /* deallocate it as part of join */
+      me -> flags |= FINISHED;
     }
     UNLOCK();
-
-#if 0
-    /* TODO: we need a way to get the exit value after a pthread_exit so we can stash it safely away */
-    thread_table[i].status = ???
-#endif
 }
 
 /* nothing required here... */
 int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset) {
   return pthread_sigmask(how, set, oset);
 }
-int GC_pthread_detach(pthread_t thread) {
-  return pthread_detach(thread);
+
+int GC_pthread_detach(pthread_t thread)
+{
+    int result;
+    GC_thread thread_gc_id;
+
+    LOCK();
+    thread_gc_id = GC_lookup_thread(thread);
+    UNLOCK();
+    result = pthread_detach(thread);
+    if (result == 0) {
+      LOCK();
+      thread_gc_id -> flags |= DETACHED;
+      /* Here the pthread thread id may have been recycled. */
+      if (thread_gc_id -> flags & FINISHED) {
+        GC_delete_gc_thread(thread_gc_id);
+      }
+      UNLOCK();
+    }
+    return result;
 }
+
 #else /* !CYGWIN32 */
 
 /*
Lines 834-848

 {
   switch (reason) {
    case DLL_PROCESS_ATTACH:
-    InitializeCriticalSection(&GC_allocate_ml);
     GC_init();  /* Force initialization before thread attach. */
     /* fall through */
    case DLL_THREAD_ATTACH:
-    threadAttach();
+    GC_ASSERT(GC_thr_initialized);
+    if (GC_main_thread != GetCurrentThreadId()) {
+      GC_new_thread();
+    } /* o.w. we already did it during GC_thr_init(), called by GC_init() */
     break;
 
    case DLL_THREAD_DETACH:
-    threadDetach(GetCurrentThreadId());
+    GC_delete_thread(GetCurrentThreadId());
     break;
 
    case DLL_PROCESS_DETACH:
Lines 850-864

       int i;
 
       LOCK();
-      for (i = 0; i < MAX_THREADS; ++i)
+      for (i = 0; i <= GC_get_max_thread_index(); ++i)
       {
           if (thread_table[i].in_use)
-          {
-              thread_table[i].stack = 0;
-              thread_table[i].in_use = FALSE;
-              CloseHandle(thread_table[i].handle);
-              BZERO((void *) &thread_table[i].context, sizeof(CONTEXT));
-          }
+              GC_delete_gc_thread(thread_table + i);
       }
       UNLOCK();
 