/* Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This file handles the maintenance of threads in response to team
   creation and termination.  */

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>

/* This attribute contains PTHREAD_CREATE_DETACHED.  */
pthread_attr_t gomp_thread_attr;

/* This key is for the thread destructor.  */
pthread_key_t gomp_thread_destructor;


/* This is the libgomp per-thread data structure.  */
#ifdef HAVE_TLS
__thread struct gomp_thread gomp_tls_data;
#else
pthread_key_t gomp_tls_key;
#endif


/* This structure is used to communicate across pthread_create.  */

struct gomp_thread_start_data
{
  void (*fn) (void *);
  void *fn_data;
  struct gomp_team_state ts;
  struct gomp_task *task;
  struct gomp_thread_pool *thread_pool;
  bool nested;
};

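/* A note on lifetime: gomp_team_start allocates an array of these with
   gomp_alloca on the master's stack and hands one element to each thread
   it creates.  Each new thread copies the fields it needs out of the
   structure and then waits on the team's release barrier; the master
   waits on that same barrier before returning, so the alloca'd block
   stays valid for as long as any thread may still read it.  */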

/* This function is a pthread_create entry point.  This contains the idle
   loop in which a thread waits to be called up to become part of a team.  */

static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  struct gomp_thread_pool *pool;
  void (*local_fn) (void *);
  void *local_data;

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  struct gomp_thread local_thr;
  thr = &local_thr;
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->thread_pool = data->thread_pool;
  thr->ts = data->ts;
  thr->task = data->task;

  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  /* Make thread pool local.  */
  pool = thr->thread_pool;

  if (data->nested)
    {
      struct gomp_team *team = thr->ts.team;
      struct gomp_task *task = thr->task;

      gomp_barrier_wait (&team->barrier);

      local_fn (local_data);
      gomp_team_barrier_wait (&team->barrier);
      gomp_finish_task (task);
      gomp_barrier_wait_last (&team->barrier);
    }
  else
    {
      pool->threads[thr->ts.team_id] = thr;

      gomp_barrier_wait (&pool->threads_dock);
      do
        {
          struct gomp_team *team = thr->ts.team;
          struct gomp_task *task = thr->task;

          local_fn (local_data);
          gomp_team_barrier_wait (&team->barrier);
          gomp_finish_task (task);

          gomp_barrier_wait (&pool->threads_dock);

          local_fn = thr->fn;
          local_data = thr->data;
          thr->fn = NULL;
        }
      while (local_fn);
    }

  gomp_sem_destroy (&thr->release);
  return NULL;
}
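
/* Between teams, a non-nested worker parks in the do/while loop above on
   pool->threads_dock.  gomp_team_start re-arms a docked worker by storing
   a new fn/data pair into its gomp_thread and releasing the dock barrier;
   gomp_free_thread instead hands it gomp_free_pool_helper, which never
   returns.  A worker released with fn left NULL falls out of the loop and
   exits, which is how surplus threads are shed when a team shrinks.  */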


/* Create a new team data structure.  */

struct gomp_team *
gomp_new_team (unsigned nthreads)
{
  struct gomp_team *team;
  size_t size;
  int i;

  size = sizeof (*team) + nthreads * (sizeof (team->ordered_release[0])
                                      + sizeof (team->implicit_task[0]));
  team = gomp_malloc (size);

  team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
  team->single_count = 0;
#else
  gomp_mutex_init (&team->work_share_list_free_lock);
#endif
  gomp_init_work_share (&team->work_shares[0], false, nthreads);
  team->work_shares[0].next_alloc = NULL;
  team->work_share_list_free = NULL;
  team->work_share_list_alloc = &team->work_shares[1];
  for (i = 1; i < 7; i++)
    team->work_shares[i].next_free = &team->work_shares[i + 1];
  team->work_shares[i].next_free = NULL;

  team->nthreads = nthreads;
  gomp_barrier_init (&team->barrier, nthreads);

  gomp_sem_init (&team->master_release, 0);
  team->ordered_release = (void *) &team->implicit_task[nthreads];
  team->ordered_release[0] = &team->master_release;

  gomp_mutex_init (&team->task_lock);
  team->task_queue = NULL;
  team->task_count = 0;
  team->task_running_count = 0;

  return team;
}
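
/* Layout of the single allocation made above: the trailing implicit_task[]
   array of struct gomp_team receives nthreads entries, and the
   ordered_release pointer array is carved out of the same block just past
   implicit_task[nthreads].  Of the eight inline work shares
   (work_share_chunk), work_shares[0] is initialized for immediate use and
   work_shares[1] through work_shares[7] are chained through next_free
   onto work_share_list_alloc.  */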


/* Free a team data structure.  */

static void
free_team (struct gomp_team *team)
{
  gomp_barrier_destroy (&team->barrier);
  gomp_mutex_destroy (&team->task_lock);
  free (team);
}

/* Allocate and initialize a thread pool.  */

static struct gomp_thread_pool *gomp_new_thread_pool (void)
{
  struct gomp_thread_pool *pool
    = gomp_malloc (sizeof (struct gomp_thread_pool));
  pool->threads = NULL;
  pool->threads_size = 0;
  pool->threads_used = 0;
  pool->last_team = NULL;
  return pool;
}

static void
gomp_free_pool_helper (void *thread_pool)
{
  struct gomp_thread_pool *pool
    = (struct gomp_thread_pool *) thread_pool;
  gomp_barrier_wait_last (&pool->threads_dock);
  gomp_sem_destroy (&gomp_thread ()->release);
  pthread_exit (NULL);
}

/* Free a thread pool and release its threads.  */

static void
gomp_free_thread (void *arg __attribute__((unused)))
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_thread_pool *pool = thr->thread_pool;
  if (pool)
    {
      if (pool->threads_used > 0)
        {
          int i;
          for (i = 1; i < pool->threads_used; i++)
            {
              struct gomp_thread *nthr = pool->threads[i];
              nthr->fn = gomp_free_pool_helper;
              nthr->data = pool;
            }
          /* This barrier undocks threads docked on pool->threads_dock.  */
          gomp_barrier_wait (&pool->threads_dock);
          /* And this waits till all threads have called gomp_barrier_wait_last
             in gomp_free_pool_helper.  */
          gomp_barrier_wait (&pool->threads_dock);
          /* Now it is safe to destroy the barrier and free the pool.  */
          gomp_barrier_destroy (&pool->threads_dock);
        }
      free (pool->threads);
      if (pool->last_team)
        free_team (pool->last_team);
      free (pool);
      thr->thread_pool = NULL;
    }
  if (thr->task != NULL)
    {
      struct gomp_task *task = thr->task;
      gomp_end_task ();
      free (task);
    }
}
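
/* gomp_free_thread is installed below (in initialize_team) as the
   destructor for the gomp_thread_destructor pthread key, so a thread's
   pool and any dangling task are reclaimed automatically when the
   thread exits.  */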

/* Launch a team.  */

void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
                 struct gomp_team *team)
{
  struct gomp_thread_start_data *start_data;
  struct gomp_thread *thr, *nthr;
  struct gomp_task *task;
  struct gomp_task_icv *icv;
  bool nested;
  struct gomp_thread_pool *pool;
  unsigned i, n, old_threads_used = 0;
  pthread_attr_t thread_attr, *attr;

  thr = gomp_thread ();
  nested = thr->ts.team != NULL;
  if (__builtin_expect (thr->thread_pool == NULL, 0))
    {
      thr->thread_pool = gomp_new_thread_pool ();
      pthread_setspecific (gomp_thread_destructor, thr);
    }
  pool = thr->thread_pool;
  task = thr->task;
  icv = task ? &task->icv : &gomp_global_icv;

  /* Always save the previous state, even if this isn't a nested team.
     In particular, we should save any work share state from an outer
     orphaned work share construct.  */
  team->prev_ts = thr->ts;

  thr->ts.team = team;
  thr->ts.team_id = 0;
  ++thr->ts.level;
  if (nthreads > 1)
    ++thr->ts.active_level;
  thr->ts.work_share = &team->work_shares[0];
  thr->ts.last_work_share = NULL;
#ifdef HAVE_SYNC_BUILTINS
  thr->ts.single_count = 0;
#endif
  thr->ts.static_trip = 0;
  thr->task = &team->implicit_task[0];
  gomp_init_task (thr->task, task, icv);

  if (nthreads == 1)
    return;

  i = 1;

  /* We only allow the reuse of idle threads for non-nested PARALLEL
     regions.  This appears to be implied by the semantics of
     threadprivate variables, but perhaps that's reading too much into
     things.  Certainly it does prevent any locking problems, since
     only the initial program thread will modify gomp_threads.  */
  if (!nested)
    {
      old_threads_used = pool->threads_used;

      if (nthreads <= old_threads_used)
        n = nthreads;
      else if (old_threads_used == 0)
        {
          n = 0;
          gomp_barrier_init (&pool->threads_dock, nthreads);
        }
      else
        {
          n = old_threads_used;

          /* Increase the barrier threshold to make sure all new
             threads arrive before the team is released.  */
          gomp_barrier_reinit (&pool->threads_dock, nthreads);
        }

      /* Not true yet, but soon will be.  We're going to release all
         threads from the dock, and those that aren't part of the
         team will exit.  */
      pool->threads_used = nthreads;

      /* Release existing idle threads.  */
      for (; i < n; ++i)
        {
          nthr = pool->threads[i];
          nthr->ts.team = team;
          nthr->ts.work_share = &team->work_shares[0];
          nthr->ts.last_work_share = NULL;
          nthr->ts.team_id = i;
          nthr->ts.level = team->prev_ts.level + 1;
          nthr->ts.active_level = thr->ts.active_level;
#ifdef HAVE_SYNC_BUILTINS
          nthr->ts.single_count = 0;
#endif
          nthr->ts.static_trip = 0;
          nthr->task = &team->implicit_task[i];
          gomp_init_task (nthr->task, task, icv);
          nthr->fn = fn;
          nthr->data = data;
          team->ordered_release[i] = &nthr->release;
        }

      if (i == nthreads)
        goto do_release;

      /* If necessary, expand the size of the gomp_threads array.  It is
         expected that changes in the number of threads are rare, thus we
         make no effort to expand gomp_threads_size geometrically.  */
      if (nthreads >= pool->threads_size)
        {
          pool->threads_size = nthreads + 1;
          pool->threads
            = gomp_realloc (pool->threads,
                            pool->threads_size
                            * sizeof (struct gomp_thread_data *));
        }
    }

  if (__builtin_expect (nthreads > old_threads_used, 0))
    {
      long diff = (long) nthreads - (long) old_threads_used;

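      /* When starting from an empty pool the master thread itself is one
         of the nthreads; it already exists and is presumably already
         counted in gomp_managed_threads, so only nthreads - 1 new threads
         are created.  */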
      if (old_threads_used == 0)
        --diff;

#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, diff);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads += diff;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
    }

  attr = &gomp_thread_attr;
  if (__builtin_expect (gomp_cpu_affinity != NULL, 0))
    {
      size_t stacksize;
      pthread_attr_init (&thread_attr);
      pthread_attr_setdetachstate (&thread_attr, PTHREAD_CREATE_DETACHED);
      if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize))
        pthread_attr_setstacksize (&thread_attr, stacksize);
      attr = &thread_attr;
    }

  start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
                            * (nthreads - i));

  /* Launch new threads.  */
  for (; i < nthreads; ++i, ++start_data)
    {
      pthread_t pt;
      int err;

      start_data->fn = fn;
      start_data->fn_data = data;
      start_data->ts.team = team;
      start_data->ts.work_share = &team->work_shares[0];
      start_data->ts.last_work_share = NULL;
      start_data->ts.team_id = i;
      start_data->ts.level = team->prev_ts.level + 1;
      start_data->ts.active_level = thr->ts.active_level;
#ifdef HAVE_SYNC_BUILTINS
      start_data->ts.single_count = 0;
#endif
      start_data->ts.static_trip = 0;
      start_data->task = &team->implicit_task[i];
      gomp_init_task (start_data->task, task, icv);
      start_data->thread_pool = pool;
      start_data->nested = nested;

      if (gomp_cpu_affinity != NULL)
        gomp_init_thread_affinity (attr);

      err = pthread_create (&pt, attr, gomp_thread_start, start_data);
      if (err != 0)
        gomp_fatal ("Thread creation failed: %s", strerror (err));
    }

  if (__builtin_expect (gomp_cpu_affinity != NULL, 0))
    pthread_attr_destroy (&thread_attr);

 do_release:
  gomp_barrier_wait (nested ? &team->barrier : &pool->threads_dock);

  /* Decrease the barrier threshold to match the number of threads
     that should arrive back at the end of this team.  The extra
     threads should be exiting.  Note that we arrange for this test
     to never be true for nested teams.  */
  if (__builtin_expect (nthreads < old_threads_used, 0))
    {
      long diff = (long) nthreads - (long) old_threads_used;

      gomp_barrier_reinit (&pool->threads_dock, nthreads);

#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, diff);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads += diff;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
    }
}


/* Terminate the current team.  This is only to be called by the master
   thread.  We assume that we must wait for the other threads.  */

void
gomp_team_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  /* This barrier handles all pending explicit tasks.  */
  gomp_team_barrier_wait (&team->barrier);
  gomp_fini_work_share (thr->ts.work_share);

  gomp_end_task ();
  thr->ts = team->prev_ts;

  if (__builtin_expect (thr->ts.team != NULL, 0))
    {
#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads -= team->nthreads - 1L;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
      /* This barrier has gomp_barrier_wait_last counterparts
         and ensures the team can be safely destroyed.  */
      gomp_barrier_wait (&team->barrier);
    }

  if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0))
    {
      struct gomp_work_share *ws = team->work_shares[0].next_alloc;
      do
        {
          struct gomp_work_share *next_ws = ws->next_alloc;
          free (ws);
          ws = next_ws;
        }
      while (ws != NULL);
    }
  gomp_sem_destroy (&team->master_release);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_destroy (&team->work_share_list_free_lock);
#endif

  if (__builtin_expect (thr->ts.team != NULL, 0)
      || __builtin_expect (team->nthreads == 1, 0))
    free_team (team);
  else
    {
      struct gomp_thread_pool *pool = thr->thread_pool;
      if (pool->last_team)
        free_team (pool->last_team);
      pool->last_team = team;
    }
}
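
/* Usage sketch, assumed from the GOMP interface of this era rather than
   taken from this file: GOMP_parallel_start in parallel.c pairs
   gomp_new_team with gomp_team_start, and GOMP_parallel_end calls
   gomp_team_end.  A directive such as

       #pragma omp parallel
         body;

   is therefore expanded by the compiler into approximately

       GOMP_parallel_start (body_fn, &data, num_threads);
       body_fn (&data);
       GOMP_parallel_end ();

   with the master running body_fn directly while the team's workers
   reach it through gomp_thread_start.  */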


/* Constructors for this file.  */

static void __attribute__((constructor))
initialize_team (void)
{
  struct gomp_thread *thr;

#ifndef HAVE_TLS
  static struct gomp_thread initial_thread_tls_data;

  pthread_key_create (&gomp_tls_key, NULL);
  pthread_setspecific (gomp_tls_key, &initial_thread_tls_data);
#endif

  if (pthread_key_create (&gomp_thread_destructor, gomp_free_thread) != 0)
    gomp_fatal ("could not create thread pool destructor.");

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  thr = &initial_thread_tls_data;
#endif
  gomp_sem_init (&thr->release, 0);
}

static void __attribute__((destructor))
team_destructor (void)
{
  /* Without this, dlclose on libgomp could lead to subsequent
     crashes.  */
  pthread_key_delete (gomp_thread_destructor);
}

struct gomp_task_icv *
gomp_new_icv (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = gomp_malloc (sizeof (struct gomp_task));
  gomp_init_task (task, NULL, &gomp_global_icv);
  thr->task = task;
  pthread_setspecific (gomp_thread_destructor, thr);
  return &task->icv;
}