This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.
[gomp3] Fix !HAVE_SYNC_BUILTINS build
- From: Jakub Jelinek <jakub at redhat dot com>
- To: gcc-patches at gcc dot gnu dot org
- Date: Thu, 27 Mar 2008 07:21:14 -0400
- Subject: [gomp3] Fix !HAVE_SYNC_BUILTINS build
- Reply-to: Jakub Jelinek <jakub at redhat dot com>
Hi!
The recent changes broke the !HAVE_SYNC_BUILTINS build; this patch fixes that.
Additionally, there is no point in initializing single_count when
sync builtins aren't available, as nothing will use it.
2008-03-27 Jakub Jelinek <jakub@redhat.com>
* libgomp.h (struct gomp_team_state): Remove single_count field
ifndef HAVE_SYNC_BUILTINS.
(struct gomp_team): Likewise. Add work_share_list_free_lock
ifndef HAVE_SYNC_BUILTINS.
* team.c (gomp_new_team): If HAVE_SYNC_BUILTINS is not defined,
don't initialize single_count, but instead initialize
work_share_list_free_lock.
(free_team): Destroy work_share_list_free_lock ifndef
HAVE_SYNC_BUILTINS.
(gomp_team_start): Don't initialize ts.single_count ifndef
HAVE_SYNC_BUILTINS.
* work.c (alloc_work_share, free_work_share): Use
work_share_list_free_lock instead of atomic chaining ifndef
HAVE_SYNC_BUILTINS.
--- libgomp/libgomp.h (revision 133583)
+++ libgomp/libgomp.h (working copy)
@@ -179,8 +179,10 @@ struct gomp_team_state
/* Active nesting level. Only active parallel regions are counted. */
unsigned active_level;
+#ifdef HAVE_SYNC_BUILTINS
/* Number of single stmts encountered. */
unsigned long single_count;
+#endif
/* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
trip number through the loop. So first time a particular loop
@@ -263,9 +265,14 @@ struct gomp_team
with alloc_work_share. */
struct gomp_work_share *work_share_list_free;
+#ifdef HAVE_SYNC_BUILTINS
/* Number of simple single regions encountered by threads in this
team. */
unsigned long single_count;
+#else
+ /* Mutex protecting addition of workshares to work_share_list_free. */
+ gomp_mutex_t work_share_list_free_lock;
+#endif
/* This barrier is used for most synchronization of the team. */
gomp_barrier_t barrier;
--- libgomp/team.c (revision 133583)
+++ libgomp/team.c (working copy)
@@ -149,7 +149,11 @@ gomp_new_team (unsigned nthreads)
team = gomp_malloc (size);
team->work_share_chunk = 8;
+#ifdef HAVE_SYNC_BUILTINS
team->single_count = 0;
+#else
+ gomp_mutex_init (&team->work_share_list_free_lock);
+#endif
gomp_init_work_share (&team->work_shares[0], false, nthreads);
team->work_shares[0].next_alloc = NULL;
team->work_share_list_free = NULL;
@@ -187,6 +191,9 @@ free_team (struct gomp_team *team)
}
gomp_barrier_destroy (&team->barrier);
gomp_sem_destroy (&team->master_release);
+#ifndef HAVE_SYNC_BUILTINS
+ gomp_mutex_destroy (&team->work_share_list_free_lock);
+#endif
free (team);
}
@@ -222,7 +229,9 @@ gomp_team_start (void (*fn) (void *), vo
++thr->ts.active_level;
thr->ts.work_share = &team->work_shares[0];
thr->ts.last_work_share = NULL;
+#ifdef HAVE_SYNC_BUILTINS
thr->ts.single_count = 0;
+#endif
thr->ts.static_trip = 0;
thr->task = &team->implicit_task[0];
gomp_init_task (thr->task, task, icv);
@@ -272,7 +281,9 @@ gomp_team_start (void (*fn) (void *), vo
nthr->ts.team_id = i;
nthr->ts.level = team->prev_ts.level + 1;
nthr->ts.active_level = thr->ts.active_level;
+#ifdef HAVE_SYNC_BUILTINS
nthr->ts.single_count = 0;
+#endif
nthr->ts.static_trip = 0;
nthr->task = &team->implicit_task[i];
gomp_init_task (nthr->task, task, icv);
@@ -341,7 +352,9 @@ gomp_team_start (void (*fn) (void *), vo
start_data->ts.team_id = i;
start_data->ts.level = team->prev_ts.level + 1;
start_data->ts.active_level = thr->ts.active_level;
+#ifdef HAVE_SYNC_BUILTINS
start_data->ts.single_count = 0;
+#endif
start_data->ts.static_trip = 0;
start_data->task = &team->implicit_task[i];
gomp_init_task (start_data->task, task, icv);
--- libgomp/work.c (revision 133510)
+++ libgomp/work.c (working copy)
@@ -51,6 +51,7 @@ alloc_work_share (struct gomp_team *team
return ws;
}
+#ifdef HAVE_SYNC_BUILTINS
ws = team->work_share_list_free;
/* We need atomic read from work_share_list_free,
as free_work_share can be called concurrently. */
@@ -63,6 +64,18 @@ alloc_work_share (struct gomp_team *team
team->work_share_list_alloc = next->next_free;
return next;
}
+#else
+ gomp_mutex_lock (&team->work_share_list_free_lock);
+ ws = team->work_share_list_free;
+ if (ws)
+ {
+ team->work_share_list_alloc = ws->next_free;
+ team->work_share_list_free = NULL;
+ gomp_mutex_unlock (&team->work_share_list_free_lock);
+ return ws;
+ }
+ gomp_mutex_unlock (&team->work_share_list_free_lock);
+#endif
team->work_share_chunk *= 2;
ws = gomp_malloc (team->work_share_chunk * sizeof (struct gomp_work_share));
@@ -131,6 +144,7 @@ free_work_share (struct gomp_team *team,
else
{
struct gomp_work_share *next_ws;
+#ifdef HAVE_SYNC_BUILTINS
do
{
next_ws = team->work_share_list_free;
@@ -138,6 +152,13 @@ free_work_share (struct gomp_team *team,
}
while (!__sync_bool_compare_and_swap (&team->work_share_list_free,
next_ws, ws));
+#else
+ gomp_mutex_lock (&team->work_share_list_free_lock);
+ next_ws = team->work_share_list_free;
+ ws->next_free = next_ws;
+ team->work_share_list_free = ws;
+ gomp_mutex_unlock (&team->work_share_list_free_lock);
+#endif
}
}
Jakub