From: Jakub Jelinek <jakub@redhat.com>
To: gcc-patches@gcc.gnu.org
Date: Mon, 27 May 2019 23:29:37 +0200
Subject: [committed] Fix libgomp lastprivate conditional handling on some strict alignment targets (PR libgomp/90641)
Reply-to: Jakub Jelinek <jakub@redhat.com>
Hi!
While on x86_64-linux as well as i686-linux the inline_ordered_team_ids
array is long long aligned, on other targets such as SPARC Solaris it is
not, due to the different sizes of the mutexes/pointer locks that precede
it in the structure.  The following patch makes sure that the memory used
for lastprivate conditional is always long long aligned.
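
For reference, the round-up idiom the whole patch revolves around looks
like this (a minimal standalone sketch, not part of the patch; the helper
name is made up):

#include <stddef.h>

/* Round a size or offset up to the next multiple of
   __alignof__ (long long).  Note that adding the result to a base
   address only yields an aligned pointer if the base itself is
   long long aligned -- exactly what cannot be assumed for
   inline_ordered_team_ids on strict alignment targets.  */
static inline size_t
round_up_ll (size_t x)
{
  return (x + __alignof__ (long long) - 1)
         & ~((size_t) __alignof__ (long long) - 1);
}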
Bootstrapped/regtested on x86_64-linux and i686-linux; additionally,
Rainer has kindly tested it on sparc-sun-solaris.  Committed to trunk.
2019-05-27  Jakub Jelinek  <jakub@redhat.com>

	PR libgomp/90641
	* work.c (gomp_init_work_share): Instead of aligning the final
	ordered value to a multiple of long long alignment, align the
	first part (the ordered team ids) that way, and if
	inline_ordered_team_ids is not on a long long alignment boundary
	within the structure, always use a pad of
	__alignof__ (long long) - 1 bytes.
	* loop.c (GOMP_loop_start): Fix the *mem computation if
	inline_ordered_team_ids is not on a long long alignment boundary
	within the structure.
	* loop_ull.c (GOMP_loop_ull_start): Likewise.
	* sections.c (GOMP_sections2_start): Likewise.
--- libgomp/work.c.jj	2019-01-01 12:38:37.703653396 +0100
+++ libgomp/work.c	2019-05-27 13:49:04.513336631 +0200
@@ -110,9 +110,12 @@ gomp_init_work_share (struct gomp_work_s
   if (__builtin_expect (ordered != 1, 0))
     {
-      ordered += nthreads * sizeof (*ws->ordered_team_ids) - 1;
-      ordered = ordered + __alignof__ (long long) - 1;
-      ordered &= ~(__alignof__ (long long) - 1);
+      size_t o = nthreads * sizeof (*ws->ordered_team_ids);
+      o += __alignof__ (long long) - 1;
+      if ((offsetof (struct gomp_work_share, inline_ordered_team_ids)
+           & (__alignof__ (long long) - 1)) == 0)
+        o &= ~(__alignof__ (long long) - 1);
+      ordered += o - 1;
     }
   else
     ordered = nthreads * sizeof (*ws->ordered_team_ids);
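
To see what changes, here is a worked example with made-up numbers
(nthreads == 3, 4-byte team ids, __alignof__ (long long) == 8); it can
be compiled and run as-is:

#include <stddef.h>
#include <stdio.h>

int
main (void)
{
  size_t nthreads = 3, idsz = 4, al = 8;  /* hypothetical values */
  size_t o = nthreads * idsz;             /* 12 bytes of team ids */
  o += al - 1;                            /* 19, worst-case pad included */
  /* Offset of inline_ordered_team_ids already long long aligned:
     the pad is dropped again and the reservation stays tight.  */
  printf ("aligned offset:    ordered += %zu\n", (o & ~(al - 1)) - 1); /* 15 */
  /* Misaligned offset: keep all 7 pad bytes so an 8-byte aligned
     sub-block can always be carved out of the reservation later.  */
  printf ("misaligned offset: ordered += %zu\n", o - 1);               /* 18 */
  return 0;
}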
--- libgomp/loop.c.jj	2019-01-01 12:38:37.514656497 +0100
+++ libgomp/loop.c	2019-05-27 14:12:10.355836249 +0200
@@ -267,14 +267,17 @@ GOMP_loop_start (long start, long end, l
       if (mem)
         {
           uintptr_t size = (uintptr_t) *mem;
+#define INLINE_ORDERED_TEAM_IDS_OFF \
+  ((offsetof (struct gomp_work_share, inline_ordered_team_ids) \
+    + __alignof__ (long long) - 1) & ~(__alignof__ (long long) - 1))
           if (size > (sizeof (struct gomp_work_share)
-                      - offsetof (struct gomp_work_share,
-                                  inline_ordered_team_ids)))
-            thr->ts.work_share->ordered_team_ids
-              = gomp_malloc_cleared (size);
+                      - INLINE_ORDERED_TEAM_IDS_OFF))
+            *mem
+              = (void *) (thr->ts.work_share->ordered_team_ids
+                          = gomp_malloc_cleared (size));
           else
-            memset (thr->ts.work_share->ordered_team_ids, '\0', size);
-          *mem = (void *) thr->ts.work_share->ordered_team_ids;
+            *mem = memset (((char *) thr->ts.work_share)
+                           + INLINE_ORDERED_TEAM_IDS_OFF, '\0', size);
         }
       gomp_work_share_init_done ();
     }
@@ -287,7 +290,18 @@ GOMP_loop_start (long start, long end, l
                                                   first_reductions);
         }
       if (mem)
-        *mem = (void *) thr->ts.work_share->ordered_team_ids;
+        {
+          if ((offsetof (struct gomp_work_share, inline_ordered_team_ids)
+               & (__alignof__ (long long) - 1)) == 0)
+            *mem = (void *) thr->ts.work_share->ordered_team_ids;
+          else
+            {
+              uintptr_t p = (uintptr_t) thr->ts.work_share->ordered_team_ids;
+              p += __alignof__ (long long) - 1;
+              p &= ~(__alignof__ (long long) - 1);
+              *mem = (void *) p;
+            }
+        }
     }
 
   if (!istart)
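
The two branches above must agree on the address returned through *mem:
INLINE_ORDERED_TEAM_IDS_OFF rounds the structure offset of
inline_ordered_team_ids itself up to a long long boundary (a hypothetical
offset of 100 becomes 104 with 8-byte alignment), which is where the
initializing thread publishes the inline buffer, and the non-initializing
threads apply the matching round-up to thr->ts.work_share->ordered_team_ids,
which still points at the possibly misaligned inline array.  For
gomp_malloc_cleared memory the round-up is a no-op, since malloc results
are already suitably aligned.  A minimal sketch of that fixup, with a
hypothetical helper name:

#include <stdint.h>

/* Round a pointer up to the next long long boundary; an already
   aligned pointer is returned unchanged.  */
static inline void *
align_up_ll (void *p)
{
  uintptr_t u = (uintptr_t) p;
  u += __alignof__ (long long) - 1;
  u &= ~((uintptr_t) __alignof__ (long long) - 1);
  return (void *) u;
}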
--- libgomp/loop_ull.c.jj	2019-01-01 12:38:37.893650279 +0100
+++ libgomp/loop_ull.c	2019-05-27 14:58:23.140888183 +0200
@@ -266,14 +266,17 @@ GOMP_loop_ull_start (bool up, gomp_ull s
       if (mem)
         {
           uintptr_t size = (uintptr_t) *mem;
+#define INLINE_ORDERED_TEAM_IDS_OFF \
+  ((offsetof (struct gomp_work_share, inline_ordered_team_ids) \
+    + __alignof__ (long long) - 1) & ~(__alignof__ (long long) - 1))
           if (size > (sizeof (struct gomp_work_share)
-                      - offsetof (struct gomp_work_share,
-                                  inline_ordered_team_ids)))
-            thr->ts.work_share->ordered_team_ids
-              = gomp_malloc_cleared (size);
+                      - INLINE_ORDERED_TEAM_IDS_OFF))
+            *mem
+              = (void *) (thr->ts.work_share->ordered_team_ids
+                          = gomp_malloc_cleared (size));
           else
-            memset (thr->ts.work_share->ordered_team_ids, '\0', size);
-          *mem = (void *) thr->ts.work_share->ordered_team_ids;
+            *mem = memset (((char *) thr->ts.work_share)
+                           + INLINE_ORDERED_TEAM_IDS_OFF, '\0', size);
         }
      gomp_work_share_init_done ();
     }
@@ -286,7 +289,18 @@ GOMP_loop_ull_start (bool up, gomp_ull s
                                                   first_reductions);
         }
       if (mem)
-        *mem = (void *) thr->ts.work_share->ordered_team_ids;
+        {
+          if ((offsetof (struct gomp_work_share, inline_ordered_team_ids)
+               & (__alignof__ (long long) - 1)) == 0)
+            *mem = (void *) thr->ts.work_share->ordered_team_ids;
+          else
+            {
+              uintptr_t p = (uintptr_t) thr->ts.work_share->ordered_team_ids;
+              p += __alignof__ (long long) - 1;
+              p &= ~(__alignof__ (long long) - 1);
+              *mem = (void *) p;
+            }
+        }
     }
 
   return ialias_call (GOMP_loop_ull_runtime_next) (istart, iend);
--- libgomp/sections.c.jj	2019-01-01 12:38:37.770652297 +0100
+++ libgomp/sections.c	2019-05-27 14:59:15.380044507 +0200
@@ -118,14 +118,17 @@ GOMP_sections2_start (unsigned count, ui
       if (mem)
         {
           uintptr_t size = (uintptr_t) *mem;
+#define INLINE_ORDERED_TEAM_IDS_OFF \
+  ((offsetof (struct gomp_work_share, inline_ordered_team_ids) \
+    + __alignof__ (long long) - 1) & ~(__alignof__ (long long) - 1))
           if (size > (sizeof (struct gomp_work_share)
-                      - offsetof (struct gomp_work_share,
-                                  inline_ordered_team_ids)))
-            thr->ts.work_share->ordered_team_ids
-              = gomp_malloc_cleared (size);
+                      - INLINE_ORDERED_TEAM_IDS_OFF))
+            *mem
+              = (void *) (thr->ts.work_share->ordered_team_ids
+                          = gomp_malloc_cleared (size));
           else
-            memset (thr->ts.work_share->ordered_team_ids, '\0', size);
-          *mem = (void *) thr->ts.work_share->ordered_team_ids;
+            *mem = memset (((char *) thr->ts.work_share)
+                           + INLINE_ORDERED_TEAM_IDS_OFF, '\0', size);
         }
       gomp_work_share_init_done ();
     }
@@ -138,7 +141,18 @@ GOMP_sections2_start (unsigned count, ui
                                                   first_reductions);
         }
       if (mem)
-        *mem = (void *) thr->ts.work_share->ordered_team_ids;
+        {
+          if ((offsetof (struct gomp_work_share, inline_ordered_team_ids)
+               & (__alignof__ (long long) - 1)) == 0)
+            *mem = (void *) thr->ts.work_share->ordered_team_ids;
+          else
+            {
+              uintptr_t p = (uintptr_t) thr->ts.work_share->ordered_team_ids;
+              p += __alignof__ (long long) - 1;
+              p &= ~(__alignof__ (long long) - 1);
+              *mem = (void *) p;
+            }
+        }
     }
 
 #ifdef HAVE_SYNC_BUILTINS
Jakub