#define OMP_SCOPE_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
#define OMP_SCOPE_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
else if ((gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
|| ctx->loop_p
+ || code == OMP_CLAUSE_ALLOCATE
|| (code == OMP_CLAUSE_PRIVATE
&& (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
|| gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS
allocator = TREE_PURPOSE (allocator);
}
if (TREE_CODE (allocator) != INTEGER_CST)
- allocator = build_outer_var_ref (allocator, ctx);
+ allocator = build_outer_var_ref (allocator, ctx, OMP_CLAUSE_ALLOCATE);
allocator = fold_convert (pointer_sized_int_node, allocator);
if (TREE_CODE (allocator) != INTEGER_CST)
{
--- /dev/null
+/* { dg-do compile } */
+
+/* Verify the front-end diagnostic for a firstprivate clause on
+   "omp scope" when the variable is private in the enclosing
+   context — the expected wording is pinned by the dg-error below.
+   NOTE(review): compile-only test; foo is never executed.  */
+void
+foo ()
+{
+  int f = 0;
+  #pragma omp scope firstprivate(f) /* { dg-error "firstprivate variable 'f' is private in outer context" } */
+  f++;
+}
--- /dev/null
+/* Local mirror of the omp_allocator_handle_t declaration so the test
+   does not depend on <omp.h>; in C++11 and later the enum carries the
+   uintptr_t underlying type, matching the runtime's definition.  */
+typedef enum omp_allocator_handle_t
+#if __cplusplus >= 201103L
+: __UINTPTR_TYPE__
+#endif
+{
+  omp_null_allocator = 0,
+  omp_default_mem_alloc = 1,
+  omp_large_cap_mem_alloc = 2,
+  omp_const_mem_alloc = 3,
+  omp_high_bw_mem_alloc = 4,
+  omp_low_lat_mem_alloc = 5,
+  omp_cgroup_mem_alloc = 6,
+  omp_pteam_mem_alloc = 7,
+  omp_thread_mem_alloc = 8,
+  __omp_allocator_handle_t_max__ = __UINTPTR_MAX__
+} omp_allocator_handle_t;
+
+int a = 0, b = 42, c = 0;
+
+/* Exercise an "omp scope" construct that combines private and
+   reduction clauses with an allocate clause using a user-supplied
+   allocator handle H via the allocator() modifier.  */
+void
+foo (omp_allocator_handle_t h)
+{
+  #pragma omp scope private (a) private (b) reduction (+: c) allocate (allocator (h): a, b, c)
+  {
+    if (b != 42)
+      __builtin_abort ();
+    a = 36;
+    b = 15;
+    c++;
+  }
+}
[[omp::directive (cancellation point parallel)]];
}
}
- [[omp::directive (scope private (p) reduction(+:r) nowait)]]
+ [[omp::directive (scope private (p) firstprivate (f) reduction(+:r) nowait
+ allocate (omp_default_mem_alloc: r))]]
;
- [[omp::directive (scope private (p) reduction(task, +:r))]]
+ [[omp::directive (scope private (p) firstprivate (f) reduction(task, +:r)
+ allocate (omp_default_mem_alloc: f))]]
;
extern int t2;
[[omp::directive (threadprivate (t2))]];
[[omp::directive (cancellation point, parallel)]];
}
}
- [[omp::directive (scope, private (p), reduction(+:r), nowait)]]
+ [[omp::directive (scope, private (p), firstprivate (f), reduction(+:r), nowait,
+ allocate(omp_default_mem_alloc: r))]]
;
- [[using omp:directive (scope, private (p), reduction(task, +:r))]]
+ [[using omp:directive (scope, private (p), firstprivate (f), reduction(task, +:r),
+ allocate (omp_default_mem_alloc: f))]]
;
extern int t2;
[[omp::directive (threadprivate (t2))]];
int i2, j2, n2 = 9, l4;
int i3, j3, n3 = 10, l5;
int i4, j4, n4 = 11, l6;
- int i5;
+ int i5, n5;
int v[x], w[x];
int r2[4] = { 0, 0, 0, 0 };
int xo = x;
|| q[0] != 3 * (32 * 31) / 2 || q[2] != 4 * (32 * 31) / 2
|| r2[0] != 5 * (32 * 31) / 2 || r2[3] != 6 * (32 * 31) / 2)
abort ();
+ r = 0;
+ x = xo;
+ #pragma omp parallel shared (x, y, r, n5) firstprivate (h)
+ {
+ #pragma omp masked
+ n5 = omp_get_num_threads ();
+ #pragma omp scope private (y) firstprivate (x) reduction(+:r) \
+ allocate (h: x, y, r)
+ {
+ int *volatile p1 = &x;
+ int *volatile p2 = &y;
+ int *volatile p3 = &r;
+ if (x != 42)
+ abort ();
+ #pragma omp barrier
+ *p2 = 1;
+ p1[0]++;
+ p3[0]++;
+ #pragma omp barrier
+ if (x != 43 || y != 1 || r != 1)
+ abort ();
+ if ((fl & 1) && (((uintptr_t) p1 | (uintptr_t) p2
+ | (uintptr_t) p3) & 63) != 0)
+ abort ();
+ }
+ }
+ if (x != 42 || r != n5)
+ abort ();
}
void
int i2, j2, n2 = 9, l4;
int i3, j3, n3 = 10, l5;
int i4, j4, n4 = 11, l6;
- int i5;
+ int i5, n5;
int v[x], w[x];
int r2[4] = { 0, 0, 0, 0 };
int xo = x;
|| q[0] != 3 * (32 * 31) / 2 || q[2] != 4 * (32 * 31) / 2
|| r2[0] != 5 * (32 * 31) / 2 || r2[3] != 6 * (32 * 31) / 2)
abort ();
+ r = 0;
+ x = xo;
+ #pragma omp parallel shared (x, y, r, n5) firstprivate (h)
+ {
+ #pragma omp masked
+ n5 = omp_get_num_threads ();
+ #pragma omp scope private (y) firstprivate (x) reduction(+:r) \
+ allocate (allocator (h), align (32): x) \
+ allocate (align (128), allocator (h): y) \
+ allocate (align (32), allocator (h): r)
+ {
+ int *volatile p1 = &x;
+ int *volatile p2 = &y;
+ if (x != 42)
+ abort ();
+ #pragma omp barrier
+ *p2 = 1;
+ p1[0]++;
+ r++;
+ #pragma omp barrier
+ if (x != 43 || y != 1 || r != 1)
+ abort ();
+ if ((fl & 1) && (((uintptr_t) p1 | (uintptr_t) p2
+ | (uintptr_t) &r) & 63) != 0)
+ abort ();
+ if ((((uintptr_t) p1 | (uintptr_t) &r) & 31) != 0)
+ abort ();
+ if ((((uintptr_t) p2) & 127) != 0)
+ abort ();
+ }
+ }
+ if (x != 42 || r != n5)
+ abort ();
}
void
--- /dev/null
+#ifdef __cplusplus
+extern "C"
+#endif
+void abort ();
+
+/* Run-time checks for nested "omp scope" constructs combining the
+   nowait, firstprivate, private and reduction clauses inside a
+   parallel region.  */
+int
+main ()
+{
+  int a[64] = {};
+  int r = 0, r2 = 0, i, n = 64;
+  #pragma omp parallel
+  {
+    /* Two nested nowait scopes around a worksharing loop; the inner
+       scope also takes a firstprivate copy of n.  */
+    #pragma omp scope nowait
+    #pragma omp scope nowait firstprivate (n)
+    #pragma omp for
+    for (i = 0; i < 64; i++)
+      a[i] += 1;
+    /* n is firstprivate here, so each thread checks and then updates
+       its own copy after the explicit barrier.  */
+    #pragma omp scope reduction(+: r) nowait firstprivate (n)
+    {
+      #pragma omp for nowait
+      for (i = 0; i < 64; i++)
+	{
+	  r += i;
+	  if (a[i] != 1)
+	    abort ();
+	}
+      #pragma omp barrier
+      if (n != 64)
+	abort ();
+      else
+	n = 128;
+    }
+    /* The scope above was nowait, so an explicit barrier is needed
+       before the reduction result in r can be read.  */
+    #pragma omp barrier
+    if (r != 64 * 63 / 2)
+      abort ();
+    /* The inner scope here has no nowait: its implicit barrier makes
+       the r2 reduction final at the check below.  */
+    #pragma omp scope nowait private (i)
+    #pragma omp scope reduction(+: r2)
+    {
+      #pragma omp for nowait
+      for (i = 0; i < 64; i++)
+	{
+	  r2 += 2 * i;
+	  a[i] += i;
+	}
+    }
+    if (r2 != 64 * 63)
+      abort ();
+    #pragma omp for nowait
+    for (i = 0; i < 64; i++)
+      if (a[i] != i + 1)
+	abort ();
+  }
+  return 0;
+}