[gomp4.1] Various ordered, linear and copyin tweaks


Hi!

So, OpenMP 4.5 says that:
1) linear can only be specified for the loop iterator on
   distribute {, parallel for} simd (because distribute can't
   do firstprivate + lastprivate)
2) linear can't be specified at all on distribute parallel for
3) linear can't be specified on doacross loops (ordered(n) clause)
4) ordered can't be specified on distribute parallel for{, simd}
   (as no synchronization exists between contention groups)
5) copyin can't be specified on target parallel{, for, for simd}
   (as threadprivate is not supported in target regions)

This patch adds diagnostics for those restrictions and tweaks the testsuite.
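
To make the restrictions concrete, here is a minimal sketch of the kind of code that is now diagnosed (variable names are illustrative; the full set of cases is exercised by the new clauses-4.c test below):

int t;
#pragma omp threadprivate (t)

void
example (int x, int y)
{
  /* 1)/2) linear on a variable other than the loop iterator, on a
     construct combined with distribute: now rejected.  */
  #pragma omp target teams distribute simd linear (y : 2)
  for (x = 0; x < 64; x += 2)
    y += 2;

  /* 3) linear together with ordered(n) on a doacross loop: now rejected.  */
  #pragma omp for ordered(1) linear(y)
  for (x = 0; x < 64; x++)
    y++;

  /* 4) ordered on a composite distribute parallel for: now rejected.  */
  #pragma omp target teams distribute parallel for ordered
  for (x = 0; x < 64; x++)
    ;

  /* 5) copyin on target parallel: now rejected.  */
  #pragma omp target parallel copyin (t)
    ;
}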

2015-10-13  Jakub Jelinek  <jakub@redhat.com>

gcc/
	* gimplify.c (gimplify_scan_omp_clauses): Diagnose linear
	clause on combined distribute {, parallel for} simd construct,
	unless it is the loop iterator.
gcc/c/
	* c-parser.c (c_parser_omp_for_loop): Disallow linear clause
	if ordered(n) is present.
	(c_parser_omp_for): Disallow ordered clause if combined with
	distribute.  Disallow linear clause if combined with distribute
	but not with simd.
	(c_parser_omp_parallel): Disallow copyin clause on
	target parallel{, for, for simd}.
gcc/cp/
	* parser.c (cp_parser_omp_for_loop): Disallow linear clause
	if ordered(n) is present.
	(cp_parser_omp_for): Disallow ordered clause if combined with
	distribute.  Disallow linear clause if combined with distribute
	but not with simd.
	(cp_parser_omp_parallel): Disallow copyin clause on
	target parallel{, for, for simd}.
gcc/testsuite/
	* c-c++-common/gomp/clauses-1.c (bar): Remove linear
	and/or ordered clauses where they are no longer allowed.
	* c-c++-common/gomp/clauses-4.c: New test.
	* c-c++-common/gomp/pr61486-1.c (foo): Remove linear clause
	on non-iterator.
	* c-c++-common/gomp/pr61486-2.c (test, test2): Remove ordered
	clause and ordered construct where no longer allowed.
libgomp/
	* testsuite/libgomp.c/pr66199-2.c (f2): Adjust for the linear clause
	now being allowed only on the loop iterator.
	* testsuite/libgomp.c/pr66199-4.c (f2): Adjust for the linear clause
	no longer being allowed.
	* testsuite/libgomp.c/linear-2.c: Remove.
	* testsuite/libgomp.c++/linear-2.C: Remove.
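
For the libgomp tests that used a linear clause on a variable other than the loop iterator, the adjustment is roughly the following (a sketch of the pr66199-2.c change included below; instead of a linear update of c, the test computes the value from the firstprivate c and the iterator d):

  /* Previously (now rejected on this composite construct):  */
  #pragma omp target teams distribute parallel for simd default(none) firstprivate (a, b) shared(u, v, w) linear(d) linear(c:5) lastprivate(e)
  for (d = a; d < b; d++)
    {
      u[d] = v[d] + w[d];
      c += 5;
      e = c;
    }

  /* Adjusted: c stays firstprivate and e is derived from the iterator d.  */
  #pragma omp target teams distribute parallel for simd default(none) firstprivate (a, b, c) shared(u, v, w) linear(d) lastprivate(e)
  for (d = a; d < b; d++)
    {
      u[d] = v[d] + w[d];
      e = c + d * 5;
    }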

--- gcc/gimplify.c.jj	2015-10-12 13:43:30.000000000 +0200
+++ gcc/gimplify.c	2015-10-12 19:19:34.563575391 +0200
@@ -6340,6 +6340,36 @@ gimplify_scan_omp_clauses (tree *list_p,
 	    }
 	  else
 	    {
+	      if (code == OMP_SIMD
+		  && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
+		{
+		  struct gimplify_omp_ctx *octx = outer_ctx;
+		  if (octx
+		      && octx->region_type == ORT_WORKSHARE
+		      && octx->combined_loop
+		      && !octx->distribute)
+		    {
+		      if (octx->outer_context
+			  && (octx->outer_context->region_type
+			      == ORT_COMBINED_PARALLEL))
+			octx = octx->outer_context->outer_context;
+		      else
+			octx = octx->outer_context;
+		    }
+		  if (octx
+		      && octx->region_type == ORT_WORKSHARE
+		      && octx->combined_loop
+		      && octx->distribute
+		      && !lang_GNU_Fortran ())
+		    {
+		      error_at (OMP_CLAUSE_LOCATION (c),
+				"%<linear%> clause for variable other than "
+				"loop iterator specified on construct "
+				"combined with %<distribute%>");
+		      remove = true;
+		      break;
+		    }
+		}
 	      /* For combined #pragma omp parallel for simd, need to put
 		 lastprivate and perhaps firstprivate too on the
 		 parallel.  Similarly for #pragma omp for simd.  */
--- gcc/c/c-parser.c.jj	2015-10-12 13:26:52.000000000 +0200
+++ gcc/c/c-parser.c	2015-10-12 18:27:03.321992027 +0200
@@ -13662,6 +13662,19 @@ c_parser_omp_for_loop (location_t loc, c
 	= build_int_cst (NULL_TREE, collapse);
       ordered = collapse;
     }
+  if (ordered)
+    {
+      for (tree *pc = &clauses; *pc; )
+	if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_LINEAR)
+	  {
+	    error_at (OMP_CLAUSE_LOCATION (*pc),
+		      "%<linear%> clause may not be specified together "
+		      "with %<ordered%> clause with a parameter");
+	    *pc = OMP_CLAUSE_CHAIN (*pc);
+	  }
+	else
+	  pc = &OMP_CLAUSE_CHAIN (*pc);
+    }
 
   gcc_assert (collapse >= 1 && ordered >= 0);
   count = ordered ? ordered : collapse;
@@ -14066,6 +14079,9 @@ c_parser_omp_for (location_t loc, c_pars
   mask |= OMP_FOR_CLAUSE_MASK;
   if (cclauses)
     mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT);
+  /* Composite distribute parallel for{, simd} disallows ordered clause.  */
+  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
+    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED);
 
   if (c_parser_next_token_is (parser, CPP_NAME))
     {
@@ -14100,6 +14116,10 @@ c_parser_omp_for (location_t loc, c_pars
       return NULL_TREE;
     }
 
+  /* Composite distribute parallel for disallows linear clause.  */
+  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
+    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR);
+
   clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
   if (cclauses)
     {
@@ -14340,6 +14360,10 @@ c_parser_omp_parallel (location_t loc, c
 
   strcat (p_name, " parallel");
   mask |= OMP_PARALLEL_CLAUSE_MASK;
+  /* #pragma omp target parallel{, for, for simd} disallow copyin clause.  */
+  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0
+      && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
+    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN);
 
   if (c_parser_next_token_is_keyword (parser, RID_FOR))
     {
--- gcc/cp/parser.c.jj	2015-10-12 13:27:43.000000000 +0200
+++ gcc/cp/parser.c	2015-10-12 18:44:31.150225238 +0200
@@ -32228,6 +32228,19 @@ cp_parser_omp_for_loop (cp_parser *parse
 	= build_int_cst (NULL_TREE, collapse);
       ordered = collapse;
     }
+  if (ordered)
+    {
+      for (tree *pc = &clauses; *pc; )
+	if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_LINEAR)
+	  {
+	    error_at (OMP_CLAUSE_LOCATION (*pc),
+		      "%<linear%> clause may not be specified together "
+		      "with %<ordered%> clause with a parameter");
+	    *pc = OMP_CLAUSE_CHAIN (*pc);
+	  }
+	else
+	  pc = &OMP_CLAUSE_CHAIN (*pc);
+    }
 
   gcc_assert (collapse >= 1 && ordered >= 0);
   count = ordered ? ordered : collapse;
@@ -32587,6 +32600,9 @@ cp_parser_omp_for (cp_parser *parser, cp
   mask |= OMP_FOR_CLAUSE_MASK;
   if (cclauses)
     mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT);
+  /* Composite distribute parallel for{, simd} disallows ordered clause.  */
+  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
+    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED);
 
   if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
     {
@@ -32626,6 +32642,10 @@ cp_parser_omp_for (cp_parser *parser, cp
       return NULL_TREE;
     }
 
+  /* Composite distribute parallel for disallows linear clause.  */
+  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
+    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR);
+
   clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok,
 				       cclauses == NULL);
   if (cclauses)
@@ -32852,6 +32872,10 @@ cp_parser_omp_parallel (cp_parser *parse
 
   strcat (p_name, " parallel");
   mask |= OMP_PARALLEL_CLAUSE_MASK;
+  /* #pragma omp target parallel{, for, for simd} disallow copyin clause.  */
+  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0
+      && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
+    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN);
 
   if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
     {
--- gcc/testsuite/c-c++-common/gomp/clauses-1.c.jj	2015-07-16 18:09:25.000000000 +0200
+++ gcc/testsuite/c-c++-common/gomp/clauses-1.c	2015-10-12 17:47:00.355155717 +0200
@@ -15,19 +15,19 @@ foo (int d, int m, int i1, int i2, int p
   #pragma omp distribute parallel for \
     private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
     if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
-    lastprivate (l) linear (ll:1) ordered schedule(static, 4)
+    lastprivate (l) schedule(static, 4)
   for (int i = 0; i < 64; i++)
     ll++;
   #pragma omp distribute parallel for simd \
     private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
     if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
-    lastprivate (l) linear (ll:1) schedule(static, 4) \
+    lastprivate (l) schedule(static, 4) \
     safelen(8) simdlen(4) aligned(q: 32)
   for (int i = 0; i < 64; i++)
     ll++;
   #pragma omp distribute simd \
     private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
-    safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(+:r)
+    safelen(8) simdlen(4) aligned(q: 32) reduction(+:r)
   for (int i = 0; i < 64; i++)
     ll++;
 }
@@ -94,7 +94,7 @@ bar (int d, int m, int i1, int i2, int p
     shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
     collapse(1) dist_schedule(static, 16) \
     if (parallel: i2) num_threads (nth) proc_bind(spread) \
-    lastprivate (l) linear (ll:1) ordered schedule(static, 4)
+    lastprivate (l) schedule(static, 4)
   for (int i = 0; i < 64; i++)
     ll++;
   #pragma omp target teams distribute parallel for simd \
@@ -102,7 +102,7 @@ bar (int d, int m, int i1, int i2, int p
     shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
     collapse(1) dist_schedule(static, 16) \
     if (parallel: i2) num_threads (nth) proc_bind(spread) \
-    lastprivate (l) linear (ll:1) schedule(static, 4) \
+    lastprivate (l) schedule(static, 4) \
     safelen(8) simdlen(4) aligned(q: 32)
   for (int i = 0; i < 64; i++)
     ll++;
@@ -110,7 +110,7 @@ bar (int d, int m, int i1, int i2, int p
     device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
     shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
     collapse(1) dist_schedule(static, 16) \
-    safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32)
+    safelen(8) simdlen(4) aligned(q: 32)
   for (int i = 0; i < 64; i++)
     ll++;
   #pragma omp target simd \
@@ -140,7 +140,7 @@ bar (int d, int m, int i1, int i2, int p
     private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
     collapse(1) dist_schedule(static, 16) \
     if (parallel: i2) num_threads (nth) proc_bind(spread) \
-    lastprivate (l) linear (ll:1) ordered schedule(static, 4)
+    lastprivate (l) schedule(static, 4)
   for (int i = 0; i < 64; i++)
     ll++;
   #pragma omp target
@@ -148,7 +148,7 @@ bar (int d, int m, int i1, int i2, int p
     private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
     collapse(1) dist_schedule(static, 16) \
     if (parallel: i2) num_threads (nth) proc_bind(spread) \
-    lastprivate (l) linear (ll:1) schedule(static, 4) \
+    lastprivate (l) schedule(static, 4) \
     safelen(8) simdlen(4) aligned(q: 32)
   for (int i = 0; i < 64; i++)
     ll++;
@@ -156,7 +156,7 @@ bar (int d, int m, int i1, int i2, int p
   #pragma omp teams distribute simd \
     private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
     collapse(1) dist_schedule(static, 16) \
-    safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32)
+    safelen(8) simdlen(4) aligned(q: 32)
   for (int i = 0; i < 64; i++)
     ll++;
 }
--- gcc/testsuite/c-c++-common/gomp/clauses-4.c.jj	2015-10-12 17:26:00.475096064 +0200
+++ gcc/testsuite/c-c++-common/gomp/clauses-4.c	2015-10-12 18:40:14.763083015 +0200
@@ -0,0 +1,96 @@
+int t;
+#pragma omp threadprivate (t)
+
+void
+foo (int y, short z)
+{
+  int x;
+  #pragma omp target teams map (from: x)
+  #pragma omp distribute simd linear (x : 2)
+  for (x = 0; x < 64; x += 2)
+    ;
+  #pragma omp target teams map (from: x)
+  #pragma omp distribute parallel for simd linear (x)
+  for (x = 0; x < 64; x++)
+    ;
+  #pragma omp target teams map (tofrom: y)
+  #pragma omp distribute simd linear (y : 2)	/* { dg-error ".linear. clause for variable other than loop iterator specified on construct combined with .distribute." } */
+  for (x = 0; x < 64; x += 2)
+    y += 2;
+  #pragma omp target teams map (tofrom: z)
+  #pragma omp distribute parallel for simd linear (z)	/* { dg-error ".linear. clause for variable other than loop iterator specified on construct combined with .distribute." } */
+  for (x = 0; x < 64; x++)
+    z++;
+  #pragma omp target teams map (tofrom: z)
+  #pragma omp distribute parallel for linear (z: 4)	/* { dg-error ".linear. is not valid for .#pragma omp distribute parallel for." } */
+  for (x = 0; x < 64; x++)
+    z += 4;
+  #pragma omp target map (from: x)
+  #pragma omp teams distribute simd linear (x : 2)
+  for (x = 0; x < 64; x += 2)
+    ;
+  #pragma omp target map (from: x)
+  #pragma omp teams distribute parallel for simd linear (x)
+  for (x = 0; x < 64; x++)
+    ;
+  #pragma omp target map (tofrom: y)
+  #pragma omp teams distribute simd linear (y : 2)	/* { dg-error ".linear. clause for variable other than loop iterator specified on construct combined with .distribute." } */
+  for (x = 0; x < 64; x += 2)
+    y += 2;
+  #pragma omp target map (tofrom: z)
+  #pragma omp teams distribute parallel for simd linear (z)	/* { dg-error ".linear. clause for variable other than loop iterator specified on construct combined with .distribute." } */
+  for (x = 0; x < 64; x++)
+    z++;
+  #pragma omp target map (tofrom: z)
+  #pragma omp teams distribute parallel for linear (z: 4)	/* { dg-error ".linear. is not valid for .#pragma omp teams distribute parallel for." } */
+  for (x = 0; x < 64; x++)
+    z += 4;
+  #pragma omp target parallel copyin (t)	/* { dg-error ".copyin. is not valid for .#pragma omp target parallel." } */
+    ;
+  #pragma omp target parallel for copyin (t)	/* { dg-error ".copyin. is not valid for .#pragma omp target parallel for." } */
+  for (x = 0; x < 64; x++)
+    ;
+  #pragma omp target parallel for simd copyin (t)	/* { dg-error ".copyin. is not valid for .#pragma omp target parallel for simd." } */
+  for (x = 0; x < 64; x++)
+    ;
+  #pragma omp target teams
+  #pragma omp distribute parallel for ordered		/* { dg-error ".ordered. is not valid for .#pragma omp distribute parallel for." } */
+  for (x = 0; x < 64; x++)
+    {
+      #pragma omp ordered	/* { dg-error "ordered region must be closely nested inside a loop region with an ordered clause" } */
+      ;
+    }
+  #pragma omp target teams
+  #pragma omp distribute parallel for simd ordered	/* { dg-error ".ordered. is not valid for .#pragma omp distribute parallel for simd." } */
+  for (x = 0; x < 64; x++)
+    {
+      #pragma omp ordered simd, threads	/* { dg-error "OpenMP constructs other than .#pragma omp ordered simd. may not be nested inside simd region" } */
+      ;
+    }
+  #pragma omp target
+  #pragma omp teams distribute parallel for ordered		/* { dg-error ".ordered. is not valid for .#pragma omp teams distribute parallel for." } */
+  for (x = 0; x < 64; x++)
+    {
+      #pragma omp ordered	/* { dg-error "ordered region must be closely nested inside a loop region with an ordered clause" } */
+      ;
+    }
+  #pragma omp target
+  #pragma omp teams distribute parallel for simd ordered	/* { dg-error ".ordered. is not valid for .#pragma omp teams distribute parallel for simd." } */
+  for (x = 0; x < 64; x++)
+    {
+      #pragma omp ordered simd, threads	/* { dg-error "OpenMP constructs other than .#pragma omp ordered simd. may not be nested inside simd region" } */
+      ;
+    }
+  #pragma omp target teams distribute parallel for ordered		/* { dg-error ".ordered. is not valid for .#pragma omp target teams distribute parallel for." } */
+  for (x = 0; x < 64; x++)
+    {
+      #pragma omp ordered	/* { dg-error "ordered region must be closely nested inside a loop region with an ordered clause" } */
+      ;
+    }
+  #pragma omp target teams distribute parallel for simd ordered	/* { dg-error ".ordered. is not valid for .#pragma omp target teams distribute parallel for simd." } */
+  for (x = 0; x < 64; x++)
+    {
+      #pragma omp ordered simd, threads	/* { dg-error "OpenMP constructs other than .#pragma omp ordered simd. may not be nested inside simd region" } */
+      ;
+    }
+}
--- gcc/testsuite/c-c++-common/gomp/pr61486-1.c.jj	2015-04-24 12:32:01.000000000 +0200
+++ gcc/testsuite/c-c++-common/gomp/pr61486-1.c	2015-10-12 17:49:42.041721972 +0200
@@ -6,8 +6,8 @@ int
 foo (int *a)
 {
   int i, j = 0;
-  #pragma omp target teams distribute simd linear(i, j) map(a[:10])
+  #pragma omp target teams distribute simd linear(i) map(a[:10])
   for (i = 0; i < 10; i++)
-    a[i] = j++;
-  return i + j;
+    a[i] = j;
+  return i;
 }
--- gcc/testsuite/c-c++-common/gomp/pr61486-2.c.jj	2015-09-03 16:37:51.000000000 +0200
+++ gcc/testsuite/c-c++-common/gomp/pr61486-2.c	2015-10-12 17:55:47.205225446 +0200
@@ -50,22 +50,21 @@ test (int n, int o, int p, int q, int r,
     	private (p) firstprivate (q) shared (n) reduction (+: r) \
     	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
     	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
-    	ordered schedule (static, 8)
+    	schedule (static, 8)
       for (i = 0; i < 10; i++)
 	for (j = 0; j < 10; j++)
 	  {
 	    r = r + 1;
 	    p = q;
 	    dosomething (a, n, p + q);
-	    #pragma omp ordered
-	      p = q;
+	    p = q;
 	    s = i * 10 + j;
 	  }
     #pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
     	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
     	private (p) firstprivate (q) shared (n) reduction (+: r) \
     	thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
-    	proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+    	proc_bind (master) lastprivate (s) schedule (static, 8)
       for (i = 0; i < 10; i++)
 	{
 	  for (j = 0; j < 10; j++)
@@ -74,8 +73,7 @@ test (int n, int o, int p, int q, int r,
 	      p = q;
 	      dosomething (a, n, p + q);
 	    }
-	  #pragma omp ordered
-	    p = q;
+	  p = q;
 	  s = i * 10;
 	}
     #pragma omp target teams distribute parallel for simd device (n + 1) \
@@ -165,22 +163,21 @@ test (int n, int o, int p, int q, int r,
 	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
     	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
     	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
-    	ordered schedule (static, 8)
+    	schedule (static, 8)
       for (i = 0; i < 10; i++)
 	for (j = 0; j < 10; j++)
 	  {
 	    r = r + 1;
 	    p = q;
 	    dosomething (a, n, p + q);
-	    #pragma omp ordered
-	      p = q;
+	    p = q;
 	    s = i * 10 + j;
 	  }
     #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
     #pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
 	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
     	thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
-    	proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+    	proc_bind (master) lastprivate (s) schedule (static, 8)
       for (i = 0; i < 10; i++)
 	{
 	  for (j = 0; j < 10; j++)
@@ -189,8 +186,7 @@ test (int n, int o, int p, int q, int r,
 	      p = q;
 	      dosomething (a, n, p + q);
 	    }
-	  #pragma omp ordered
-	    p = q;
+	  p = q;
 	  s = i * 10;
 	}
     #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
@@ -273,15 +269,14 @@ test (int n, int o, int p, int q, int r,
 	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
     	collapse (2) dist_schedule (static, 4) \
     	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
-    	ordered schedule (static, 8)
+    	schedule (static, 8)
       for (i = 0; i < 10; i++)
 	for (j = 0; j < 10; j++)
 	  {
 	    r = r + 1;
 	    p = q;
 	    dosomething (a, n, p + q);
-	    #pragma omp ordered
-	      p = q;
+	    p = q;
 	    s = i * 10 + j;
 	  }
     #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
@@ -289,7 +284,7 @@ test (int n, int o, int p, int q, int r,
     #pragma omp distribute parallel for if (n != 6) \
 	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
     	num_threads (n + 4) dist_schedule (static, 4) \
-    	proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+    	proc_bind (master) lastprivate (s) schedule (static, 8)
       for (i = 0; i < 10; i++)
 	{
 	  for (j = 0; j < 10; j++)
@@ -298,8 +293,7 @@ test (int n, int o, int p, int q, int r,
 	      p = q;
 	      dosomething (a, n, p + q);
 	    }
-	  #pragma omp ordered
-	    p = q;
+	  p = q;
 	  s = i * 10;
 	}
     #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
@@ -385,21 +379,20 @@ test2 (int n, int o, int p, int r, int s
 	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
     	collapse (2) dist_schedule (static, 4) \
     	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
-    	ordered schedule (static, 8)
+    	schedule (static, 8)
       for (i = 0; i < 10; i++)
 	for (j = 0; j < 10; j++)
 	  {
 	    r = r + 1;
 	    p = q;
 	    dosomething (a, n, p + q);
-	    #pragma omp ordered
-	      p = q;
+	    p = q;
 	    s = i * 10 + j;
 	  }
     #pragma omp distribute parallel for if (n != 6) \
 	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
     	num_threads (n + 4) dist_schedule (static, 4) \
-    	proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+    	proc_bind (master) lastprivate (s) schedule (static, 8)
       for (i = 0; i < 10; i++)
 	{
 	  for (j = 0; j < 10; j++)
@@ -408,8 +401,7 @@ test2 (int n, int o, int p, int r, int s
 	      p = q;
 	      dosomething (a, n, p + q);
 	    }
-	  #pragma omp ordered
-	    p = q;
+	  p = q;
 	  s = i * 10;
 	}
     #pragma omp distribute parallel for simd if (n != 6)default(shared) \
--- libgomp/testsuite/libgomp.c/pr66199-2.c.jj	2015-06-17 21:00:37.000000000 +0200
+++ libgomp/testsuite/libgomp.c/pr66199-2.c	2015-10-12 19:16:16.276558954 +0200
@@ -18,12 +18,11 @@ __attribute__((noinline, noclone)) void
 f2 (long a, long b, long c)
 {
   long d, e;
-  #pragma omp target teams distribute parallel for simd default(none) firstprivate (a, b) shared(u, v, w) linear(d) linear(c:5) lastprivate(e)
+  #pragma omp target teams distribute parallel for simd default(none) firstprivate (a, b, c) shared(u, v, w) linear(d) lastprivate(e)
   for (d = a; d < b; d++)
     {
       u[d] = v[d] + w[d];
-      c += 5;
-      e = c;
+      e = c + d * 5;
     }
 }
 
--- libgomp/testsuite/libgomp.c/pr66199-4.c.jj	2015-06-18 15:16:18.000000000 +0200
+++ libgomp/testsuite/libgomp.c/pr66199-4.c	2015-10-12 19:16:40.353196680 +0200
@@ -19,12 +19,11 @@ __attribute__((noinline, noclone)) void
 f2 (long a, long b, long c)
 {
   long d, e;
-  #pragma omp target teams distribute parallel for default(none) firstprivate (a, b) shared(u, v, w) linear(d) linear(c:5) lastprivate(e)
+  #pragma omp target teams distribute parallel for default(none) firstprivate (a, b, c) shared(u, v, w) lastprivate(d, e)
   for (d = a; d < b; d++)
     {
       u[d] = v[d] + w[d];
-      c += 5;
-      e = c;
+      e = c + d * 5;
     }
 }
 
--- libgomp/testsuite/libgomp.c/linear-2.c.jj	2015-07-17 15:23:45.000000000 +0200
+++ libgomp/testsuite/libgomp.c/linear-2.c	2015-10-12 19:10:36.912665256 +0200
@@ -1,244 +0,0 @@
-#pragma omp declare target
-int a[256];
-#pragma omp end declare target
-
-__attribute__((noinline, noclone)) void
-f1 (int i)
-{
-  #pragma omp target teams distribute parallel for linear (i: 4)
-  for (int j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f2 (short int i, char k)
-{
-  #pragma omp target teams distribute parallel for linear (i: k + 1)
-  for (long j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f3 (long long int i, long long int k)
-{
-  #pragma omp target teams distribute parallel for linear (i: k)
-  for (short j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f4 (int i)
-{
-  #pragma omp target teams distribute parallel for linear (i: 4) schedule(static, 3)
-  for (int j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f5 (short int i, char k)
-{
-  #pragma omp target teams distribute parallel for linear (i: k + 1) schedule(static, 5)
-  for (long j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f6 (long long int i, long long int k)
-{
-  #pragma omp target teams distribute parallel for linear (i: k) schedule(static, 7)
-  for (short j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f7 (int i)
-{
-  #pragma omp target teams distribute parallel for linear (i: 4) schedule(dynamic, 3)
-  for (int j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f8 (short int i, char k)
-{
-  #pragma omp target teams distribute parallel for linear (i: k + 1) schedule(dynamic, 5)
-  for (long j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f9 (long long int i, long long int k)
-{
-  #pragma omp target teams distribute parallel for linear (i: k) schedule(dynamic, 7)
-  for (short j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f10 (int i, char start, long step)
-{
-  #pragma omp target teams distribute parallel for linear (i: 4)
-  for (int j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f11 (short int i, char k, long start, char step)
-{
-  #pragma omp target teams distribute parallel for linear (i: k + 1)
-  for (long j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f12 (long long int i, long long int k, long long int start, int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: k)
-  for (short j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f13 (int i, int start, long long int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: 4) schedule(static, 3)
-  for (int j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f14 (short int i, char k, unsigned long long int start, int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: k + 1) schedule(static, 5)
-  for (long j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f15 (long long int i, long long int k, char start, long int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: k) schedule(static, 7)
-  for (short j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f16 (int i, int start, long long int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: 4) schedule(dynamic, 3)
-  for (int j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f17 (short int i, char k, long start, int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: k + 1) schedule(dynamic, 5)
-  for (long j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f18 (long long int i, long long int k, short start, long int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: k) schedule(dynamic, 7)
-  for (short j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-void
-verify (void)
-{
-  int err;
-  #pragma omp target map(from:err)
-  {
-    err = 0;
-    for (int i = 0; i < 256; i++)
-      if (a[i] != (((i & 3) == 0 && i >= 8
-		    && i < 8 + 48 * 4)
-		   ? ((i - 8) / 4) + 16 : 0))
-	err = 1;
-    __builtin_memset (a, 0, sizeof (a));
-  }
-  if (err)
-    __builtin_abort ();
-}
-
-int
-main ()
-{
-#define TEST(x) x; verify ()
-  TEST (f1 (8));
-  TEST (f2 (8, 3));
-  TEST (f3 (8LL, 4LL));
-  TEST (f4 (8));
-  TEST (f5 (8, 3));
-  TEST (f6 (8LL, 4LL));
-  TEST (f7 (8));
-  TEST (f8 (8, 3));
-  TEST (f9 (8LL, 4LL));
-  TEST (f10 (8, 16, 2));
-  TEST (f11 (8, 3, 16, 2));
-  TEST (f12 (8LL, 4LL, 16, 2));
-  TEST (f13 (8, 16, 2));
-  TEST (f14 (8, 3, 16, 2));
-  TEST (f15 (8LL, 4LL, 16, 2));
-  TEST (f16 (8, 16, 2));
-  TEST (f17 (8, 3, 16, 2));
-  TEST (f18 (8LL, 4LL, 16, 2));
-  return 0;
-}
--- libgomp/testsuite/libgomp.c++/linear-2.C.jj	2015-07-17 15:26:20.000000000 +0200
+++ libgomp/testsuite/libgomp.c++/linear-2.C	2015-10-12 19:16:53.194003469 +0200
@@ -1,261 +0,0 @@
-#pragma omp declare target
-int a[256];
-#pragma omp end declare target
-
-template <typename T>
-__attribute__((noinline, noclone)) void
-f1 (T &i)
-{
-  #pragma omp target teams distribute parallel for linear (i: 4)
-  for (int j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f2 (short int i, char k)
-{
-  #pragma omp target teams distribute parallel for linear (i: k + 1)
-  for (long j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f3 (long long int &i, long long int k)
-{
-  #pragma omp target teams distribute parallel for linear (i: k)
-  for (short j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f4 (int i)
-{
-  #pragma omp target teams distribute parallel for linear (i: 4) schedule(static, 3)
-  for (int j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f5 (short int i, char k)
-{
-  #pragma omp target teams distribute parallel for linear (i: k + 1) schedule(static, 5)
-  for (long j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-template <int N>
-__attribute__((noinline, noclone)) void
-f6 (long long int &i, long long int k)
-{
-  #pragma omp target teams distribute parallel for linear (i: k) schedule(static, 7)
-  for (short j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f7 (int i)
-{
-  #pragma omp target teams distribute parallel for linear (i: 4) schedule(dynamic, 3)
-  for (int j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f8 (short int &i, char &k)
-{
-  #pragma omp target teams distribute parallel for linear (i: k + 1) schedule(dynamic, 5)
-  for (long j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f9 (long long int i, long long int k)
-{
-  #pragma omp target teams distribute parallel for linear (i: k) schedule(dynamic, 7)
-  for (short j = 16; j < 64; j++)
-    {
-      a[i] = j;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f10 (int i, char start, long step)
-{
-  #pragma omp target teams distribute parallel for linear (i: 4)
-  for (int j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f11 (short int &i, char &k, long &start, char step)
-{
-  #pragma omp target teams distribute parallel for linear (i: k + 1)
-  for (long j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f12 (long long int i, long long int k, long long int start, int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: k)
-  for (short j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f13 (int i, int start, long long int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: 4) schedule(static, 3)
-  for (int j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f14 (short int i, char k, unsigned long long int start, int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: k + 1) schedule(static, 5)
-  for (long j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-template <typename T>
-__attribute__((noinline, noclone)) void
-f15 (T &i, T k, char &start, long int &step)
-{
-  #pragma omp target teams distribute parallel for linear (i: k) schedule(static, 7)
-  for (short j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-template <typename T>
-__attribute__((noinline, noclone)) void
-f16 (T i, T start, long long int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: 4) schedule(dynamic, 3)
-  for (int j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f17 (short int i, char k, long start, int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: k + 1) schedule(dynamic, 5)
-  for (long j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-__attribute__((noinline, noclone)) void
-f18 (long long int i, long long int k, short start, long int step)
-{
-  #pragma omp target teams distribute parallel for linear (i: k) schedule(dynamic, 7)
-  for (short j = start; j < 112; j += step)
-    {
-      a[i] = j / 2 + 8;
-      i += 4;
-    }
-}
-
-void
-verify (void)
-{
-  int err;
-  #pragma omp target map(from:err)
-  {
-    err = 0;
-    for (int i = 0; i < 256; i++)
-      if (a[i] != (((i & 3) == 0 && i >= 8
-		    && i < 8 + 48 * 4)
-		   ? ((i - 8) / 4) + 16 : 0))
-	err = 1;
-    __builtin_memset (a, 0, sizeof (a));
-  }
-  if (err)
-    __builtin_abort ();
-}
-
-int
-main ()
-{
-#define TEST(x) x; verify ()
-  int vi = 8;
-  TEST (f1 (vi));
-  TEST (f2 (8, 3));
-  long long int vll = 8LL;
-  TEST (f3 (vll, 4LL));
-  TEST (f4 (8));
-  TEST (f5 (8, 3));
-  vll = 8LL;
-  TEST (f6<9> (vll, 4LL));
-  TEST (f7 (8));
-  short int vs = 8;
-  char vk = 3;
-  TEST (f8 (vs, vk));
-  TEST (f9 (8LL, 4LL));
-  TEST (f10 (8, 16, 2));
-  vs = 8;
-  vk = 3;
-  long int vl = 16;
-  TEST (f11 (vs, vk, vl, 2));
-  TEST (f12 (8LL, 4LL, 16, 2));
-  TEST (f13 (8, 16, 2));
-  TEST (f14 (8, 3, 16, 2));
-  vll = 8LL;
-  vk = 16;
-  vl = 2;
-  TEST (f15 (vll, 4LL, vk, vl));
-  vi = 8;
-  int vi2 = 16;
-  TEST (f16<int &> (vi, vi2, 2));
-  TEST (f17 (8, 3, 16, 2));
-  TEST (f18 (8LL, 4LL, 16, 2));
-  return 0;
-}

	Jakub

