This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[gomp] Fix degenerate omp for loops (PR libgomp/29949)


Hi!

As the testcases show, we weren't doing the right thing with loops that
had 0 iterations.
For the loops handled in libgomp, loops where start == end on entry were
correct (except ordered static), so say
#pragma omp for
  for (i = 4; i < 4; i++)
    body;
or
#pragma omp for
  for (i = 8; i >= 9; --i)
    body;
were ok.  But e.g.
#pragma omp for
  for (i = 4; i < 3; i++)
    body;
where on entry start > end (resp. start < end for negative decrement loops)
would run the first iteration and only after that stop.  This is fixed
by the loop.c patch, canonicalizing any 0 iteration loops to a form libgomp
already handles correctly.  gomp_loop_init now uses signed long types, because
the callers have the parameters they pass to it signed as well and the
ws struct it is filling has signed long fields as well.

For ordered static loops we crashed because there was an if (start == end)
return false; shortcut in GOMP_loop_ordered_static_start, but bailing out
that early is a bad idea, because its caller then calls GOMP_loop_end
which doesn't expect the workshare to be uninitialized.

Lastly, the compiler expanded loops (static without chunk size, static with
chunk size) failed because the arithmetic to compute the range given to the
current thread was mostly done in unsigned long and thus when N2 - N1
was negative (for positive STEP resp. positive for negative STEP), we ended
up with a huge range.  I tried to figure out why this was cast to utype,
but haven't found why it has been written that way.  Certainly doing the
arithmetic on the signed type (omp for iteration vars are always signed)
seems to handle this right and both make check RUNTESTFLAGS=gomp.exp
and libgomp make check pass.

Ok for 4.2/trunk?

2006-12-01  Jakub Jelinek  <jakub@redhat.com>

	PR libgomp/29949
	* omp-low.c (expand_omp_for_static_nochunk,
	expand_omp_for_static_chunk): Do all arithmetics in signed rather than
	unsigned type.

	* loop.c (gomp_loop_init): Make parameters signed.  Set ws->end to
	start if there shouldn't be any loop iterations.
	(gomp_loop_ordered_static_start): Remove start == end test.
	* testsuite/libgomp.c/pr29949-1.c: New test.
	* testsuite/libgomp.c/pr29949-2.c: New test.

--- gcc/omp-low.c.jj	2006-11-30 14:51:52.000000000 +0100
+++ gcc/omp-low.c	2006-12-01 16:44:51.000000000 +0100
@@ -2771,13 +2771,12 @@ expand_omp_for_static_nochunk (struct om
 			       struct omp_for_data *fd)
 {
   tree l0, l1, l2, n, q, s0, e0, e, t, nthreads, threadid;
-  tree type, utype, list;
+  tree type, list;
   basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
   basic_block fin_bb;
   block_stmt_iterator si;
 
   type = TREE_TYPE (fd->v);
-  utype = lang_hooks.types.unsigned_type (type);
 
   entry_bb = region->entry;
   seq_start_bb = create_empty_bb (entry_bb);
@@ -2795,12 +2794,12 @@ expand_omp_for_static_nochunk (struct om
 
   t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
   t = build_function_call_expr (t, NULL);
-  t = fold_convert (utype, t);
+  t = fold_convert (type, t);
   nthreads = get_formal_tmp_var (t, &list);
   
   t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
   t = build_function_call_expr (t, NULL);
-  t = fold_convert (utype, t);
+  t = fold_convert (type, t);
   threadid = get_formal_tmp_var (t, &list);
 
   fd->n1 = fold_convert (type, fd->n1);
@@ -2820,25 +2819,25 @@ expand_omp_for_static_nochunk (struct om
   t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
   t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
   t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
-  t = fold_convert (utype, t);
+  t = fold_convert (type, t);
   if (is_gimple_val (t))
     n = t;
   else
     n = get_formal_tmp_var (t, &list);
 
-  t = build2 (TRUNC_DIV_EXPR, utype, n, nthreads);
+  t = build2 (TRUNC_DIV_EXPR, type, n, nthreads);
   q = get_formal_tmp_var (t, &list);
 
-  t = build2 (MULT_EXPR, utype, q, nthreads);
-  t = build2 (NE_EXPR, utype, t, n);
-  t = build2 (PLUS_EXPR, utype, q, t);
+  t = build2 (MULT_EXPR, type, q, nthreads);
+  t = build2 (NE_EXPR, type, t, n);
+  t = build2 (PLUS_EXPR, type, q, t);
   q = get_formal_tmp_var (t, &list);
 
-  t = build2 (MULT_EXPR, utype, q, threadid);
+  t = build2 (MULT_EXPR, type, q, threadid);
   s0 = get_formal_tmp_var (t, &list);
 
-  t = build2 (PLUS_EXPR, utype, s0, q);
-  t = build2 (MIN_EXPR, utype, t, n);
+  t = build2 (PLUS_EXPR, type, s0, q);
+  t = build2 (MIN_EXPR, type, t, n);
   e0 = get_formal_tmp_var (t, &list);
 
   t = build2 (GE_EXPR, boolean_type_node, s0, e0);
@@ -2944,14 +2943,13 @@ expand_omp_for_static_chunk (struct omp_
 {
   tree l0, l1, l2, l3, l4, n, s0, e0, e, t;
   tree trip, nthreads, threadid;
-  tree type, utype;
+  tree type;
   basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
   basic_block trip_update_bb, cont_bb, fin_bb;
   tree list;
   block_stmt_iterator si;
 
   type = TREE_TYPE (fd->v);
-  utype = lang_hooks.types.unsigned_type (type);
 
   entry_bb = region->entry;
   iter_part_bb = create_empty_bb (entry_bb);
@@ -2973,12 +2971,12 @@ expand_omp_for_static_chunk (struct omp_
 
   t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
   t = build_function_call_expr (t, NULL);
-  t = fold_convert (utype, t);
+  t = fold_convert (type, t);
   nthreads = get_formal_tmp_var (t, &list);
   
   t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
   t = build_function_call_expr (t, NULL);
-  t = fold_convert (utype, t);
+  t = fold_convert (type, t);
   threadid = get_formal_tmp_var (t, &list);
 
   fd->n1 = fold_convert (type, fd->n1);
@@ -2993,7 +2991,7 @@ expand_omp_for_static_chunk (struct omp_
   if (!is_gimple_val (fd->step))
     fd->step = get_formal_tmp_var (fd->step, &list);
 
-  fd->chunk_size = fold_convert (utype, fd->chunk_size);
+  fd->chunk_size = fold_convert (type, fd->chunk_size);
   if (!is_gimple_val (fd->chunk_size))
     fd->chunk_size = get_formal_tmp_var (fd->chunk_size, &list);
 
@@ -3002,13 +3000,13 @@ expand_omp_for_static_chunk (struct omp_
   t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
   t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
   t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
-  t = fold_convert (utype, t);
+  t = fold_convert (type, t);
   if (is_gimple_val (t))
     n = t;
   else
     n = get_formal_tmp_var (t, &list);
 
-  t = build_int_cst (utype, 0);
+  t = build_int_cst (type, 0);
   trip = get_initialized_tmp_var (t, &list, NULL);
 
   si = bsi_last (entry_bb);
@@ -3019,13 +3017,13 @@ expand_omp_for_static_chunk (struct omp_
   /* Iteration space partitioning goes in ITER_PART_BB.  */
   list = alloc_stmt_list ();
 
-  t = build2 (MULT_EXPR, utype, trip, nthreads);
-  t = build2 (PLUS_EXPR, utype, t, threadid);
-  t = build2 (MULT_EXPR, utype, t, fd->chunk_size);
+  t = build2 (MULT_EXPR, type, trip, nthreads);
+  t = build2 (PLUS_EXPR, type, t, threadid);
+  t = build2 (MULT_EXPR, type, t, fd->chunk_size);
   s0 = get_formal_tmp_var (t, &list);
 
-  t = build2 (PLUS_EXPR, utype, s0, fd->chunk_size);
-  t = build2 (MIN_EXPR, utype, t, n);
+  t = build2 (PLUS_EXPR, type, s0, fd->chunk_size);
+  t = build2 (MIN_EXPR, type, t, n);
   e0 = get_formal_tmp_var (t, &list);
 
   t = build2 (LT_EXPR, boolean_type_node, s0, n);
@@ -3075,8 +3073,8 @@ expand_omp_for_static_chunk (struct omp_
   /* Trip update code goes into TRIP_UPDATE_BB.  */
   list = alloc_stmt_list ();
 
-  t = build_int_cst (utype, 1);
-  t = build2 (PLUS_EXPR, utype, trip, t);
+  t = build_int_cst (type, 1);
+  t = build2 (PLUS_EXPR, type, trip, t);
   t = build2 (MODIFY_EXPR, void_type_node, trip, t);
   gimplify_and_add (t, &list);
 
--- libgomp/loop.c.jj	2006-10-05 00:24:40.000000000 +0200
+++ libgomp/loop.c	2006-12-01 16:33:39.000000000 +0100
@@ -34,13 +34,14 @@
 /* Initialize the given work share construct from the given arguments.  */
 
 static inline void
-gomp_loop_init (struct gomp_work_share *ws, unsigned long start,
-		unsigned long end, unsigned long incr,
-		enum gomp_schedule_type sched, unsigned long chunk_size)
+gomp_loop_init (struct gomp_work_share *ws, long start, long end, long incr,
+		enum gomp_schedule_type sched, long chunk_size)
 {
   ws->sched = sched;
   ws->chunk_size = chunk_size;
-  ws->end = end;
+  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
+  ws->end = ((incr > 0 && start > end) || (incr < 0 && start < end))
+	    ? start : end;
   ws->incr = incr;
   ws->next = start;
 }
@@ -148,9 +149,6 @@ gomp_loop_ordered_static_start (long sta
 {
   struct gomp_thread *thr = gomp_thread ();
 
-  if (start == end)
-    return false;
-
   if (gomp_work_share_start (true))
     {
       gomp_loop_init (thr->ts.work_share, start, end, incr,
--- libgomp/testsuite/libgomp.c/pr29949-1.c.jj	2006-12-01 16:52:27.000000000 +0100
+++ libgomp/testsuite/libgomp.c/pr29949-1.c	2006-12-01 16:52:08.000000000 +0100
@@ -0,0 +1,328 @@
+/* PR libgomp/29949 */
+/* { dg-options "-O2 -fopenmp" } */
+/* { dg-do run } */
+
+extern void abort (void);
+
+int cnt;
+
+void
+test1 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (dynamic)
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test2 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (dynamic)
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test3 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (guided)
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test4 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (guided)
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test5 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (dynamic) ordered
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test6 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (dynamic) ordered
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test7 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (guided) ordered
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test8 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (guided) ordered
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test9 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (dynamic)
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test10 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (dynamic)
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test11 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (guided)
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test12 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (guided)
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test13 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (dynamic) ordered
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test14 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (dynamic) ordered
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test15 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (guided) ordered
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test16 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (guided) ordered
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+int
+__attribute__((noinline))
+test (long j1, long k1, long j2, long k2)
+{
+  test1 (j1, k1, j2, k2);
+  test2 (j1, k1, j2, k2);
+  test3 (j1, k1, j2, k2);
+  test4 (j1, k1, j2, k2);
+  test5 (j1, k1, j2, k2);
+  test6 (j1, k1, j2, k2);
+  test7 (j1, k1, j2, k2);
+  test8 (j1, k1, j2, k2);
+  test9 (j1, k1, j2, k2);
+  test10 (j1, k1, j2, k2);
+  test11 (j1, k1, j2, k2);
+  test12 (j1, k1, j2, k2);
+  test13 (j1, k1, j2, k2);
+  test14 (j1, k1, j2, k2);
+  test15 (j1, k1, j2, k2);
+  test16 (j1, k1, j2, k2);
+  return cnt;
+}
+
+int
+main (void)
+{
+  test (1, 5, 1, 5);
+  test (5, 5, 5, 5);
+  test (5, 4, 5, 4);
+  test (5, 1, 5, 1);
+  return 0;
+}
--- libgomp/testsuite/libgomp.c/pr29949-2.c.jj	2006-12-01 16:52:29.000000000 +0100
+++ libgomp/testsuite/libgomp.c/pr29949-2.c	2006-12-01 16:52:13.000000000 +0100
@@ -0,0 +1,328 @@
+/* PR libgomp/29949 */
+/* { dg-options "-O2 -fopenmp" } */
+/* { dg-do run } */
+
+extern void abort (void);
+
+int cnt;
+
+void
+test1 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static)
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test2 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static)
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test3 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static, 1)
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test4 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static, 1)
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test5 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static) ordered
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test6 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static) ordered
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test7 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static, 1) ordered
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test8 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static, 1) ordered
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test9 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static)
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test10 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static)
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test11 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static, 1)
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test12 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static, 1)
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test13 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static) ordered
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test14 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static) ordered
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test15 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static, 1) ordered
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+void
+test16 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static, 1) ordered
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
+    abort ();
+}
+
+int
+__attribute__((noinline))
+test (long j1, long k1, long j2, long k2)
+{
+  test1 (j1, k1, j2, k2);
+  test2 (j1, k1, j2, k2);
+  test3 (j1, k1, j2, k2);
+  test4 (j1, k1, j2, k2);
+  test5 (j1, k1, j2, k2);
+  test6 (j1, k1, j2, k2);
+  test7 (j1, k1, j2, k2);
+  test8 (j1, k1, j2, k2);
+  test9 (j1, k1, j2, k2);
+  test10 (j1, k1, j2, k2);
+  test11 (j1, k1, j2, k2);
+  test12 (j1, k1, j2, k2);
+  test13 (j1, k1, j2, k2);
+  test14 (j1, k1, j2, k2);
+  test15 (j1, k1, j2, k2);
+  test16 (j1, k1, j2, k2);
+  return cnt;
+}
+
+int
+main (void)
+{
+  test (1, 5, 1, 5);
+  test (5, 5, 5, 5);
+  test (5, 4, 5, 4);
+  test (5, 1, 5, 1);
+  return 0;
+}

	Jakub


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]