diff --git a/gcc/testsuite/gcc.dg/torture/pr69652.c b/gcc/testsuite/gcc.dg/torture/pr69652.c new file mode 100644 index 0000000..91f30cf --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/pr69652.c @@ -0,0 +1,14 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -ffast-math -ftree-vectorize " } */ +/* { dg-additional-options "-mavx" { target { i?86-*-* x86_64-*-* } } } */ + +void fn1(double **matrix, int column, int row, int n) +{ + int k; + for (k = 0; k < n; k++) + if (matrix[row][k] != matrix[column][k]) + { + matrix[column][k] = -matrix[column][k]; + matrix[row][k] = matrix[row][k] - matrix[column][k]; + } +} \ No newline at end of file diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c index 60b0a09..79b2056 100644 --- a/gcc/tree-vect-loop.c +++ b/gcc/tree-vect-loop.c @@ -6959,8 +6959,9 @@ optimize_mask_stores (struct loop *loop) unsigned i; basic_block bb; gimple_stmt_iterator gsi; - gimple *stmt, *stmt1 = NULL; + gimple *stmt; auto_vec<gimple *> worklist; + auto_vec<gimple *> scalar_vuse; vect_location = find_loop_location (loop); /* Pick up all masked stores in loop if any. */ @@ -6971,6 +6972,8 @@ optimize_mask_stores (struct loop *loop) gsi_next (&gsi)) { stmt = gsi_stmt (gsi); + if (is_gimple_debug (stmt)) + continue; if (is_gimple_call (stmt) && gimple_call_internal_p (stmt) && gimple_call_internal_fn (stmt) == IFN_MASK_STORE) @@ -6991,6 +6994,7 @@ optimize_mask_stores (struct loop *loop) basic_block store_bb, join_bb; gimple_stmt_iterator gsi_to; tree vdef, new_vdef; + tree last_vuse; gphi *phi; tree vectype; tree zero; @@ -7033,11 +7037,14 @@ optimize_mask_stores (struct loop *loop) gimple_set_vdef (last, new_vdef); phi = create_phi_node (vdef, join_bb); add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION); + gcc_assert (scalar_vuse.is_empty ()); /* Put all masked stores with the same mask to STORE_BB if possible. */ while (true) { gimple_stmt_iterator gsi_from; + gimple *stmt1 = NULL; + /* Move masked store to STORE_BB. 
*/ last_store = last; gsi = gsi_for_stmt (last); @@ -7054,68 +7061,98 @@ optimize_mask_stores (struct loop *loop) "Move stmt to created bb\n"); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0); } - /* Move all stored value producers if possible. */ - while (!gsi_end_p (gsi)) - { - tree lhs; - imm_use_iterator imm_iter; - use_operand_p use_p; - bool res; - stmt1 = gsi_stmt (gsi); - /* Do not consider statements writing to memory. */ - if (gimple_vdef (stmt1)) - break; - gsi_from = gsi; - gsi_prev (&gsi); - lhs = gimple_get_lhs (stmt1); - if (!lhs) - break; - - /* LHS of vectorized stmt must be SSA_NAME. */ - if (TREE_CODE (lhs) != SSA_NAME) - break; - - /* Skip scalar statements. */ - if (!VECTOR_TYPE_P (TREE_TYPE (lhs))) + /* Move all stored value producers if possible. */ + while (!gsi_end_p (gsi)) + { + tree lhs; + imm_use_iterator imm_iter; + use_operand_p use_p; + bool res; + + /* Skip debug statements.  Advance the iterator first: the loop + walks backwards, so a bare `continue' here would spin forever + on the same debug statement. */ + if (is_gimple_debug (gsi_stmt (gsi))) + { + gsi_prev (&gsi); + continue; + } + stmt1 = gsi_stmt (gsi); + /* Do not consider writing to memory, volatile and call + statements. */ + if (gimple_vdef (stmt1) + || gimple_has_volatile_ops (stmt1) + || !is_gimple_assign (stmt1)) + break; + gsi_from = gsi; + gsi_prev (&gsi); + lhs = gimple_assign_lhs (stmt1); + if (!lhs) + break; + + /* LHS of vectorized stmt must be SSA_NAME. */ + if (TREE_CODE (lhs) != SSA_NAME) + break; + + /* Skip scalar statements. */ + if (!VECTOR_TYPE_P (TREE_TYPE (lhs))) + { + /* If scalar statement has vuse we need to modify it + when another masked store will be sunk. */ + if (gimple_vuse (stmt1)) + scalar_vuse.safe_push (stmt1); continue; + } - /* Check that LHS does not have uses outside of STORE_BB. */ - res = true; - FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs) - { - gimple *use_stmt; - use_stmt = USE_STMT (use_p); - if (gimple_bb (use_stmt) != store_bb) - { - res = false; - break; - } - } - if (!res) - break; + /* Check that LHS does not have uses outside of STORE_BB. 
*/ + res = true; + FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs) + { + gimple *use_stmt; + use_stmt = USE_STMT (use_p); + if (is_gimple_debug (use_stmt)) + continue; + if (gimple_bb (use_stmt) != store_bb) + { + res = false; + break; + } + } + if (!res) + break; - if (gimple_vuse (stmt1) - && gimple_vuse (stmt1) != gimple_vuse (last_store)) - break; + if (gimple_vuse (stmt1) + && gimple_vuse (stmt1) != gimple_vuse (last_store)) + break; - /* Can move STMT1 to STORE_BB. */ - if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "Move stmt to created bb\n"); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0); - } - gsi_move_before (&gsi_from, &gsi_to); - /* Shift GSI_TO for further insertion. */ - gsi_prev (&gsi_to); - } - /* Put other masked stores with the same mask to STORE_BB. */ - if (worklist.is_empty () - || gimple_call_arg (worklist.last (), 2) != mask - || worklist.last () != stmt1) - break; - last = worklist.pop (); + /* Can move STMT1 to STORE_BB. */ + if (dump_enabled_p ()) + { + dump_printf_loc (MSG_NOTE, vect_location, + "Move stmt to created bb\n"); + dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0); + } + gsi_move_before (&gsi_from, &gsi_to); + /* Shift GSI_TO for further insertion. */ + gsi_prev (&gsi_to); + } + /* Put other masked stores with the same mask to STORE_BB. */ + if (worklist.is_empty () + || gimple_call_arg (worklist.last (), 2) != mask + || worklist.last () != stmt1) + break; + last = worklist.pop (); } add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION); + /* Mask stores motion could crossing scalar statements with vuse + which should be corrected. */ + last_vuse = gimple_vuse (last_store); + while (!scalar_vuse.is_empty ()) + { + stmt = scalar_vuse.pop (); + if (gimple_vuse (stmt) != last_vuse) + { + gimple_set_vuse (stmt, last_vuse); + update_stmt (stmt); + } + } } }