[PATCH] Improve match_simplify_replacement in phi-opt

Richard Biener richard.guenther@gmail.com
Wed Jun 2 06:45:32 GMT 2021


On Tue, Jun 1, 2021 at 9:07 PM apinski--- via Gcc-patches
<gcc-patches@gcc.gnu.org> wrote:
>
> From: Andrew Pinski <apinski@marvell.com>
>
> This improves match_simplify_replacement in phi-opt to handle the
> case where there is one cheap preparation statement in the
> middle basic block similar to xor_replacement and others.
> This allows xor_replacement to be removed too.
>
> OK?  Bootstrapped and tested on x86_64-linux-gnu with no regressions.
>
> gcc/ChangeLog:
>
>         PR tree-optimization/25290
>         * tree-ssa-phiopt.c (xor_replacement): Delete.
>         (tree_ssa_phiopt_worker): Delete use of xor_replacement.
>         (match_simplify_replacement): Allow one cheap preparation
>         statement that can be moved to before the if.
>
> gcc/testsuite/ChangeLog:
>
>         * gcc.dg/tree-ssa/pr96928-1.c: Fix testcase for now that ~
>         happens on the outside of the bit_xor.
> ---
>  gcc/testsuite/gcc.dg/tree-ssa/pr96928-1.c |   4 +-
>  gcc/tree-ssa-phiopt.c                     | 136 +++++-----------------
>  2 files changed, 28 insertions(+), 112 deletions(-)
>
> diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr96928-1.c b/gcc/testsuite/gcc.dg/tree-ssa/pr96928-1.c
> index a2770e5e896..2e86620da11 100644
> --- a/gcc/testsuite/gcc.dg/tree-ssa/pr96928-1.c
> +++ b/gcc/testsuite/gcc.dg/tree-ssa/pr96928-1.c
> @@ -1,9 +1,9 @@
>  /* PR tree-optimization/96928 */
>  /* { dg-do compile } */
> -/* { dg-options "-O2 -fdump-tree-phiopt2" } */
> +/* { dg-options "-O2 -fdump-tree-phiopt2 -fdump-tree-optimized" } */
>  /* { dg-final { scan-tree-dump-times " = a_\[0-9]*\\\(D\\\) >> " 5 "phiopt2" } } */
>  /* { dg-final { scan-tree-dump-times " = ~c_\[0-9]*\\\(D\\\);" 1 "phiopt2" } } */
> -/* { dg-final { scan-tree-dump-times " = ~" 1 "phiopt2" } } */
> +/* { dg-final { scan-tree-dump-times " = ~" 1 "optimized" } } */
>  /* { dg-final { scan-tree-dump-times " = \[abc_0-9\\\(\\\)D]* \\\^ " 5 "phiopt2" } } */
>  /* { dg-final { scan-tree-dump-not "a < 0" "phiopt2" } } */
>
> diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
> index 969b868397e..4f98e505029 100644
> --- a/gcc/tree-ssa-phiopt.c
> +++ b/gcc/tree-ssa-phiopt.c
> @@ -63,8 +63,6 @@ static bool minmax_replacement (basic_block, basic_block,
>                                 edge, edge, gphi *, tree, tree);
>  static bool abs_replacement (basic_block, basic_block,
>                              edge, edge, gphi *, tree, tree);
> -static bool xor_replacement (basic_block, basic_block,
> -                            edge, edge, gphi *, tree, tree);
>  static bool spaceship_replacement (basic_block, basic_block,
>                                    edge, edge, gphi *, tree, tree);
>  static bool cond_removal_in_popcount_clz_ctz_pattern (basic_block, basic_block,
> @@ -352,9 +350,6 @@ tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
>             cfgchanged = true;
>           else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
>             cfgchanged = true;
> -         else if (!early_p
> -                  && xor_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
> -           cfgchanged = true;
>           else if (!early_p
>                    && cond_removal_in_popcount_clz_ctz_pattern (bb, bb1, e1,
>                                                                 e2, phi, arg0,
> @@ -801,9 +796,23 @@ match_simplify_replacement (basic_block cond_bb, basic_block middle_bb,
>    edge true_edge, false_edge;
>    gimple_seq seq = NULL;
>    tree result;
> +  gimple *stmt_to_move = NULL;
>
> +  /* If the basic block only has a cheap preparation statement. */
>    if (!empty_block_p (middle_bb))
> -    return false;
> +    {
> +      stmt_to_move = last_and_only_stmt (middle_bb);
> +      if (!stmt_to_move)
> +       return false;
> +      if (gimple_could_trap_p (stmt_to_move)
> +         || gimple_has_side_effects (stmt_to_move))
> +       return false;

In tree-ssa-ifcombine we additionally check gimple_uses_undefined_value_p ()

> +      if ((!is_gimple_assign (stmt_to_move)
> +          || TREE_CODE (gimple_assign_lhs (stmt_to_move)) != SSA_NAME)
> +         && (!is_gimple_call (stmt_to_move)
> +             || !gimple_inexpensive_call_p (as_a <gcall *>  (stmt_to_move))))

any reason to allow calls at this point?  ifcombine has

          /* const calls don't match any of the above, yet they could
             still have some side-effects - they could contain
             gimple_could_trap_p statements, like floating point
             exceptions or integer division by zero.  See PR70586.
             FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
             should handle this.  */

and disallows any calls.  I'm not sure whether a gimple_inexpensive_call_p call
could trap — at least we don't document that it can't (thinking
of IFN_SQRT, for example).

I think we also want to restrict the intermediate stmts to those feeding the
PHI in the CFG merge and not allow arbitrary random stmts?  Thus sth
like

      tree lhs = gimple_get_lhs (stmt_to_move);
      if (!lhs || TREE_CODE (lhs) != SSA_NAME || !single_imm_use (...)
|| use_stmt != phi)
        return false;

Richard.

> +       return false;
> +    }
>
>    /* Special case A ? B : B as this will always simplify to B. */
>    if (operand_equal_for_phi_arg_p (arg0, arg1))
> @@ -844,7 +853,17 @@ match_simplify_replacement (basic_block cond_bb, basic_block middle_bb,
>      return false;
>
>    gsi = gsi_last_bb (cond_bb);
> -
> +  if (stmt_to_move)
> +    {
> +      if (dump_file && (dump_flags & TDF_DETAILS))
> +       {
> +         fprintf (dump_file, "statement un-sinked:\n");
> +         print_gimple_stmt (dump_file, stmt_to_move, 0,
> +                          TDF_VOPS|TDF_MEMSYMS);
> +       }
> +      gimple_stmt_iterator gsi1 = gsi_for_stmt (stmt_to_move);
> +      gsi_move_before (&gsi1, &gsi);
> +    }
>    if (seq)
>      gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
>
> @@ -2592,109 +2611,6 @@ abs_replacement (basic_block cond_bb, basic_block middle_bb,
>    return true;
>  }
>
> -/* Optimize x < 0 ? ~y : y into (x >> (prec-1)) ^ y.  */
> -
> -static bool
> -xor_replacement (basic_block cond_bb, basic_block middle_bb,
> -                edge e0 ATTRIBUTE_UNUSED, edge e1,
> -                gphi *phi, tree arg0, tree arg1)
> -{
> -  if (!INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
> -    return false;
> -
> -  /* OTHER_BLOCK must have only one executable statement which must have the
> -     form arg0 = ~arg1 or arg1 = ~arg0.  */
> -
> -  gimple *assign = last_and_only_stmt (middle_bb);
> -  /* If we did not find the proper one's complement assignment, then we cannot
> -     optimize.  */
> -  if (assign == NULL)
> -    return false;
> -
> -  /* If we got here, then we have found the only executable statement
> -     in OTHER_BLOCK.  If it is anything other than arg = ~arg1 or
> -     arg1 = ~arg0, then we cannot optimize.  */
> -  if (!is_gimple_assign (assign))
> -    return false;
> -
> -  if (gimple_assign_rhs_code (assign) != BIT_NOT_EXPR)
> -    return false;
> -
> -  tree lhs = gimple_assign_lhs (assign);
> -  tree rhs = gimple_assign_rhs1 (assign);
> -
> -  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
> -  if (!(lhs == arg0 && rhs == arg1) && !(lhs == arg1 && rhs == arg0))
> -    return false;
> -
> -  gimple *cond = last_stmt (cond_bb);
> -  tree result = PHI_RESULT (phi);
> -
> -  /* Only relationals comparing arg[01] against zero are interesting.  */
> -  enum tree_code cond_code = gimple_cond_code (cond);
> -  if (cond_code != LT_EXPR && cond_code != GE_EXPR)
> -    return false;
> -
> -  /* Make sure the conditional is x OP 0.  */
> -  tree clhs = gimple_cond_lhs (cond);
> -  if (TREE_CODE (clhs) != SSA_NAME
> -      || !INTEGRAL_TYPE_P (TREE_TYPE (clhs))
> -      || TYPE_UNSIGNED (TREE_TYPE (clhs))
> -      || TYPE_PRECISION (TREE_TYPE (clhs)) != TYPE_PRECISION (TREE_TYPE (arg1))
> -      || !integer_zerop (gimple_cond_rhs (cond)))
> -    return false;
> -
> -  /* We need to know which is the true edge and which is the false
> -     edge so that we know if have xor or inverted xor.  */
> -  edge true_edge, false_edge;
> -  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
> -
> -  /* For GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
> -     will need to invert the result.  Similarly for LT_EXPR if
> -     the false edge goes to OTHER_BLOCK.  */
> -  edge e;
> -  if (cond_code == GE_EXPR)
> -    e = true_edge;
> -  else
> -    e = false_edge;
> -
> -  bool invert = e->dest == middle_bb;
> -
> -  result = duplicate_ssa_name (result, NULL);
> -
> -  gimple_stmt_iterator gsi = gsi_last_bb (cond_bb);
> -
> -  int prec = TYPE_PRECISION (TREE_TYPE (clhs));
> -  gimple *new_stmt
> -    = gimple_build_assign (make_ssa_name (TREE_TYPE (clhs)), RSHIFT_EXPR, clhs,
> -                          build_int_cst (integer_type_node, prec - 1));
> -  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
> -
> -  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (clhs)))
> -    {
> -      new_stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (result)),
> -                                     NOP_EXPR, gimple_assign_lhs (new_stmt));
> -      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
> -    }
> -  lhs = gimple_assign_lhs (new_stmt);
> -
> -  if (invert)
> -    {
> -      new_stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (result)),
> -                                     BIT_NOT_EXPR, rhs);
> -      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
> -      rhs = gimple_assign_lhs (new_stmt);
> -    }
> -
> -  new_stmt = gimple_build_assign (result, BIT_XOR_EXPR, lhs, rhs);
> -  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
> -
> -  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
> -
> -  /* Note that we optimized this PHI.  */
> -  return true;
> -}
> -
>  /* Auxiliary functions to determine the set of memory accesses which
>     can't trap because they are preceded by accesses to the same memory
>     portion.  We do that for MEM_REFs, so we only need to track
> --
> 2.17.1
>


More information about the Gcc-patches mailing list