This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

Re: [14/46] Make STMT_VINFO_VEC_STMT a stmt_vec_info


On Tue, Jul 24, 2018 at 11:58 AM Richard Sandiford
<richard.sandiford@arm.com> wrote:
>
> This patch changes STMT_VINFO_VEC_STMT from a gimple stmt to a
> stmt_vec_info and makes the vectorizable_* routines pass back
> a stmt_vec_info to vect_transform_stmt.

OK, but - I don't think we ever "use" that stmt_info on vectorized stmts apart
from the chaining via related-stmt?  I'd also like to get rid of that chaining
and instead do something similar to SLP, where we simply have a vec<> of
vectorized stmts.

Richard.

>
> 2018-07-24  Richard Sandiford  <richard.sandiford@arm.com>
>
> gcc/
>         * tree-vectorizer.h (_stmt_vec_info::vectorized_stmt): Change from
>         a gimple stmt to a stmt_vec_info.
>         (vectorizable_condition, vectorizable_live_operation)
>         (vectorizable_reduction, vectorizable_induction): Pass back the
>         vectorized statement as a stmt_vec_info.
>         * tree-vect-data-refs.c (vect_record_grouped_load_vectors): Update
>         use of STMT_VINFO_VEC_STMT.
>         * tree-vect-loop.c (vect_create_epilog_for_reduction): Likewise,
>         accumulating the inner phis that feed the STMT_VINFO_VEC_STMT
>         as stmt_vec_infos rather than gimple stmts.
>         (vectorize_fold_left_reduction): Change vec_stmt from a gimple stmt
>         to a stmt_vec_info.
>         (vectorizable_live_operation): Likewise.
>         (vectorizable_reduction, vectorizable_induction): Likewise,
>         updating use of STMT_VINFO_VEC_STMT.
>         * tree-vect-stmts.c (vect_get_vec_def_for_operand_1): Update use
>         of STMT_VINFO_VEC_STMT.
>         (vect_build_gather_load_calls, vectorizable_bswap, vectorizable_call)
>         (vectorizable_simd_clone_call, vectorizable_conversion)
>         (vectorizable_assignment, vectorizable_shift, vectorizable_operation)
>         (vectorizable_store, vectorizable_load, vectorizable_condition)
>         (vectorizable_comparison, can_vectorize_live_stmts): Change vec_stmt
>         from a gimple stmt to a stmt_vec_info.
>         (vect_transform_stmt): Update use of STMT_VINFO_VEC_STMT.  Pass a
>         pointer to a stmt_vec_info to the vectorizable_* routines.
>
> Index: gcc/tree-vectorizer.h
> ===================================================================
> --- gcc/tree-vectorizer.h       2018-07-24 10:22:44.297185652 +0100
> +++ gcc/tree-vectorizer.h       2018-07-24 10:22:47.489157307 +0100
> @@ -812,7 +812,7 @@ struct _stmt_vec_info {
>    tree vectype;
>
>    /* The vectorized version of the stmt.  */
> -  gimple *vectorized_stmt;
> +  stmt_vec_info vectorized_stmt;
>
>
>    /* The following is relevant only for stmts that contain a non-scalar
> @@ -1560,7 +1560,7 @@ extern void vect_remove_stores (gimple *
>  extern bool vect_analyze_stmt (gimple *, bool *, slp_tree, slp_instance,
>                                stmt_vector_for_cost *);
>  extern bool vectorizable_condition (gimple *, gimple_stmt_iterator *,
> -                                   gimple **, tree, int, slp_tree,
> +                                   stmt_vec_info *, tree, int, slp_tree,
>                                     stmt_vector_for_cost *);
>  extern void vect_get_load_cost (stmt_vec_info, int, bool,
>                                 unsigned int *, unsigned int *,
> @@ -1649,13 +1649,13 @@ extern tree vect_get_loop_mask (gimple_s
>  extern struct loop *vect_transform_loop (loop_vec_info);
>  extern loop_vec_info vect_analyze_loop_form (struct loop *, vec_info_shared *);
>  extern bool vectorizable_live_operation (gimple *, gimple_stmt_iterator *,
> -                                        slp_tree, int, gimple **,
> +                                        slp_tree, int, stmt_vec_info *,
>                                          stmt_vector_for_cost *);
>  extern bool vectorizable_reduction (gimple *, gimple_stmt_iterator *,
> -                                   gimple **, slp_tree, slp_instance,
> +                                   stmt_vec_info *, slp_tree, slp_instance,
>                                     stmt_vector_for_cost *);
>  extern bool vectorizable_induction (gimple *, gimple_stmt_iterator *,
> -                                   gimple **, slp_tree,
> +                                   stmt_vec_info *, slp_tree,
>                                     stmt_vector_for_cost *);
>  extern tree get_initial_def_for_reduction (gimple *, tree, tree *);
>  extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code);
> Index: gcc/tree-vect-data-refs.c
> ===================================================================
> --- gcc/tree-vect-data-refs.c   2018-07-24 10:22:44.285185759 +0100
> +++ gcc/tree-vect-data-refs.c   2018-07-24 10:22:47.485157343 +0100
> @@ -6401,18 +6401,17 @@ vect_record_grouped_load_vectors (gimple
>              {
>                if (!DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
>                  {
> -                 gimple *prev_stmt =
> -                   STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
> +                 stmt_vec_info prev_stmt_info
> +                   = STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
>                   stmt_vec_info rel_stmt_info
> -                   = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
> +                   = STMT_VINFO_RELATED_STMT (prev_stmt_info);
>                   while (rel_stmt_info)
>                     {
> -                     prev_stmt = rel_stmt_info;
> +                     prev_stmt_info = rel_stmt_info;
>                       rel_stmt_info = STMT_VINFO_RELATED_STMT (rel_stmt_info);
>                     }
>
> -                 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt))
> -                   = new_stmt_info;
> +                 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
>                  }
>              }
>
> Index: gcc/tree-vect-loop.c
> ===================================================================
> --- gcc/tree-vect-loop.c        2018-07-24 10:22:44.289185723 +0100
> +++ gcc/tree-vect-loop.c        2018-07-24 10:22:47.489157307 +0100
> @@ -4445,7 +4445,7 @@ vect_create_epilog_for_reduction (vec<tr
>    gimple *use_stmt, *reduction_phi = NULL;
>    bool nested_in_vect_loop = false;
>    auto_vec<gimple *> new_phis;
> -  auto_vec<gimple *> inner_phis;
> +  auto_vec<stmt_vec_info> inner_phis;
>    enum vect_def_type dt = vect_unknown_def_type;
>    int j, i;
>    auto_vec<tree> scalar_results;
> @@ -4455,7 +4455,7 @@ vect_create_epilog_for_reduction (vec<tr
>    bool slp_reduc = false;
>    bool direct_slp_reduc;
>    tree new_phi_result;
> -  gimple *inner_phi = NULL;
> +  stmt_vec_info inner_phi = NULL;
>    tree induction_index = NULL_TREE;
>
>    if (slp_node)
> @@ -4605,7 +4605,7 @@ vect_create_epilog_for_reduction (vec<tr
>        tree indx_before_incr, indx_after_incr;
>        poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
>
> -      gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
> +      gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info)->stmt;
>        gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
>
>        int scalar_precision
> @@ -4738,20 +4738,21 @@ vect_create_epilog_for_reduction (vec<tr
>        inner_phis.create (vect_defs.length ());
>        FOR_EACH_VEC_ELT (new_phis, i, phi)
>         {
> +         stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi);
>           tree new_result = copy_ssa_name (PHI_RESULT (phi));
>           gphi *outer_phi = create_phi_node (new_result, exit_bb);
>           SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
>                            PHI_RESULT (phi));
>           prev_phi_info = loop_vinfo->add_stmt (outer_phi);
> -         inner_phis.quick_push (phi);
> +         inner_phis.quick_push (phi_info);
>           new_phis[i] = outer_phi;
> -          while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
> +         while (STMT_VINFO_RELATED_STMT (phi_info))
>              {
> -             phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
> -             new_result = copy_ssa_name (PHI_RESULT (phi));
> +             phi_info = STMT_VINFO_RELATED_STMT (phi_info);
> +             new_result = copy_ssa_name (PHI_RESULT (phi_info->stmt));
>               outer_phi = create_phi_node (new_result, exit_bb);
>               SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
> -                              PHI_RESULT (phi));
> +                              PHI_RESULT (phi_info->stmt));
>               stmt_vec_info outer_phi_info = loop_vinfo->add_stmt (outer_phi);
>               STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi_info;
>               prev_phi_info = outer_phi_info;
> @@ -5644,7 +5645,8 @@ vect_create_epilog_for_reduction (vec<tr
>               if (double_reduc)
>                 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
>               else
> -               STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
> +               STMT_VINFO_VEC_STMT (exit_phi_vinfo)
> +                 = vinfo_for_stmt (epilog_stmt);
>                if (!double_reduc
>                    || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
>                        != vect_double_reduction_def)
> @@ -5706,8 +5708,8 @@ vect_create_epilog_for_reduction (vec<tr
>                    add_phi_arg (vect_phi, vect_phi_init,
>                                 loop_preheader_edge (outer_loop),
>                                 UNKNOWN_LOCATION);
> -                  add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
> -                               loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
> +                 add_phi_arg (vect_phi, PHI_RESULT (inner_phi->stmt),
> +                              loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
>                    if (dump_enabled_p ())
>                      {
>                        dump_printf_loc (MSG_NOTE, vect_location,
> @@ -5846,7 +5848,7 @@ vect_expand_fold_left (gimple_stmt_itera
>
>  static bool
>  vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
> -                              gimple **vec_stmt, slp_tree slp_node,
> +                              stmt_vec_info *vec_stmt, slp_tree slp_node,
>                                gimple *reduc_def_stmt,
>                                tree_code code, internal_fn reduc_fn,
>                                tree ops[3], tree vectype_in,
> @@ -6070,7 +6072,7 @@ is_nonwrapping_integer_induction (gimple
>
>  bool
>  vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
> -                       gimple **vec_stmt, slp_tree slp_node,
> +                       stmt_vec_info *vec_stmt, slp_tree slp_node,
>                         slp_instance slp_node_instance,
>                         stmt_vector_for_cost *cost_vec)
>  {
> @@ -6220,7 +6222,8 @@ vectorizable_reduction (gimple *stmt, gi
>                   else
>                     {
>                       if (j == 0)
> -                       STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_phi;
> +                       STMT_VINFO_VEC_STMT (stmt_info)
> +                         = *vec_stmt = new_phi_info;
>                       else
>                         STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi_info;
>                       prev_phi_info = new_phi_info;
> @@ -7201,7 +7204,7 @@ vectorizable_reduction (gimple *stmt, gi
>    /* Finalize the reduction-phi (set its arguments) and create the
>       epilog reduction code.  */
>    if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
> -    vect_defs[0] = gimple_get_lhs (*vec_stmt);
> +    vect_defs[0] = gimple_get_lhs ((*vec_stmt)->stmt);
>
>    vect_create_epilog_for_reduction (vect_defs, stmt, reduc_def_stmt,
>                                     epilog_copies, reduc_fn, phis,
> @@ -7262,7 +7265,7 @@ vect_worthwhile_without_simd_p (vec_info
>  bool
>  vectorizable_induction (gimple *phi,
>                         gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
> -                       gimple **vec_stmt, slp_tree slp_node,
> +                       stmt_vec_info *vec_stmt, slp_tree slp_node,
>                         stmt_vector_for_cost *cost_vec)
>  {
>    stmt_vec_info stmt_info = vinfo_for_stmt (phi);
> @@ -7700,7 +7703,7 @@ vectorizable_induction (gimple *phi,
>    add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
>                UNKNOWN_LOCATION);
>
> -  STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi;
> +  STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi_info;
>
>    /* In case that vectorization factor (VF) is bigger than the number
>       of elements that we can fit in a vectype (nunits), we have to generate
> @@ -7779,7 +7782,7 @@ vectorizable_induction (gimple *phi,
>           gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
>                       && !STMT_VINFO_LIVE_P (stmt_vinfo));
>
> -         STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
> +         STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt_info;
>           if (dump_enabled_p ())
>             {
>               dump_printf_loc (MSG_NOTE, vect_location,
> @@ -7811,7 +7814,7 @@ vectorizable_induction (gimple *phi,
>  vectorizable_live_operation (gimple *stmt,
>                              gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
>                              slp_tree slp_node, int slp_index,
> -                            gimple **vec_stmt,
> +                            stmt_vec_info *vec_stmt,
>                              stmt_vector_for_cost *)
>  {
>    stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
> Index: gcc/tree-vect-stmts.c
> ===================================================================
> --- gcc/tree-vect-stmts.c       2018-07-24 10:22:44.293185688 +0100
> +++ gcc/tree-vect-stmts.c       2018-07-24 10:22:47.489157307 +0100
> @@ -1465,7 +1465,7 @@ vect_init_vector (gimple *stmt, tree val
>  vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
>  {
>    tree vec_oprnd;
> -  gimple *vec_stmt;
> +  stmt_vec_info vec_stmt_info;
>    stmt_vec_info def_stmt_info = NULL;
>
>    switch (dt)
> @@ -1482,21 +1482,19 @@ vect_get_vec_def_for_operand_1 (gimple *
>          /* Get the def from the vectorized stmt.  */
>          def_stmt_info = vinfo_for_stmt (def_stmt);
>
> -        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
> -        /* Get vectorized pattern statement.  */
> -        if (!vec_stmt
> -            && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
> -            && !STMT_VINFO_RELEVANT (def_stmt_info))
> -         vec_stmt = (STMT_VINFO_VEC_STMT
> -                     (STMT_VINFO_RELATED_STMT (def_stmt_info)));
> -        gcc_assert (vec_stmt);
> -       if (gimple_code (vec_stmt) == GIMPLE_PHI)
> -         vec_oprnd = PHI_RESULT (vec_stmt);
> -       else if (is_gimple_call (vec_stmt))
> -         vec_oprnd = gimple_call_lhs (vec_stmt);
> +       vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
> +       /* Get vectorized pattern statement.  */
> +       if (!vec_stmt_info
> +           && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
> +           && !STMT_VINFO_RELEVANT (def_stmt_info))
> +         vec_stmt_info = (STMT_VINFO_VEC_STMT
> +                          (STMT_VINFO_RELATED_STMT (def_stmt_info)));
> +       gcc_assert (vec_stmt_info);
> +       if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
> +         vec_oprnd = PHI_RESULT (phi);
>         else
> -         vec_oprnd = gimple_assign_lhs (vec_stmt);
> -        return vec_oprnd;
> +         vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
> +       return vec_oprnd;
>        }
>
>      /* operand is defined by a loop header phi.  */
> @@ -1507,14 +1505,14 @@ vect_get_vec_def_for_operand_1 (gimple *
>        {
>         gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
>
> -        /* Get the def from the vectorized stmt.  */
> -        def_stmt_info = vinfo_for_stmt (def_stmt);
> -        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
> -       if (gimple_code (vec_stmt) == GIMPLE_PHI)
> -         vec_oprnd = PHI_RESULT (vec_stmt);
> +       /* Get the def from the vectorized stmt.  */
> +       def_stmt_info = vinfo_for_stmt (def_stmt);
> +       vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
> +       if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
> +         vec_oprnd = PHI_RESULT (phi);
>         else
> -         vec_oprnd = gimple_get_lhs (vec_stmt);
> -        return vec_oprnd;
> +         vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
> +       return vec_oprnd;
>        }
>
>      default:
> @@ -2674,8 +2672,9 @@ vect_build_zero_merge_argument (gimple *
>
>  static void
>  vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
> -                             gimple **vec_stmt, gather_scatter_info *gs_info,
> -                             tree mask, vect_def_type mask_dt)
> +                             stmt_vec_info *vec_stmt,
> +                             gather_scatter_info *gs_info, tree mask,
> +                             vect_def_type mask_dt)
>  {
>    stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
>    loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
> @@ -2960,7 +2959,7 @@ vect_get_data_ptr_increment (data_refere
>
>  static bool
>  vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
> -                   gimple **vec_stmt, slp_tree slp_node,
> +                   stmt_vec_info *vec_stmt, slp_tree slp_node,
>                     tree vectype_in, enum vect_def_type *dt,
>                     stmt_vector_for_cost *cost_vec)
>  {
> @@ -3104,8 +3103,9 @@ simple_integer_narrowing (tree vectype_o
>     Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
>
>  static bool
> -vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
> -                  slp_tree slp_node, stmt_vector_for_cost *cost_vec)
> +vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
> +                  stmt_vec_info *vec_stmt, slp_tree slp_node,
> +                  stmt_vector_for_cost *cost_vec)
>  {
>    gcall *stmt;
>    tree vec_dest;
> @@ -3745,7 +3745,7 @@ simd_clone_subparts (tree vectype)
>
>  static bool
>  vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
> -                             gimple **vec_stmt, slp_tree slp_node,
> +                             stmt_vec_info *vec_stmt, slp_tree slp_node,
>                               stmt_vector_for_cost *)
>  {
>    tree vec_dest;
> @@ -4596,7 +4596,7 @@ vect_create_vectorized_promotion_stmts (
>
>  static bool
>  vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
> -                        gimple **vec_stmt, slp_tree slp_node,
> +                        stmt_vec_info *vec_stmt, slp_tree slp_node,
>                          stmt_vector_for_cost *cost_vec)
>  {
>    tree vec_dest;
> @@ -5204,7 +5204,7 @@ vectorizable_conversion (gimple *stmt, g
>
>  static bool
>  vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
> -                        gimple **vec_stmt, slp_tree slp_node,
> +                        stmt_vec_info *vec_stmt, slp_tree slp_node,
>                          stmt_vector_for_cost *cost_vec)
>  {
>    tree vec_dest;
> @@ -5405,7 +5405,7 @@ vect_supportable_shift (enum tree_code c
>
>  static bool
>  vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
> -                    gimple **vec_stmt, slp_tree slp_node,
> +                   stmt_vec_info *vec_stmt, slp_tree slp_node,
>                     stmt_vector_for_cost *cost_vec)
>  {
>    tree vec_dest;
> @@ -5769,7 +5769,7 @@ vectorizable_shift (gimple *stmt, gimple
>
>  static bool
>  vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
> -                       gimple **vec_stmt, slp_tree slp_node,
> +                       stmt_vec_info *vec_stmt, slp_tree slp_node,
>                         stmt_vector_for_cost *cost_vec)
>  {
>    tree vec_dest;
> @@ -6222,8 +6222,9 @@ get_group_alias_ptr_type (gimple *first_
>     Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
>
>  static bool
> -vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
> -                    slp_tree slp_node, stmt_vector_for_cost *cost_vec)
> +vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
> +                   stmt_vec_info *vec_stmt, slp_tree slp_node,
> +                   stmt_vector_for_cost *cost_vec)
>  {
>    tree data_ref;
>    tree op;
> @@ -7385,8 +7386,9 @@ hoist_defs_of_uses (gimple *stmt, struct
>     Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
>
>  static bool
> -vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
> -                   slp_tree slp_node, slp_instance slp_node_instance,
> +vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
> +                  stmt_vec_info *vec_stmt, slp_tree slp_node,
> +                  slp_instance slp_node_instance,
>                    stmt_vector_for_cost *cost_vec)
>  {
>    tree scalar_dest;
> @@ -8710,8 +8712,9 @@ vect_is_simple_cond (tree cond, vec_info
>
>  bool
>  vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
> -                       gimple **vec_stmt, tree reduc_def, int reduc_index,
> -                       slp_tree slp_node, stmt_vector_for_cost *cost_vec)
> +                       stmt_vec_info *vec_stmt, tree reduc_def,
> +                       int reduc_index, slp_tree slp_node,
> +                       stmt_vector_for_cost *cost_vec)
>  {
>    tree scalar_dest = NULL_TREE;
>    tree vec_dest = NULL_TREE;
> @@ -9111,7 +9114,7 @@ vectorizable_condition (gimple *stmt, gi
>
>  static bool
>  vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
> -                        gimple **vec_stmt, tree reduc_def,
> +                        stmt_vec_info *vec_stmt, tree reduc_def,
>                          slp_tree slp_node, stmt_vector_for_cost *cost_vec)
>  {
>    tree lhs, rhs1, rhs2;
> @@ -9383,7 +9386,7 @@ vectorizable_comparison (gimple *stmt, g
>
>  static bool
>  can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
> -                         slp_tree slp_node, gimple **vec_stmt,
> +                         slp_tree slp_node, stmt_vec_info *vec_stmt,
>                           stmt_vector_for_cost *cost_vec)
>  {
>    if (slp_node)
> @@ -9647,11 +9650,11 @@ vect_transform_stmt (gimple *stmt, gimpl
>    stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
>    vec_info *vinfo = stmt_info->vinfo;
>    bool is_store = false;
> -  gimple *vec_stmt = NULL;
> +  stmt_vec_info vec_stmt = NULL;
>    bool done;
>
>    gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
> -  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
> +  stmt_vec_info old_vec_stmt_info = STMT_VINFO_VEC_STMT (stmt_info);
>
>    bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info)
>                    && nested_in_vect_loop_p
> @@ -9752,7 +9755,7 @@ vect_transform_stmt (gimple *stmt, gimpl
>       This would break hybrid SLP vectorization.  */
>    if (slp_node)
>      gcc_assert (!vec_stmt
> -               && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
> +               && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt_info);
>
>    /* Handle inner-loop stmts whose DEF is used in the loop-nest that
>       is being vectorized, but outside the immediately enclosing loop.  */


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]