[PATCH, Pointer Bounds Checker 22/x] Inline

Ilya Enkovich enkovich.gnu@gmail.com
Fri Jun 6 07:59:00 GMT 2014


2014-06-03 13:07 GMT+04:00 Richard Biener <richard.guenther@gmail.com>:
> On Mon, Jun 2, 2014 at 5:56 PM, Ilya Enkovich <enkovich.gnu@gmail.com> wrote:
>> Hi,
>>
>> This patch adds support for inlining instrumented calls.  Changes are mostly about supporting returned bounds.  Generated mem-to-mem assignments are also registered so that they can later be instrumented with the appropriate bounds copies.
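
For context, a rough sketch of the kind of code this targets (assuming
pointer bounds instrumentation is enabled, e.g. with -fcheck-pointer-bounds;
the GIMPLE mentioned in the comments is illustrative only, not taken from
the patch):

  static int buf[100];

  /* An instrumented callee returns bounds together with the pointer;
     at the GIMPLE level the caller picks them up through a
     BUILT_IN_CHKP_BNDRET call attached to the call's LHS.  */
  static inline int *
  get_buf (void)
  {
    return buf;
  }

  int
  read_elt (int i)
  {
    int *p = get_buf ();
    /* Once get_buf is inlined there is no call left for BNDRET to
       follow, so the returned bounds have to be produced inside the
       inlined body instead.  */
    return p[i];
  }
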
>>
>> Bootstrapped and tested on linux-x86_64.
>>
>> Thanks,
>> Ilya
>> --
>> gcc/
>>
>> 2014-06-02  Ilya Enkovich  <ilya.enkovich@intel.com>
>>
>>         * ipa-inline.c (early_inliner): Check edge has summary allocated.
>>         * tree-inline.c: Include tree-chkp.h.
>>         (declare_return_variable): Add arg holding
>>         returned bounds slot.  Create and initialize returned bounds var.
>>         (remap_gimple_stmt): Handle returned bounds.
>>         Return sequence of statements instead of a single statement.
>>         (insert_init_stmt): Add declaration.
>>         (remap_gimple_seq): Adjust to new remap_gimple_stmt signature.
>>         (copy_bb): Adjust to changed return type of remap_gimple_stmt.
>>         (expand_call_inline): Handle returned bounds.  Add bounds copy
>>         for generated mem to mem assignments.
>>         * tree-inline.h (copy_body_data): Add fields retbnd and
>>         assign_stmts.
>>         * cgraph.c: Include tree-chkp.h.
>>         (cgraph_redirect_edge_call_stmt_to_callee): Support
>>         returned bounds.
>>         * value-prof.c: Include tree-chkp.h.
>>         (gimple_ic): Support returned bounds.
>>
>>
>> diff --git a/gcc/cgraph.c b/gcc/cgraph.c
>> index 1f684c2..4b6996b 100644
>> --- a/gcc/cgraph.c
>> +++ b/gcc/cgraph.c
>> @@ -63,6 +63,7 @@ along with GCC; see the file COPYING3.  If not see
>>  #include "gimple-pretty-print.h"
>>  #include "expr.h"
>>  #include "tree-dfa.h"
>> +#include "tree-chkp.h"
>>
>>  /* FIXME: Only for PROP_loops, but cgraph shouldn't have to know about this.  */
>>  #include "tree-pass.h"
>> @@ -1398,6 +1399,31 @@ cgraph_redirect_edge_call_stmt_to_callee (struct cgraph_edge *e)
>>           e->speculative = false;
>>           cgraph_set_call_stmt_including_clones (e->caller, e->call_stmt,
>>                                                  new_stmt, false);
>> +
>> +         /* Fix edges for BUILT_IN_CHKP_BNDRET calls attached to the
>> +            processed call stmt.  */
>> +         if (gimple_call_with_bounds_p (new_stmt)
>> +             && gimple_call_lhs (new_stmt)
>> +             && chkp_retbnd_call_by_val (gimple_call_lhs (e2->call_stmt)))
>> +           {
>> +             tree dresult = gimple_call_lhs (new_stmt);
>> +             tree iresult = gimple_call_lhs (e2->call_stmt);
>> +             gimple dbndret = chkp_retbnd_call_by_val (dresult);
>> +             gimple ibndret = chkp_retbnd_call_by_val (iresult);
>> +             struct cgraph_edge *iedge = cgraph_edge (e2->caller, ibndret);
>> +             struct cgraph_edge *dedge;
>> +
>> +             if (dbndret)
>> +               {
>> +                 dedge = cgraph_create_edge (iedge->caller, iedge->callee,
>> +                                             dbndret, e->count,
>> +                                             e->frequency);
>> +                 dedge->frequency = compute_call_stmt_bb_frequency
>> +                   (dedge->caller->decl, gimple_bb (dedge->call_stmt));
>> +               }
>> +             iedge->frequency = compute_call_stmt_bb_frequency
>> +               (iedge->caller->decl, gimple_bb (iedge->call_stmt));
>> +           }
>>           e->frequency = compute_call_stmt_bb_frequency
>>                            (e->caller->decl, gimple_bb (e->call_stmt));
>>           e2->frequency = compute_call_stmt_bb_frequency
>> diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
>> index 4051819..a6fc853 100644
>> --- a/gcc/ipa-inline.c
>> +++ b/gcc/ipa-inline.c
>> @@ -2301,11 +2301,15 @@ early_inliner (void)
>>              info that might be cleared out for newly discovered edges.  */
>>           for (edge = node->callees; edge; edge = edge->next_callee)
>>             {
>> -             struct inline_edge_summary *es = inline_edge_summary (edge);
>> -             es->call_stmt_size
>> -               = estimate_num_insns (edge->call_stmt, &eni_size_weights);
>> -             es->call_stmt_time
>> -               = estimate_num_insns (edge->call_stmt, &eni_time_weights);
>> +             /* We have no summary for new bound store calls yet.  */
>> +             if (inline_edge_summary_vec.length () > (unsigned)edge->uid)
>> +               {
>> +                 struct inline_edge_summary *es = inline_edge_summary (edge);
>> +                 es->call_stmt_size
>> +                   = estimate_num_insns (edge->call_stmt, &eni_size_weights);
>> +                 es->call_stmt_time
>> +                   = estimate_num_insns (edge->call_stmt, &eni_time_weights);
>> +               }
>>               if (edge->callee->decl
>>                   && !gimple_check_call_matching_types (
>>                       edge->call_stmt, edge->callee->decl, false))
>> diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
>> index 23fef90..6557a95 100644
>> --- a/gcc/tree-inline.c
>> +++ b/gcc/tree-inline.c
>> @@ -67,6 +67,7 @@ along with GCC; see the file COPYING3.  If not see
>>  #include "tree-pass.h"
>>  #include "target.h"
>>  #include "cfgloop.h"
>> +#include "tree-chkp.h"
>>
>>  #include "rtl.h"       /* FIXME: For asm_str_count.  */
>>
>> @@ -130,7 +131,8 @@ eni_weights eni_time_weights;
>>
>>  /* Prototypes.  */
>>
>> -static tree declare_return_variable (copy_body_data *, tree, tree, basic_block);
>> +static tree declare_return_variable (copy_body_data *, tree, tree, tree,
>> +                                    basic_block);
>>  static void remap_block (tree *, copy_body_data *);
>>  static void copy_bind_expr (tree *, int *, copy_body_data *);
>>  static void declare_inline_vars (tree, tree);
>> @@ -139,8 +141,9 @@ static void prepend_lexical_block (tree current_block, tree new_block);
>>  static tree copy_decl_to_var (tree, copy_body_data *);
>>  static tree copy_result_decl_to_var (tree, copy_body_data *);
>>  static tree copy_decl_maybe_to_var (tree, copy_body_data *);
>> -static gimple remap_gimple_stmt (gimple, copy_body_data *);
>> +static gimple_seq remap_gimple_stmt (gimple, copy_body_data *);
>>  static bool delete_unreachable_blocks_update_callgraph (copy_body_data *id);
>> +static void insert_init_stmt (copy_body_data *, basic_block, gimple);
>>
>>  /* Insert a tree->tree mapping for ID.  Despite the name suggests
>>     that the trees should be variables, it is used for more than that.  */
>> @@ -732,8 +735,8 @@ remap_gimple_seq (gimple_seq body, copy_body_data *id)
>>
>>    for (si = gsi_start (body); !gsi_end_p (si); gsi_next (&si))
>>      {
>> -      gimple new_stmt = remap_gimple_stmt (gsi_stmt (si), id);
>> -      gimple_seq_add_stmt (&new_body, new_stmt);
>> +      gimple_seq new_stmts = remap_gimple_stmt (gsi_stmt (si), id);
>> +      gimple_seq_add_seq (&new_body, new_stmts);
>>      }
>>
>>    return new_body;
>> @@ -1241,12 +1244,13 @@ remap_eh_region_tree_nr (tree old_t_nr, copy_body_data *id)
>>  /* Helper for copy_bb.  Remap statement STMT using the inlining
>>     information in ID.  Return the new statement copy.  */
>>
>> -static gimple
>> +static gimple_seq
>>  remap_gimple_stmt (gimple stmt, copy_body_data *id)
>>  {
>>    gimple copy = NULL;
>>    struct walk_stmt_info wi;
>>    bool skip_first = false;
>> +  gimple_seq stmts = NULL;
>>
>>    /* Begin by recognizing trees that we'll completely rewrite for the
>>       inlining context.  Our output for these trees is completely
>> @@ -1261,6 +1265,17 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
>>    if (gimple_code (stmt) == GIMPLE_RETURN && id->transform_return_to_modify)
>>      {
>>        tree retval = gimple_return_retval (stmt);
>> +      tree retbnd = gimple_return_retbnd (stmt);
>> +      tree bndslot = id->retbnd;
>> +
>> +      if (retbnd && bndslot)
>> +       {
>> +         gimple bndcopy = gimple_build_assign (bndslot, retbnd);
>> +         memset (&wi, 0, sizeof (wi));
>> +         wi.info = id;
>> +         walk_gimple_op (bndcopy, remap_gimple_op_r, &wi);
>> +         gimple_seq_add_stmt (&stmts, bndcopy);
>> +       }
>>
>>        /* If we're returning something, just turn that into an
>>          assignment into the equivalent of the original RESULT_DECL.
>> @@ -1278,9 +1293,18 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
>>                                       retval);
>>           /* id->retvar is already substituted.  Skip it on later remapping.  */
>>           skip_first = true;
>> +
>> +         /* We need to copy bounds if we return a structure with
>> +            pointers in an instrumented function.  */
>> +         if (chkp_function_instrumented_p (id->dst_fn)
>> +             && !bndslot
>> +             && !BOUNDED_P (id->retvar)
>> +             && chkp_type_has_pointer (TREE_TYPE (id->retvar)))
>> +           id->assign_stmts.safe_push (copy);
>> +
>>         }
>>        else
>> -       return gimple_build_nop ();
>> +       return stmts;
>>      }
>>    else if (gimple_has_substatements (stmt))
>>      {
>> @@ -1444,7 +1468,7 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
>>               value = *n;
>>               STRIP_TYPE_NOPS (value);
>>               if (TREE_CONSTANT (value) || TREE_READONLY (value))
>> -               return gimple_build_nop ();
>> +               return NULL;
>>             }
>>         }
>>
>> @@ -1461,7 +1485,7 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
>>               if (gimple_bb (def_stmt)
>>                   && !bitmap_bit_p (id->blocks_to_copy,
>>                                     gimple_bb (def_stmt)->index))
>> -               return gimple_build_nop ();
>> +               return NULL;
>>             }
>>         }
>>
>> @@ -1471,7 +1495,8 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
>>                                           gimple_debug_bind_get_value (stmt),
>>                                           stmt);
>>           id->debug_stmts.safe_push (copy);
>> -         return copy;
>> +         gimple_seq_add_stmt (&stmts, copy);
>> +         return stmts;
>>         }
>>        if (gimple_debug_source_bind_p (stmt))
>>         {
>> @@ -1479,7 +1504,8 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
>>                    (gimple_debug_source_bind_get_var (stmt),
>>                     gimple_debug_source_bind_get_value (stmt), stmt);
>>           id->debug_stmts.safe_push (copy);
>> -         return copy;
>> +         gimple_seq_add_stmt (&stmts, copy);
>> +         return stmts;
>>         }
>>
>>        /* Create a new deep copy of the statement.  */
>> @@ -1553,7 +1579,10 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
>>      }
>>
>>    if (gimple_debug_bind_p (copy) || gimple_debug_source_bind_p (copy))
>> -    return copy;
>> +    {
>> +      gimple_seq_add_stmt (&stmts, copy);
>> +      return stmts;
>> +    }
>>
>>    /* Remap all the operands in COPY.  */
>>    memset (&wi, 0, sizeof (wi));
>> @@ -1571,7 +1600,8 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
>>        gimple_set_vuse (copy, NULL_TREE);
>>      }
>>
>> -  return copy;
>> +  gimple_seq_add_stmt (&stmts, copy);
>> +  return stmts;
>>  }
>>
>>
>> @@ -1612,36 +1642,59 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
>>
>>    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
>>      {
>> +      gimple_seq stmts;
>>        gimple stmt = gsi_stmt (gsi);
>>        gimple orig_stmt = stmt;
>> +      gimple_stmt_iterator stmts_gsi;
>> +      bool stmt_added = false;
>>
>>        id->regimplify = false;
>> -      stmt = remap_gimple_stmt (stmt, id);
>> -      if (gimple_nop_p (stmt))
>> +      stmts = remap_gimple_stmt (stmt, id);
>> +
>> +      if (gimple_seq_empty_p (stmts))
>>         continue;
>>
>> -      gimple_duplicate_stmt_histograms (cfun, stmt, id->src_cfun, orig_stmt);
>>        seq_gsi = copy_gsi;
>>
>> -      /* With return slot optimization we can end up with
>> -        non-gimple (foo *)&this->m, fix that here.  */
>> -      if (is_gimple_assign (stmt)
>> -         && gimple_assign_rhs_code (stmt) == NOP_EXPR
>> -         && !is_gimple_val (gimple_assign_rhs1 (stmt)))
>> +      for (stmts_gsi = gsi_start (stmts);
>> +          !gsi_end_p (stmts_gsi); )
>>         {
>> -         tree new_rhs;
>> -         new_rhs = force_gimple_operand_gsi (&seq_gsi,
>> -                                             gimple_assign_rhs1 (stmt),
>> -                                             true, NULL, false,
>> -                                             GSI_CONTINUE_LINKING);
>> -         gimple_assign_set_rhs1 (stmt, new_rhs);
>> -         id->regimplify = false;
>> -       }
>> +         stmt = gsi_stmt (stmts_gsi);
>> +
>> +         /* Advance iterator now before stmt is moved to seq_gsi.  */
>> +         gsi_next (&stmts_gsi);
>> +
>> +         if (gimple_nop_p (stmt))
>> +             continue;
>> +
>> +         gimple_duplicate_stmt_histograms (cfun, stmt, id->src_cfun,
>> +                                           orig_stmt);
>> +
>> +         /* With return slot optimization we can end up with
>> +            non-gimple (foo *)&this->m, fix that here.  */
>> +         if (is_gimple_assign (stmt)
>> +             && gimple_assign_rhs_code (stmt) == NOP_EXPR
>> +             && !is_gimple_val (gimple_assign_rhs1 (stmt)))
>> +           {
>> +             tree new_rhs;
>> +             new_rhs = force_gimple_operand_gsi (&seq_gsi,
>> +                                                 gimple_assign_rhs1 (stmt),
>> +                                                 true, NULL, false,
>> +                                                 GSI_CONTINUE_LINKING);
>> +             gimple_assign_set_rhs1 (stmt, new_rhs);
>> +             id->regimplify = false;
>> +           }
>>
>> -      gsi_insert_after (&seq_gsi, stmt, GSI_NEW_STMT);
>> +         gsi_insert_after (&seq_gsi, stmt, GSI_NEW_STMT);
>>
>> -      if (id->regimplify)
>> -       gimple_regimplify_operands (stmt, &seq_gsi);
>> +         if (id->regimplify)
>> +           gimple_regimplify_operands (stmt, &seq_gsi);
>> +
>> +         stmt_added = true;
>> +       }
>> +
>> +      if (!stmt_added)
>> +       continue;
>>
>>        /* If copy_basic_block has been empty at the start of this iteration,
>>          call gsi_start_bb again to get at the newly added statements.  */
>> @@ -3071,12 +3124,14 @@ initialize_inlined_parameters (copy_body_data *id, gimple stmt,
>>     is set only for CALL_EXPR_RETURN_SLOT_OPT.  MODIFY_DEST, if non-null,
>>     was the LHS of the MODIFY_EXPR to which this call is the RHS.
>>
>> +   RETURN_BOUNDS holds a destination for returned bounds.
>> +
>>     The return value is a (possibly null) value that holds the result
>>     as seen by the caller.  */
>>
>>  static tree
>>  declare_return_variable (copy_body_data *id, tree return_slot, tree modify_dest,
>> -                        basic_block entry_bb)
>> +                        tree return_bounds, basic_block entry_bb)
>>  {
>>    tree callee = id->src_fn;
>>    tree result = DECL_RESULT (callee);
>> @@ -3255,6 +3310,19 @@ declare_return_variable (copy_body_data *id, tree return_slot, tree modify_dest,
>>    /* Remember this so we can ignore it in remap_decls.  */
>>    id->retvar = var;
>>
>> +  /* If returned bounds are used, then make a var for them.  */
>> +  if (return_bounds)
>> +  {
>> +    tree bndtemp = create_tmp_var (pointer_bounds_type_node, "retbnd");
>> +    DECL_SEEN_IN_BIND_EXPR_P (bndtemp) = 1;
>> +    TREE_NO_WARNING (bndtemp) = 1;
>> +    declare_inline_vars (id->block, bndtemp);
>> +
>> +    id->retbnd = bndtemp;
>> +    insert_init_stmt (id, entry_bb,
>> +                     gimple_build_assign (bndtemp, chkp_get_zero_bounds_var ()));
>> +  }
>> +
>>    return use;
>>  }
>>
>> @@ -4084,6 +4152,7 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
>>    struct pointer_map_t *st, *dst;
>>    tree return_slot;
>>    tree modify_dest;
>> +  tree return_bounds = NULL;
>>    location_t saved_location;
>>    struct cgraph_edge *cg_edge;
>>    cgraph_inline_failed_t reason;
>> @@ -4092,6 +4161,7 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
>>    gimple_stmt_iterator gsi, stmt_gsi;
>>    bool successfully_inlined = FALSE;
>>    bool purge_dead_abnormal_edges;
>> +  unsigned int i;
>>
>>    /* Set input_location here so we get the right instantiation context
>>       if we call instantiate_decl from inlinable_function_p.  */
>> @@ -4180,6 +4250,7 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
>>
>>    /* We will be inlining this callee.  */
>>    id->eh_lp_nr = lookup_stmt_eh_lp (stmt);
>> +  id->assign_stmts.create (0);
>>
>>    /* Update the callers EH personality.  */
>>    if (DECL_FUNCTION_PERSONALITY (cg_edge->callee->decl))
>> @@ -4301,6 +4372,24 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
>>      {
>>        modify_dest = gimple_call_lhs (stmt);
>>
>> +      /* Remember where to copy returned bounds.  */
>> +      if (gimple_call_with_bounds_p (stmt)
>> +         && TREE_CODE (modify_dest) == SSA_NAME)
>> +       {
>> +         gimple retbnd = chkp_retbnd_call_by_val (modify_dest);
>> +         if (retbnd)
>> +           {
>> +             return_bounds = gimple_call_lhs (retbnd);
>> +             /* If returned bounds are not used, then just
>> +                remove the unused call.  */
>> +             if (!return_bounds)
>> +               {
>> +                 gimple_stmt_iterator iter = gsi_for_stmt (retbnd);
>> +                 gsi_remove (&iter, true);
>> +               }
>> +           }
>> +       }
>> +
>>        /* The function which we are inlining might not return a value,
>>          in which case we should issue a warning that the function
>>          does not return a value.  In that case the optimizers will
>> @@ -4331,7 +4420,8 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
>>      }
>>
>>    /* Declare the return variable for the function.  */
>> -  use_retvar = declare_return_variable (id, return_slot, modify_dest, bb);
>> +  use_retvar = declare_return_variable (id, return_slot, modify_dest,
>> +                                       return_bounds, bb);
>>
>>    /* Add local vars in this inlined callee to caller.  */
>>    add_local_variables (id->src_cfun, cfun, id);
>> @@ -4383,6 +4473,12 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
>>        stmt = gimple_build_assign (gimple_call_lhs (stmt), use_retvar);
>>        gsi_replace (&stmt_gsi, stmt, false);
>>        maybe_clean_or_replace_eh_stmt (old_stmt, stmt);
>> +
>> +      /* Copy bounds if we copy a structure with bounds.  */
>> +      if (chkp_function_instrumented_p (id->dst_fn)
>> +         && !BOUNDED_P (use_retvar)
>> +         && chkp_type_has_pointer (TREE_TYPE (use_retvar)))
>> +       id->assign_stmts.safe_push (stmt);
>>      }
>>    else
>>      {
>> @@ -4414,6 +4510,20 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
>>          gsi_remove (&stmt_gsi, true);
>>      }
>>
>> +  /* Put returned bounds into the correct place if required.  */
>> +  if (return_bounds)
>> +    {
>> +      gimple old_stmt = SSA_NAME_DEF_STMT (return_bounds);
>> +      gimple new_stmt = gimple_build_assign (return_bounds, id->retbnd);
>> +      gimple_stmt_iterator bnd_gsi = gsi_for_stmt (old_stmt);
>> +      unlink_stmt_vdef (old_stmt);
>> +      gsi_replace (&bnd_gsi, new_stmt, false);
>> +      maybe_clean_or_replace_eh_stmt (old_stmt, new_stmt);
>> +      cgraph_update_edges_for_call_stmt (old_stmt,
>> +                                        gimple_call_fndecl (old_stmt),
>> +                                        new_stmt);
>> +    }
>> +
>>    if (purge_dead_abnormal_edges)
>>      {
>>        gimple_purge_dead_eh_edges (return_block);
>> @@ -4430,6 +4540,11 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
>>        TREE_USED (gimple_assign_rhs1 (stmt)) = 1;
>>      }
>>
>> +  /* Copy bounds for all generated assigns that need it.  */
>> +  for (i = 0; i < id->assign_stmts.length (); i++)
>> +    chkp_copy_bounds_for_assign (id->assign_stmts[i], cg_edge);
>> +  id->assign_stmts.release ();
>> +
>>    /* Output the inlining info for this abstract function, since it has been
>>       inlined.  If we don't do this now, we can lose the information about the
>>       variables in the function when the blocks get blown away as soon as we
>> diff --git a/gcc/tree-inline.h b/gcc/tree-inline.h
>> index 13c5516..a3b62b4 100644
>> --- a/gcc/tree-inline.h
>> +++ b/gcc/tree-inline.h
>> @@ -60,6 +60,12 @@ struct copy_body_data
>>    /* The VAR_DECL for the return value.  */
>>    tree retvar;
>>
>> +  /* The VAR_DECL for the return bounds.  */
>> +  tree retbnd;
>> +
>> +  /* Assign statements that need bounds copy.  */
>> +  vec<gimple> assign_stmts;
>> +
>>    /* The map from local declarations in the inlined function to
>>       equivalents in the function into which it is being inlined.  */
>>    struct pointer_map_t *decl_map;
>> diff --git a/gcc/value-prof.c b/gcc/value-prof.c
>> index 2890093..5470167 100644
>> --- a/gcc/value-prof.c
>> +++ b/gcc/value-prof.c
>> @@ -59,6 +59,7 @@ along with GCC; see the file COPYING3.  If not see
>>  #include "data-streamer.h"
>>  #include "builtins.h"
>>  #include "tree-nested.h"
>> +#include "tree-chkp.h"
>>
>>  /* In this file value profile based optimizations are placed.  Currently the
>>     following optimizations are implemented (for more detailed descriptions
>> @@ -1319,7 +1320,7 @@ gimple
>>  gimple_ic (gimple icall_stmt, struct cgraph_node *direct_call,
>>            int prob, gcov_type count, gcov_type all)
>>  {
>> -  gimple dcall_stmt, load_stmt, cond_stmt;
>> +  gimple dcall_stmt, load_stmt, cond_stmt, iretbnd_stmt = NULL;
>>    tree tmp0, tmp1, tmp;
>>    basic_block cond_bb, dcall_bb, icall_bb, join_bb = NULL;
>>    tree optype = build_pointer_type (void_type_node);
>> @@ -1333,6 +1334,9 @@ gimple_ic (gimple icall_stmt, struct cgraph_node *direct_call,
>>    cond_bb = gimple_bb (icall_stmt);
>>    gsi = gsi_for_stmt (icall_stmt);
>>
>> +  if (gimple_call_with_bounds_p (icall_stmt) && gimple_call_lhs (icall_stmt))
>> +    iretbnd_stmt = chkp_retbnd_call_by_val (gimple_call_lhs (icall_stmt));
>> +
>>    tmp0 = make_temp_ssa_name (optype, NULL, "PROF");
>>    tmp1 = make_temp_ssa_name (optype, NULL, "PROF");
>>    tmp = unshare_expr (gimple_call_fn (icall_stmt));
>> @@ -1425,6 +1429,49 @@ gimple_ic (gimple icall_stmt, struct cgraph_node *direct_call,
>>        gimple_call_set_lhs (dcall_stmt,
>>                            duplicate_ssa_name (result, dcall_stmt));
>>        add_phi_arg (phi, gimple_call_lhs (dcall_stmt), e_dj, UNKNOWN_LOCATION);
>> +
>> +      /* If the indirect call is followed by a BUILT_IN_CHKP_BNDRET
>> +        call, then we need to make a copy of it for the
>> +        direct call.  */
>> +      if (iretbnd_stmt)
>> +       {
>> +         if (gimple_call_lhs (iretbnd_stmt))
>> +           {
>> +             gimple copy;
>> +
>> +             gimple_set_vdef (iretbnd_stmt, NULL_TREE);
>> +             gimple_set_vuse (iretbnd_stmt, NULL_TREE);
>> +             update_stmt (iretbnd_stmt);
>
> This looks bogus - what are you trying to work around?

I was not trying to work around anything.  I just looked at how this
function already makes a copy of the call and repeated the same
process for the retbnd call.

>
> Richard.
>
>> +             result = gimple_call_lhs (iretbnd_stmt);
>> +             phi = create_phi_node (result, join_bb);
>> +
>> +             copy = gimple_copy (iretbnd_stmt);
>> +             gimple_call_set_arg (copy, 0,
>> +                                  gimple_call_lhs (dcall_stmt));
>> +             gimple_call_set_lhs (copy, duplicate_ssa_name (result, copy));
>> +             gsi_insert_on_edge (e_dj, copy);
>> +             add_phi_arg (phi, gimple_call_lhs (copy),
>> +                          e_dj, UNKNOWN_LOCATION);
>> +
>> +             gimple_call_set_arg (iretbnd_stmt, 0,
>> +                                  gimple_call_lhs (icall_stmt));
>> +             gimple_call_set_lhs (iretbnd_stmt,
>> +                                  duplicate_ssa_name (result, iretbnd_stmt));
>> +             psi = gsi_for_stmt (iretbnd_stmt);
>> +             gsi_remove (&psi, false);
>> +             gsi_insert_on_edge (e_ij, iretbnd_stmt);
>> +             add_phi_arg (phi, gimple_call_lhs (iretbnd_stmt),
>> +                          e_ij, UNKNOWN_LOCATION);
>> +
>> +             gsi_commit_edge_inserts ();
>
> Are you sure this is a good idea?  IMNSHO you should be using
> either gsi_commit_one_edge_insert or gsi_insert_on_edge_immediate.

I will replace it with a couple of gsi_commit_one_edge_insert calls.
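Something along these lines (untested sketch, only the commit call
changes):

	      /* Commit just the two edge insertions made above instead
		 of flushing every pending edge insertion.  */
	      gsi_commit_one_edge_insert (e_dj, NULL);
	      gsi_commit_one_edge_insert (e_ij, NULL);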

Thanks,
Ilya
>
> Richard.
>
>
>> +           }
>> +         else
>> +           {
>> +             psi = gsi_for_stmt (iretbnd_stmt);
>> +             gsi_remove (&psi, true);
>> +           }
>> +       }
>>      }
>>
>>    /* Build an EH edge for the direct call if necessary.  */


