This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



Re: [tuples] Conversion of estimate_num_insns and pass_ch


Which version?


Diego Novillo wrote:
> 
> This patch merges the patches from Zdenek and Bill to convert
> estimate_num_insns and pass_ch.  Note that I still wasn't able to
> re-enable pass_ch, as libgcc does not build because of unconverted
> code.
> 
> We are still missing some of the CFG manipulators and VRP.  I
> re-enabled ssa_name_nonzero_p and ssa_name_nonnegative_p to always
> return false, and I also re-enabled split_block for gimple, but that's
> not enough.
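
For reference, the tree-vrp.c hunk below reduces both predicates to a
conservative "don't know" answer until VRP itself is tuplified.
Condensed, each stub has this shape (the hunk is authoritative):

    bool
    ssa_name_nonzero_p (const_tree t ATTRIBUTE_UNUSED)
    {
      /* FIXME tuples.  The value-range code is disabled until VRP is
         converted; conservatively report "not known nonzero".  */
      gimple_unreachable ();
      return false;
    }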
> 
> Since we need estimate_num_insns for other stuff, I think it's best to
> commit this and re-enable pass_ch later.  Zdenek, I think we are only
> missing a few bits, so it shouldn't be much of a problem.
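
For readers skimming the patch: the size estimator now has a
per-statement query plus a whole-function walker, so callers no longer
hand a FUNCTION_DECL to estimate_num_insns.  The call shapes, as they
appear in the hunks below:

    /* Per statement, e.g. while scanning a block:  */
    *limit -= estimate_num_insns (gsi_stmt (gsi), &eni_size_weights);

    /* Per function, e.g. when computing inlining parameters:  */
    node->local.self_insns
      = estimate_num_insns_fn (fndecl, &eni_inlining_weights);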
> 
> 
> Thanks.  Diego.
> 
> 2008-02-12  Zdenek Dvorak  <ook@ucw.cz>
>             Bill Maddox  <maddox@google.com>
> 
> 	* tree-inline.h (estimate_num_insns_fn): Declare.
> 	* cgraphunit.c (cgraph_process_new_functions):
> 	Use estimate_num_insns_fn.
> 	* ipa-inline.c (compute_inline_parameters): Ditto.
> 	* gimple-dummy.c (estimate_move_cost, estimate_num_insns):
> 	Removed.
> 	* tree-inline.c (struct eni_data, estimate_num_insns_1):
> 	Removed.
> 	(estimate_num_insns): Rewritten.
> 	(operation_cost, estimate_num_insns_fn): New functions.
> 	* gimple.c (gimple_copy): Unshare operands.  Update
> 	operand caches.
> 	* gimple.h (gimple_set_no_warning): New.
> 	(gimple_cond_set_true_label, gimple_cond_set_false_label):
> 	Allow setting the label to NULL.
> 	* tree-ssa-operands.c (copy_virtual_operands): Handle statements
> 	without virtual operands.
> 
> 2008-02-12  Zdenek Dvorak  <ook@ucw.cz>
> 
> 	* tree-into-ssa.c (update_ssa): Remove ATTRIBUTE_UNUSED.
> 	* tree-ssa-loop-ch.c: Tuplified.
> 	* gimple-iterator.c (gsi_commit_one_edge_insert): Ditto.
> 	* tree-cfg.c (gimple_redirect_edge_and_branch,
> 	gimple_try_redirect_by_replacing_jump, gimple_merge_blocks,
> 	gimple_block_label, gimple_redirect_edge_and_branch_force,
> 	gimple_duplicate_bb): Ditto.
> 	(make_cond_expr_edges): Remove the labels.
> 	(cleanup_dead_labels): Handle conditions without labels.
> 	(gimple_make_forwarder_block): Do not reverse the list
> 	of phi nodes.
> 	(gimple_duplicate_sese_region): Enable ssa updates.
> 	(gimple_cfg_hooks): Enable edge redirection and bb duplication.
> 	* gimple-pretty-print.c (dump_gimple_cond): Do not dump
> 	branches if labels are null.
> 	(dump_implicit_edges): Dump implicit GIMPLE_COND edges.
> 
> 2008-02-12  Diego Novillo  <dnovillo@google.com>
> 
> 	* tree-vrp.c (range_includes_zero_p): Partially re-enable.
> 	(ssa_name_nonnegative_p): Partially re-enable.
> 	(ssa_name_nonzero_p): Likewise.
> 	* gimple-dummy.c (ssa_name_nonzero_p): Remove.
> 	(ssa_name_nonnegative_p): Remove.
> 	* tree-cfg.c (gimple_split_block): Convert to tuples.
> 
> Index: tree-vrp.c
> ===================================================================
> --- tree-vrp.c	(revision 132261)
> +++ tree-vrp.c	(working copy)
> @@ -1041,13 +1041,16 @@ range_includes_zero_p (value_range_t *vr
>    zero = build_int_cst (TREE_TYPE (vr->min), 0);
>    return (value_inside_range (zero, vr) == 1);
>  }
> +#endif
>  
>  /* Return true if T, an SSA_NAME, is known to be nonnegative.  Return
>     false otherwise or if no value range information is available.  */
>  
>  bool
> -ssa_name_nonnegative_p (const_tree t)
> +ssa_name_nonnegative_p (const_tree t ATTRIBUTE_UNUSED)
>  {
> +  /* FIXME tuples.  */
> +#if 0
>    value_range_t *vr = get_value_range (t);
>  
>    if (!vr)
> @@ -1062,14 +1065,20 @@ ssa_name_nonnegative_p (const_tree t)
>        return (result == 0 || result == 1);
>      }
>    return false;
> +#else
> +  gimple_unreachable ();
> +  return false;
> +#endif
>  }
>  
>  /* Return true if T, an SSA_NAME, is known to be nonzero.  Return
>     false otherwise or if no value range information is available.  */
>  
>  bool
> -ssa_name_nonzero_p (const_tree t)
> +ssa_name_nonzero_p (const_tree t ATTRIBUTE_UNUSED)
>  {
> +  /* FIXME tuples.  */
> +#if 0
>    value_range_t *vr = get_value_range (t);
>  
>    if (!vr)
> @@ -1084,9 +1093,15 @@ ssa_name_nonzero_p (const_tree t)
>      return range_includes_zero_p (vr);
>  
>    return false;
> +#else
> +  gimple_unreachable ();
> +  return false;
> +#endif
>  }
>  
>  
> +/* FIXME tuples.  */
> +#if 0
>  /* Extract value range information from an ASSERT_EXPR EXPR and store
>     it in *VR_P.  */
>  
> Index: tree-into-ssa.c
> ===================================================================
> --- tree-into-ssa.c	(revision 132261)
> +++ tree-into-ssa.c	(working copy)
> @@ -3176,7 +3176,7 @@ switch_virtuals_to_full_rewrite (void)
>     TODO_update_ssa*.  */
>  
>  void
> -update_ssa (unsigned update_flags ATTRIBUTE_UNUSED)
> +update_ssa (unsigned update_flags)
>  {
>    basic_block bb, start_bb;
>    bitmap_iterator bi;
> Index: tree-ssa-loop-ch.c
> ===================================================================
> --- tree-ssa-loop-ch.c	(revision 132261)
> +++ tree-ssa-loop-ch.c	(working copy)
> @@ -37,8 +37,6 @@ along with GCC; see the file COPYING3.  
>  #include "flags.h"
>  #include "tree-inline.h"
>  
> -/* FIXME tuples.  */
> -#if 0
>  /* Duplicates headers of loops if they are small enough, so that the statements
>     in the loop body are always executed when the loop is entered.  This
>     increases effectiveness of code motion optimizations, and reduces the need
> @@ -52,8 +50,8 @@ static bool
>  should_duplicate_loop_header_p (basic_block header, struct loop *loop,
>  				int *limit)
>  {
> -  block_stmt_iterator bsi;
> -  tree last;
> +  gimple_stmt_iterator bsi;
> +  gimple last;
>  
>    /* Do not copy one block more than once (we do not really want to do
>       loop peeling here).  */
> @@ -73,19 +71,19 @@ should_duplicate_loop_header_p (basic_bl
>      return false;
>  
>    last = last_stmt (header);
> -  if (TREE_CODE (last) != COND_EXPR)
> +  if (gimple_code (last) != GIMPLE_COND)
>      return false;
>  
>    /* Approximately copy the conditions that used to be used in jump.c --
>       at most 20 insns and no calls.  */
> -  for (bsi = bsi_start (header); !bsi_end_p (bsi); bsi_next (&bsi))
> +  for (bsi = gsi_start_bb (header); !gsi_end_p (bsi); gsi_next (&bsi))
>      {
> -      last = bsi_stmt (bsi);
> +      last = gsi_stmt (bsi);
>  
> -      if (TREE_CODE (last) == LABEL_EXPR)
> +      if (gimple_code (last) == GIMPLE_LABEL)
>  	continue;
>  
> -      if (get_call_expr_in (last))
> +      if (gimple_code (last) == GIMPLE_CALL)
>  	return false;
>  
>        *limit -= estimate_num_insns (last, &eni_size_weights);
> @@ -101,17 +99,17 @@ should_duplicate_loop_header_p (basic_bl
>  static bool
>  do_while_loop_p (struct loop *loop)
>  {
> -  tree stmt = last_stmt (loop->latch);
> +  gimple stmt = last_stmt (loop->latch);
>  
>    /* If the latch of the loop is not empty, it is not a do-while loop.  */
>    if (stmt
> -      && TREE_CODE (stmt) != LABEL_EXPR)
> +      && gimple_code (stmt) != GIMPLE_LABEL)
>      return false;
>  
>    /* If the header contains just a condition, it is not a do-while loop.  */
>    stmt = last_and_only_stmt (loop->header);
>    if (stmt
> -      && TREE_CODE (stmt) == COND_EXPR)
> +      && gimple_code (stmt) == GIMPLE_COND)
>      return false;
>  
>    return true;
> @@ -198,7 +196,7 @@ copy_loop_headers (void)
>  
>        entry = loop_preheader_edge (loop);
>  
> -      if (!tree_duplicate_sese_region (entry, exit, bbs, n_bbs, copied_bbs))
> +      if (!gimple_duplicate_sese_region (entry, exit, bbs, n_bbs, copied_bbs))
>  	{
>  	  fprintf (dump_file, "Duplication failed.\n");
>  	  continue;
> @@ -210,27 +208,27 @@ copy_loop_headers (void)
>  	 we assume that "j < j + 10" is true.  We don't want to warn
>  	 about that case for -Wstrict-overflow, because in general we
>  	 don't warn about overflow involving loops.  Prevent the
> -	 warning by setting TREE_NO_WARNING.  */
> +	 warning by setting the no_warning flag in the condition.  */
>        if (warn_strict_overflow > 0)
>  	{
>  	  unsigned int i;
>  
>  	  for (i = 0; i < n_bbs; ++i)
>  	    {
> -	      block_stmt_iterator bsi;
> +	      gimple_stmt_iterator bsi;
>  
> -	      for (bsi = bsi_start (copied_bbs[i]);
> -		   !bsi_end_p (bsi);
> -		   bsi_next (&bsi))
> +	      for (bsi = gsi_start_bb (copied_bbs[i]);
> +		   !gsi_end_p (bsi);
> +		   gsi_next (&bsi))
>  		{
> -		  tree stmt = bsi_stmt (bsi);
> -		  if (TREE_CODE (stmt) == COND_EXPR)
> -		    TREE_NO_WARNING (stmt) = 1;
> -		  else if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
> +		  gimple stmt = gsi_stmt (bsi);
> +		  if (gimple_code (stmt) == GIMPLE_COND)
> +		    gimple_set_no_warning (stmt, true);
> +		  else if (gimple_code (stmt) == GIMPLE_ASSIGN)
>  		    {
> -		      tree rhs = GIMPLE_STMT_OPERAND (stmt, 1);
> -		      if (COMPARISON_CLASS_P (rhs))
> -			TREE_NO_WARNING (stmt) = 1;
> +		      enum tree_code rhs_code = gimple_assign_subcode (stmt);
> +		      if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
> +			gimple_set_no_warning (stmt, true);
>  		    }
>  		}
>  	    }
> @@ -272,4 +270,3 @@ struct tree_opt_pass pass_ch = 
>    | TODO_verify_ssa,			/* todo_flags_finish */
>    0					/* letter */
>  };
> -#endif
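
The pass_ch conversion above shows the recurring tuples idiom:
block_stmt_iterator/tree become gimple_stmt_iterator/gimple, and
TREE_CODE tests become gimple_code tests.  Condensed from
should_duplicate_loop_header_p above:

    gimple_stmt_iterator bsi;
    gimple stmt;

    for (bsi = gsi_start_bb (header); !gsi_end_p (bsi); gsi_next (&bsi))
      {
        stmt = gsi_stmt (bsi);
        if (gimple_code (stmt) == GIMPLE_LABEL)
          continue;
        if (gimple_code (stmt) == GIMPLE_CALL)
          return false;
        *limit -= estimate_num_insns (stmt, &eni_size_weights);
      }
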
> Index: cgraphunit.c
> ===================================================================
> --- cgraphunit.c	(revision 132261)
> +++ cgraphunit.c	(working copy)
> @@ -461,13 +461,12 @@ cgraph_process_new_functions (void)
>  	  push_cfun (DECL_STRUCT_FUNCTION (fndecl));
>  	  current_function_decl = fndecl;
>  	  node->local.inlinable = tree_inlinable_function_p (fndecl);
> +	  node->local.self_insns = estimate_num_insns_fn (fndecl,
> +						&eni_inlining_weights);
> +
>  	  /* FIXME tuples.  */
> -#if 0
> -	  node->local.self_insns = estimate_num_insns (fndecl,
> -						       &eni_inlining_weights);
> -#else
>  	  gimple_unreachable ();
> -#endif
> +
>  	  node->local.disregard_inline_limits
>  	    |= DECL_DISREGARD_INLINE_LIMITS (fndecl);
>  	  /* Inlining characteristics are maintained by the
> Index: gimple-dummy.c
> ===================================================================
> --- gimple-dummy.c	(revision 132261)
> +++ gimple-dummy.c	(working copy)
> @@ -26,8 +26,6 @@ DUMMY_FN (chrec_contains_symbols_defined
>  DUMMY_FN (compute_builtin_object_size)
>  DUMMY_FN (compute_data_dependences_for_loop)
>  DUMMY_FN (dump_ddrs)
> -DUMMY_FN (estimate_move_cost)
> -DUMMY_FN (estimate_num_insns)
>  DUMMY_FN (estimate_numbers_of_iterations)
>  DUMMY_FN (expr_invariant_in_loop_p)
>  DUMMY_FN (fold_stmt)
> @@ -74,8 +72,6 @@ DUMMY_FN (scev_finalize)
>  DUMMY_FN (scev_initialize)
>  DUMMY_FN (scev_probably_wraps_p)
>  DUMMY_FN (scev_reset)
> -DUMMY_FN (ssa_name_nonnegative_p)
> -DUMMY_FN (ssa_name_nonzero_p)
>  DUMMY_FN (gimple_duplicate_loop_to_header_edge)
>  DUMMY_FN (tree_function_versioning)
>  DUMMY_FN (tree_inlinable_function_p)
> Index: ipa-inline.c
> ===================================================================
> --- ipa-inline.c	(revision 132261)
> +++ ipa-inline.c	(working copy)
> @@ -1535,13 +1535,12 @@ compute_inline_parameters (void)
>    node->global.estimated_stack_size = node->local.estimated_self_stack_size;
>    node->global.stack_frame_offset = 0;
>    node->local.inlinable = tree_inlinable_function_p (current_function_decl);
> +  node->local.self_insns = estimate_num_insns_fn (current_function_decl,
> +						  &eni_inlining_weights);
> +
>    /* FIXME tuples.  */
> -#if 0
> -  node->local.self_insns = estimate_num_insns (current_function_decl,
> -					       &eni_inlining_weights);
> -#else
>    gimple_unreachable ();
> -#endif
> +
>    if (node->local.inlinable && !node->local.disregard_inline_limits)
>      node->local.disregard_inline_limits
>        = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
> Index: gimple-iterator.c
> ===================================================================
> --- gimple-iterator.c	(revision 132261)
> +++ gimple-iterator.c	(working copy)
> @@ -582,19 +582,14 @@ gsi_commit_one_edge_insert (edge e, basi
>  
>    if (PENDING_STMT (e))
>      {
> -      /* FIXME tuples.  */
> -#if 0
> -      gimple_stmt_iterator *gsi;
> -      gimple stmt = PENDING_STMT (e);
> +      gimple_stmt_iterator gsi;
> +      gimple_seq seq = PENDING_STMT (e);
>  
> -      PENDING_STMT (e) = NULL_TREE;
> +      PENDING_STMT (e) = NULL;
>  
>        if (gimple_find_edge_insert_loc (e, &gsi, new_bb))
> -	gsi_insert_after (gsi, stmt, GSI_NEW_STMT);
> +	gsi_link_seq_after (&gsi, seq, GSI_NEW_STMT);
>        else
> -	gsi_insert_before (gsi, stmt, GSI_NEW_STMT);
> -#else
> -      gimple_unreachable ();
> -#endif
> +	gsi_link_seq_before (&gsi, seq, GSI_NEW_STMT);
>      }
>  }
> Index: gimple-pretty-print.c
> ===================================================================
> --- gimple-pretty-print.c	(revision 132261)
> +++ gimple-pretty-print.c	(working copy)
> @@ -332,10 +332,20 @@ dump_gimple_cond (pretty_printer *buffer
>    pp_string (buffer, tree_code_name [gimple_cond_code (gs)]);
>    pp_space (buffer);
>    dump_generic_node (buffer, gimple_cond_rhs (gs), spc, flags, false);
> -  pp_string (buffer, ") goto ");
> -  dump_generic_node (buffer, gimple_cond_true_label (gs), spc, flags, false);
> -  pp_string (buffer, " else goto ");
> -  dump_generic_node (buffer, gimple_cond_false_label (gs), spc, flags, false);
> +  pp_string (buffer, ")");
> +  
> +  if (gimple_cond_true_label (gs))
> +    {
> +      pp_string (buffer, " goto ");
> +      dump_generic_node (buffer, gimple_cond_true_label (gs), spc, flags,
> +			 false);
> +    }
> +  if (gimple_cond_false_label (gs))
> +    {
> +      pp_string (buffer, " else goto ");
> +      dump_generic_node (buffer, gimple_cond_false_label (gs), spc, flags,
> +			 false);
> +    }
>  }
>  
>  
> @@ -935,6 +945,27 @@ dump_implicit_edges (pretty_printer *buf
>  
>    stmt = last_stmt (bb);
>  
> +  if (stmt && gimple_code (stmt) == GIMPLE_COND)
> +    {
> +      edge true_edge, false_edge;
> +
> +      /* When we are emitting the code or changing CFG, it is possible that
> +	 the edges are not yet created.  When we are using debug_bb in such
> +	 a situation, we do not want it to crash.  */
> +      if (EDGE_COUNT (bb->succs) != 2)
> +	return;
> +      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
> +
> +      INDENT (indent + 2);
> +      pp_cfg_jump (buffer, true_edge->dest);
> +      newline_and_indent (buffer, indent);
> +      pp_string (buffer, "else");
> +      newline_and_indent (buffer, indent + 2);
> +      pp_cfg_jump (buffer, false_edge->dest);
> +      pp_newline (buffer);
> +      return;
> +    }
> +
>    /* If there is a fallthru edge, we may need to add an artificial
>       goto to the dump.  */
>    FOR_EACH_EDGE (e, ei, bb->succs)
> Index: tree-inline.c
> ===================================================================
> --- tree-inline.c	(revision 132261)
> +++ tree-inline.c	(working copy)
> @@ -2184,8 +2184,6 @@ inlinable_function_p (tree fn)
>  }
>  #endif
>  
> -/* FIXME tuples.  */
> -#if 0
>  /* Estimate the cost of a memory move.  Use machine dependent
>     word size and take possible memcpy call into account.  */
>  
> @@ -2203,152 +2201,13 @@ estimate_move_cost (tree type)
>      return ((size + MOVE_MAX_PIECES - 1) / MOVE_MAX_PIECES);
>  }
>  
> -/* Arguments for estimate_num_insns_1.  */
> -
> -struct eni_data
> -{
> -  /* Used to return the number of insns.  */
> -  int count;
> -
> -  /* Weights of various constructs.  */
> -  eni_weights *weights;
> -};
> -
> -/* Used by estimate_num_insns.  Estimate number of instructions seen
> -   by given statement.  */
> +/* Returns cost of operation CODE, according to WEIGHTS  */
>  
> -static tree
> -estimate_num_insns_1 (tree *tp, int *walk_subtrees, void *data)
> +static int
> +estimate_operator_cost (enum tree_code code, eni_weights *weights)
>  {
> -  struct eni_data *d = data;
> -  tree x = *tp;
> -  unsigned cost;
> -
> -  if (IS_TYPE_OR_DECL_P (x))
> -    {
> -      *walk_subtrees = 0;
> -      return NULL;
> -    }
> -  /* Assume that constants and references counts nothing.  These should
> -     be majorized by amount of operations among them we count later
> -     and are common target of CSE and similar optimizations.  */
> -  else if (CONSTANT_CLASS_P (x) || REFERENCE_CLASS_P (x))
> -    return NULL;
> -
> -  switch (TREE_CODE (x))
> +  switch (code)
>      {
> -    /* Containers have no cost.  */
> -    case TREE_LIST:
> -    case TREE_VEC:
> -    case BLOCK:
> -    case COMPONENT_REF:
> -    case BIT_FIELD_REF:
> -    case INDIRECT_REF:
> -    case ALIGN_INDIRECT_REF:
> -    case MISALIGNED_INDIRECT_REF:
> -    case ARRAY_REF:
> -    case ARRAY_RANGE_REF:
> -    case OBJ_TYPE_REF:
> -    case EXC_PTR_EXPR: /* ??? */
> -    case FILTER_EXPR: /* ??? */
> -    case COMPOUND_EXPR:
> -    case BIND_EXPR:
> -    case WITH_CLEANUP_EXPR:
> -    case NOP_EXPR:
> -    case CONVERT_EXPR:
> -    case VIEW_CONVERT_EXPR:
> -    case SAVE_EXPR:
> -    case ADDR_EXPR:
> -    case COMPLEX_EXPR:
> -    case RANGE_EXPR:
> -    case CASE_LABEL_EXPR:
> -    case SSA_NAME:
> -    case CATCH_EXPR:
> -    case EH_FILTER_EXPR:
> -    case STATEMENT_LIST:
> -    case ERROR_MARK:
> -    case NON_LVALUE_EXPR:
> -    case FDESC_EXPR:
> -    case VA_ARG_EXPR:
> -    case TRY_CATCH_EXPR:
> -    case TRY_FINALLY_EXPR:
> -    case LABEL_EXPR:
> -    case GOTO_EXPR:
> -    case RETURN_EXPR:
> -    case EXIT_EXPR:
> -    case LOOP_EXPR:
> -    case PHI_NODE:
> -    case WITH_SIZE_EXPR:
> -    case OMP_CLAUSE:
> -    case OMP_RETURN:
> -    case OMP_CONTINUE:
> -    case OMP_SECTIONS_SWITCH:
> -    case OMP_ATOMIC_STORE:
> -      break;
> -
> -    /* We don't account constants for now.  Assume that the cost is amortized
> -       by operations that do use them.  We may re-consider this decision once
> -       we are able to optimize the tree before estimating its size and break
> -       out static initializers.  */
> -    case IDENTIFIER_NODE:
> -    case INTEGER_CST:
> -    case REAL_CST:
> -    case FIXED_CST:
> -    case COMPLEX_CST:
> -    case VECTOR_CST:
> -    case STRING_CST:
> -      *walk_subtrees = 0;
> -      return NULL;
> -
> -      /* CHANGE_DYNAMIC_TYPE_EXPR explicitly expands to nothing.  */
> -    case CHANGE_DYNAMIC_TYPE_EXPR:
> -      *walk_subtrees = 0;
> -      return NULL;
> -
> -    /* Try to estimate the cost of assignments.  We have three cases to
> -       deal with:
> -	1) Simple assignments to registers;
> -	2) Stores to things that must live in memory.  This includes
> -	   "normal" stores to scalars, but also assignments of large
> -	   structures, or constructors of big arrays;
> -	3) TARGET_EXPRs.
> -
> -       Let us look at the first two cases, assuming we have "a = b + C":
> -       <GIMPLE_MODIFY_STMT <var_decl "a">
> -       			   <plus_expr <var_decl "b"> <constant C>>
> -       If "a" is a GIMPLE register, the assignment to it is free on
> almost
> -       any target, because "a" usually ends up in a real register.  Hence
> -       the only cost of this expression comes from the PLUS_EXPR, and we
> -       can ignore the GIMPLE_MODIFY_STMT.
> -       If "a" is not a GIMPLE register, the assignment to "a" will most
> -       likely be a real store, so the cost of the GIMPLE_MODIFY_STMT is the cost
> -       of moving something into "a", which we compute using the function
> -       estimate_move_cost.
> -
> -       The third case deals with TARGET_EXPRs, for which the semantics are
> -       that a temporary is assigned, unless the TARGET_EXPR itself is being
> -       assigned to something else.  In the latter case we do not need the
> -       temporary.  E.g. in:
> -       		<GIMPLE_MODIFY_STMT <var_decl "a"> <target_expr>>, the
> -       GIMPLE_MODIFY_STMT is free.  */
> -    case INIT_EXPR:
> -    case GIMPLE_MODIFY_STMT:
> -      /* Is the right and side a TARGET_EXPR?  */
> -      if (TREE_CODE (GENERIC_TREE_OPERAND (x, 1)) == TARGET_EXPR)
> -	break;
> -      /* ... fall through ...  */
> -
> -    case TARGET_EXPR:
> -      x = GENERIC_TREE_OPERAND (x, 0);
> -      /* Is this an assignments to a register?  */
> -      if (is_gimple_reg (x))
> -	break;
> -      /* Otherwise it's a store, so fall through to compute the move cost.  */
> -
> -    case CONSTRUCTOR:
> -      d->count += estimate_move_cost (TREE_TYPE (x));
> -      break;
> -
>      /* Assign cost of 1 to usual operations.
>         ??? We may consider mapping RTL costs to this.  */
>      case COND_EXPR:
> @@ -2437,17 +2296,7 @@ estimate_num_insns_1 (tree *tp, int *wal
>      case VEC_INTERLEAVE_LOW_EXPR:
>  
>      case RESX_EXPR:
> -      d->count += 1;
> -      break;
> -
> -    case SWITCH_EXPR:
> -      /* Take into account cost of the switch + guess 2 conditional jumps for
> -         each case label.  
> -
> -	 TODO: once the switch expansion logic is sufficiently separated, we can
> -	 do better job on estimating cost of the switch.  */
> -      d->count += TREE_VEC_LENGTH (SWITCH_LABELS (x)) * 2;
> -      break;
> +      return 1;
>  
>      /* Few special cases of expensive operations.  This is useful
>         to avoid inlining on functions having too many of these.  */
> @@ -2461,112 +2310,215 @@ estimate_num_insns_1 (tree *tp, int *wal
>      case FLOOR_MOD_EXPR:
>      case ROUND_MOD_EXPR:
>      case RDIV_EXPR:
> -      d->count += d->weights->div_mod_cost;
> +      return weights->div_mod_cost;
> +
> +    default:
> +      /* We expect a copy assignment with no operator.  */
> +      gcc_assert (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS);
> +      return 0;
> +    }
> +}
> +
> +
> +/* Estimate number of instructions that will be created by expanding
> +   the statements in the statement sequence STMTS.
> +   WEIGHTS contains weights attributed to various constructs.  */
> +
> +static
> +int estimate_num_insns_seq (gimple_seq stmts, eni_weights *weights)
> +{
> +  int cost;
> +  gimple_stmt_iterator gsi;
> +
> +  cost = 0;
> +  for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
> +    cost += estimate_num_insns (gsi_stmt (gsi), weights);
> +
> +  return cost;
> +}
> +
> +
> +/* Estimate number of instructions that will be created by expanding STMT.
> +   WEIGHTS contains weights attributed to various constructs.  */
> +
> +int
> +estimate_num_insns (gimple stmt, eni_weights *weights)
> +{
> +  unsigned cost, i;
> +  enum gimple_code code = gimple_code (stmt);
> +  tree lhs;
> +
> +  switch (code)
> +    {
> +    case GIMPLE_ASSIGN:
> +      /* Try to estimate the cost of assignments.  We have three cases to
> +	 deal with:
> +	 1) Simple assignments to registers;
> +	 2) Stores to things that must live in memory.  This includes
> +	    "normal" stores to scalars, but also assignments of large
> +	    structures, or constructors of big arrays;
> +
> +	 Let us look at the first two cases, assuming we have "a = b + C":
> +	 <GIMPLE_ASSIGN <var_decl "a">
> +	        <plus_expr <var_decl "b"> <constant C>>
> +	 If "a" is a GIMPLE register, the assignment to it is free on almost
> +	 any target, because "a" usually ends up in a real register.  Hence
> +	 the only cost of this expression comes from the PLUS_EXPR, and we
> +	 can ignore the GIMPLE_ASSIGN.
> +	 If "a" is not a GIMPLE register, the assignment to "a" will most
> +	 likely be a real store, so the cost of the GIMPLE_ASSIGN is the cost
> +	 of moving something into "a", which we compute using the function
> +	 estimate_move_cost.  */
> +      lhs = gimple_assign_lhs (stmt);
> +      if (is_gimple_reg (lhs))
> +	cost = 0;
> +      else
> +	cost = estimate_move_cost (TREE_TYPE (lhs));
> +
> +      cost += estimate_operator_cost (gimple_assign_subcode (stmt), weights);
>        break;
> -    case CALL_EXPR:
> +
> +    case GIMPLE_COND:
> +      cost = 1 + estimate_operator_cost (gimple_cond_code (stmt), weights);
> +      break;
> +
> +    case GIMPLE_SWITCH:
> +      /* Take into account cost of the switch + guess 2 conditional jumps for
> +         each case label.  
> +
> +	 TODO: once the switch expansion logic is sufficiently separated, we can
> +	 do better job on estimating cost of the switch.  */
> +      cost = gimple_switch_num_labels (stmt) * 2;
> +      break;
> +
> +    case GIMPLE_CALL:
>        {
> -	tree decl = get_callee_fndecl (x);
> +	tree decl = gimple_call_fndecl (stmt);
>  
>  	if (decl && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_MD)
> -	  cost = d->weights->target_builtin_call_cost;
> +	  cost = weights->target_builtin_call_cost;
>  	else
> -	  cost = d->weights->call_cost;
> +	  cost = weights->call_cost;
>  	
>  	if (decl && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
>  	  switch (DECL_FUNCTION_CODE (decl))
>  	    {
>  	    case BUILT_IN_CONSTANT_P:
> -	      *walk_subtrees = 0;
> -	      return NULL_TREE;
> +	      return 0;
>  	    case BUILT_IN_EXPECT:
> -	      return NULL_TREE;
> +	      cost = 0;
> +	      break;
> +
>  	    /* Prefetch instruction is not expensive.  */
>  	    case BUILT_IN_PREFETCH:
> -	      cost = 1;
> +	      cost = weights->target_builtin_call_cost;
>  	      break;
> +
>  	    default:
>  	      break;
>  	    }
>  
> -	/* Our cost must be kept in sync with cgraph_estimate_size_after_inlining
> -	   that does use function declaration to figure out the arguments.  */
> +	/* Our cost must be kept in sync with
> +	   cgraph_estimate_size_after_inlining that does use function
> +	   declaration to figure out the arguments.  */
>  	if (!decl)
>  	  {
> -	    tree a;
> -	    call_expr_arg_iterator iter;
> -	    FOR_EACH_CALL_EXPR_ARG (a, iter, x)
> -	      d->count += estimate_move_cost (TREE_TYPE (a));
> +	    for (i = 0; i < gimple_call_num_args (stmt); i++)
> +	      {
> +		tree arg = gimple_call_arg (stmt, i);
> +		cost += estimate_move_cost (TREE_TYPE (arg));
> +	      }
>  	  }
>  	else
>  	  {
>  	    tree arg;
>  	    for (arg = DECL_ARGUMENTS (decl); arg; arg = TREE_CHAIN (arg))
> -	      d->count += estimate_move_cost (TREE_TYPE (arg));
> +	      cost += estimate_move_cost (TREE_TYPE (arg));
>  	  }
>  
> -	d->count += cost;
>  	break;
>        }
>  
> -    case OMP_PARALLEL:
> -    case OMP_FOR:
> -    case OMP_SECTIONS:
> -    case OMP_SINGLE:
> -    case OMP_SECTION:
> -    case OMP_MASTER:
> -    case OMP_ORDERED:
> -    case OMP_CRITICAL:
> -    case OMP_ATOMIC:
> -    case OMP_ATOMIC_LOAD:
> -      /* OpenMP directives are generally very expensive.  */
> -      d->count += d->weights->omp_cost;
> -      break;
> +    case GIMPLE_GOTO:
> +    case GIMPLE_LABEL:
> +    case GIMPLE_NOP:
> +    case GIMPLE_PHI:
> +    case GIMPLE_RETURN:
> +      return 0;
> +
> +    case GIMPLE_ASM:
> +    case GIMPLE_RESX:
> +      return 1;
> +
> +    case GIMPLE_BIND:
> +      return estimate_num_insns_seq (gimple_bind_body (stmt), weights);
> +
> +    case GIMPLE_EH_FILTER:
> +      return estimate_num_insns_seq (gimple_eh_filter_failure (stmt), weights);
> +
> +    case GIMPLE_CATCH:
> +      return estimate_num_insns_seq (gimple_catch_handler (stmt), weights);
> +
> +    case GIMPLE_TRY:
> +      return (estimate_num_insns_seq (gimple_try_eval (stmt), weights)
> +              + estimate_num_insns_seq (gimple_try_cleanup (stmt), weights));
> +
> +    /* OpenMP directives are generally very expensive.  */
> +    case GIMPLE_OMP_RETURN:
> +    case GIMPLE_OMP_SECTIONS_SWITCH:
> +    case GIMPLE_OMP_ATOMIC_STORE:
> +      return 0;
> +
> +    case GIMPLE_OMP_CONTINUE:
> +      return estimate_num_insns_seq (gimple_omp_body (stmt), weights);
> +
> +    case GIMPLE_OMP_ATOMIC_LOAD:
> +      return weights->omp_cost;
> +
> +    case GIMPLE_OMP_FOR:
> +      return (weights->omp_cost
> +              + estimate_num_insns_seq (gimple_omp_body (stmt), weights)
> +              + estimate_num_insns_seq (gimple_omp_for_pre_body (stmt), weights));
> +
> +    case GIMPLE_OMP_PARALLEL:
> +    case GIMPLE_OMP_CRITICAL:
> +    case GIMPLE_OMP_MASTER:
> +    case GIMPLE_OMP_ORDERED:
> +    case GIMPLE_OMP_SECTION:
> +    case GIMPLE_OMP_SECTIONS:
> +    case GIMPLE_OMP_SINGLE:
> +      return (weights->omp_cost
> +              + estimate_num_insns_seq (gimple_omp_body (stmt), weights));
>  
>      default:
>        gcc_unreachable ();
>      }
> -  return NULL;
> +
> +  return cost;
>  }
>  
> -/* Estimate number of instructions that will be created by expanding EXPR.
> -   WEIGHTS contains weights attributed to various constructs.  */
> +/* Estimate number of instructions that will be created by expanding
> +   function FNDECL.  WEIGHTS contains weights attributed to various
> +   constructs.  */
>  
>  int
> -estimate_num_insns (tree expr, eni_weights *weights)
> +estimate_num_insns_fn (tree fndecl, eni_weights *weights)
>  {
> -  struct pointer_set_t *visited_nodes;
> +  struct function *my_function = DECL_STRUCT_FUNCTION (fndecl);
> +  gimple_stmt_iterator bsi;
>    basic_block bb;
> -  block_stmt_iterator bsi;
> -  struct function *my_function;
> -  struct eni_data data;
> -
> -  data.count = 0;
> -  data.weights = weights;
> +  int n = 0;
>  
> -  /* If we're given an entire function, walk the CFG.  */
> -  if (TREE_CODE (expr) == FUNCTION_DECL)
> +  gcc_assert (my_function && my_function->cfg);
> +  FOR_EACH_BB_FN (bb, my_function)
>      {
> -      my_function = DECL_STRUCT_FUNCTION (expr);
> -      gcc_assert (my_function && my_function->cfg);
> -      visited_nodes = pointer_set_create ();
> -      FOR_EACH_BB_FN (bb, my_function)
> -	{
> -	  for (bsi = bsi_start (bb);
> -	       !bsi_end_p (bsi);
> -	       bsi_next (&bsi))
> -	    {
> -	      walk_tree (bsi_stmt_ptr (bsi), estimate_num_insns_1,
> -			 &data, visited_nodes);
> -	    }
> -	}
> -      pointer_set_destroy (visited_nodes);
> +      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
> +	n += estimate_num_insns (gsi_stmt (bsi), weights);
>      }
> -  else
> -    walk_tree_without_duplicates (&expr, estimate_num_insns_1, &data);
>  
> -  return data.count;
> +  return n;
>  }
> -#endif
> +
>  
>  /* Initializes weights used by estimate_num_insns.  */
>  
> Index: tree-inline.h
> ===================================================================
> --- tree-inline.h	(revision 132261)
> +++ tree-inline.h	(working copy)
> @@ -148,6 +148,7 @@ void clone_body (tree, tree, void *);
>  void save_body (tree, tree *, tree *);
>  int estimate_move_cost (tree type);
>  int estimate_num_insns (gimple, eni_weights *);
> +int estimate_num_insns_fn (tree, eni_weights *);
>  int count_insns_seq (gimple_seq, eni_weights *);
>  bool tree_versionable_function_p (tree);
>  void tree_function_versioning (tree, tree, varray_type, bool);
> Index: gimple.c
> ===================================================================
> --- gimple.c	(revision 132261)
> +++ gimple.c	(working copy)
> @@ -1725,11 +1725,18 @@ gimple_copy (gimple stmt)
>    enum gimple_code code = gimple_code (stmt);
>    size_t num_ops = gimple_num_ops (stmt);
>    gimple copy = gimple_alloc (code);
> +  unsigned i;
> +
>    memcpy (copy, stmt, gimple_size (code));
>    if (num_ops > 0)
>      {
>        gimple_alloc_ops (copy, num_ops);
> -      memcpy (gimple_ops (copy), gimple_ops (stmt), num_ops * sizeof (tree));
> +      for (i = 0; i < num_ops; i++)
> +	gimple_set_op (copy, i, unshare_expr (gimple_op (stmt, i)));
> +
> +      gimple_set_def_ops (copy, NULL);
> +      gimple_set_use_ops (copy, NULL);
> +      update_stmt (copy);
>      }
>  
>    return copy;
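
The gimple_copy change above is what lets gimple_duplicate_bb (in the
tree-cfg.c hunks below) switch from unshare_expr to gimple_copy: a flat
memcpy of the operand array left the copy sharing operand trees and
def/use operand caches with the original.  A sketch of the intended
guarantee, hedged:

    gimple dup = gimple_copy (stmt);
    /* DUP now owns unshared operand trees (unshare_expr on each op)
       and freshly rebuilt def/use caches (update_stmt), so later
       edits to DUP's operands cannot clobber STMT.  */
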
> Index: gimple.h
> ===================================================================
> --- gimple.h	(revision 132261)
> +++ gimple.h	(working copy)
> @@ -698,6 +698,13 @@ gimple_no_warning_p (const_gimple stmt)
>    return stmt->gsbase.no_warning;
>  }
>  
> +/* Set the no_warning flag of STMT to NO_WARNING.  */
> +
> +static inline void
> +gimple_set_no_warning (gimple stmt, bool no_warning)
> +{
> +  stmt->gsbase.no_warning = (unsigned) no_warning;
> +}
>  
>  /* Set the visited status on statement STMT to VISITED_P.  */
>  
> @@ -1432,7 +1439,7 @@ static inline void
>  gimple_cond_set_true_label (gimple gs, tree label)
>  {
>    GIMPLE_CHECK (gs, GIMPLE_COND);
> -  gcc_assert (TREE_CODE (label) == LABEL_DECL);
> +  gcc_assert (!label || TREE_CODE (label) == LABEL_DECL);
>    gimple_set_op (gs, 2, label);
>  }
>  
> @@ -1444,7 +1451,7 @@ static inline void
>  gimple_cond_set_false_label (gimple gs, tree label)
>  {
>    GIMPLE_CHECK (gs, GIMPLE_COND);
> -  gcc_assert (TREE_CODE (label) == LABEL_DECL);
> +  gcc_assert (!label || TREE_CODE (label) == LABEL_DECL);
>    gimple_set_op (gs, 3, label);
>  }
>  
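
Allowing a NULL label here is what the make_cond_expr_edges change (in
the tree-cfg.c diff below) relies on: once the true/false edges exist,
the labels on a GIMPLE_COND are redundant and can be dropped.
Illustrative only, with COND_STMT naming the statement:

    /* COND_STMT is the GIMPLE_COND ending the block; its outgoing
       edges have just been created.  */
    gimple_cond_set_true_label (cond_stmt, NULL_TREE);
    gimple_cond_set_false_label (cond_stmt, NULL_TREE);
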
> Index: tree-cfg.c
> ===================================================================
> --- tree-cfg.c	(revision 132261)
> +++ tree-cfg.c	(working copy)
> @@ -91,11 +91,8 @@ static void make_edges (void);
>  static void make_cond_expr_edges (basic_block);
>  static void make_gimple_switch_edges (basic_block);
>  static void make_goto_expr_edges (basic_block);
> -/* FIXME tuples.  */
> -#if 0
>  static edge gimple_redirect_edge_and_branch (edge, basic_block);
>  static edge gimple_try_redirect_by_replacing_jump (edge, basic_block);
> -#endif
>  static unsigned int split_critical_edges (void);
>  
>  /* Various helpers.  */
> @@ -671,6 +668,10 @@ make_cond_expr_edges (basic_block bb)
>    e = make_edge (bb, else_bb, EDGE_FALSE_VALUE);
>    if (e)
>      e->goto_locus = gimple_locus (else_stmt);
> +
> +  /* We do not need the labels anymore.  */
> +  gimple_cond_set_true_label (entry, NULL_TREE);
> +  gimple_cond_set_false_label (entry, NULL_TREE);
>  }
>  
>  
> @@ -993,16 +994,13 @@ cleanup_dead_labels (void)
>  	{
>  	case GIMPLE_COND:
>  	  {
> -	    tree new_true_label, new_false_label;
> -
> -	    new_true_label = main_block_label (gimple_cond_true_label (stmt));
> -	    new_false_label = main_block_label (gimple_cond_false_label (stmt));
> -
> -	    if (new_true_label)
> -	      gimple_cond_set_true_label (stmt, new_true_label);
> +	    tree true_label = gimple_cond_true_label (stmt);
> +	    tree false_label = gimple_cond_false_label (stmt);
>  
> -	    if (new_false_label)
> -	      gimple_cond_set_false_label (stmt, new_false_label);
> +	    if (true_label)
> +	      gimple_cond_set_true_label (stmt, main_block_label (true_label));
> +	    if (false_label)
> +	      gimple_cond_set_false_label (stmt, main_block_label (false_label));
>  	    break;
>  	  }
>  
> @@ -1321,7 +1319,7 @@ replace_uses_by (tree name, tree val)
>  static void
>  gimple_merge_blocks (basic_block a, basic_block b)
>  {
> -  gimple_stmt_iterator last, gsi;
> +  gimple_stmt_iterator last, gsi, psi;
>    gimple_seq phis = phi_nodes (b);
>  
>    if (dump_file)
> @@ -1330,12 +1328,12 @@ gimple_merge_blocks (basic_block a, basi
>    /* Remove all single-valued PHI nodes from block B of the form
>       V_i = PHI <V_j> by propagating V_j to all the uses of V_i.  */
>    gsi = gsi_last_bb (a);
> -  for (gsi = gsi_start (phis); !gsi_end_p (gsi); )
> +  for (psi = gsi_start (phis); !gsi_end_p (psi); )
>      {
> -      gimple phi = gsi_stmt (gsi);
> +      gimple phi = gsi_stmt (psi);
>        tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);
>        gimple copy;
> -      bool may_replace_uses = may_propagate_copy (def, use);
> +      bool may_replace_uses = !is_gimple_reg (def) || may_propagate_copy (def, use);
>  
>        /* In case we maintain loop closed ssa form, do not propagate arguments
>  	 of loop exit phi nodes.  */
> @@ -1357,7 +1355,7 @@ gimple_merge_blocks (basic_block a, basi
>  	  copy = gimple_build_assign (def, use);
>  	  gsi_insert_after (&gsi, copy, GSI_NEW_STMT);
>  	  SSA_NAME_DEF_STMT (def) = copy;
> -          remove_phi_node (&gsi, false);
> +          remove_phi_node (&psi, false);
>  	}
>        else
>          {
> @@ -1377,7 +1375,7 @@ gimple_merge_blocks (basic_block a, basi
>  	  else
>              replace_uses_by (def, use);
>  
> -          remove_phi_node (&gsi, true);
> +          remove_phi_node (&psi, true);
>          }
>      }
>  
> @@ -4335,9 +4333,6 @@ gimple_make_forwarder_block (edge fallth
>        add_phi_arg (new_phi, gimple_phi_result (phi), fallthru);
>      }
>  
> -  /* Ensure that the PHI node chain is in the same order.  */
> -  set_phi_nodes (bb, gimple_seq_reverse (phi_nodes (bb)));
> -
>    /* Add the arguments we have stored on edges.  */
>    FOR_EACH_EDGE (e, ei, bb->preds)
>      {
> @@ -4355,18 +4350,17 @@ gimple_make_forwarder_block (edge fallth
>  tree
>  gimple_block_label (basic_block bb ATTRIBUTE_UNUSED)
>  {
> -/* FIXME tuples.  */
> -#if 0
> -  gimple_stmt_iterator i, s = gsi_start (bb);
> +  gimple_stmt_iterator i, s = gsi_start_bb (bb);
>    bool first = true;
> -  tree label, stmt;
> +  tree label;
> +  gimple stmt;
>  
>    for (i = s; !gsi_end_p (i); first = false, gsi_next (&i))
>      {
>        stmt = gsi_stmt (i);
> -      if (TREE_CODE (stmt) != LABEL_EXPR)
> +      if (gimple_code (stmt) != GIMPLE_LABEL)
>  	break;
> -      label = LABEL_EXPR_LABEL (stmt);
> +      label = gimple_label_label (stmt);
>        if (!DECL_NONLOCAL (label))
>  	{
>  	  if (!first)
> @@ -4376,17 +4370,12 @@ gimple_block_label (basic_block bb ATTRI
>      }
>  
>    label = create_artificial_label ();
> -  stmt = build1 (LABEL_EXPR, void_type_node, label);
> +  stmt = gimple_build_label (label);
>    gsi_insert_before (&s, stmt, GSI_NEW_STMT);
>    return label;
> -#else
> -  gimple_unreachable ();
> -#endif
>  }
>  
>  
> -/* FIXME tuples.  */
> -#if 0
>  /* Attempt to perform edge redirection by replacing a possibly complex
>     jump instruction by a goto or by removing the jump completely.
>     This can apply only if all edges now point to the same block.  The
> @@ -4397,7 +4386,7 @@ static edge
>  gimple_try_redirect_by_replacing_jump (edge e, basic_block target)
>  {
>    basic_block src = e->src;
> -  gimple_stmt_iterator *i;
> +  gimple_stmt_iterator i;
>    gimple stmt;
>  
>    /* We can replace or remove a complex jump only when we have exactly
> @@ -4408,15 +4397,15 @@ gimple_try_redirect_by_replacing_jump (e
>        || EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target)
>      return NULL;
>  
> -  i = gsi_last (bb_seq (src));
> +  i = gsi_last_bb (src);
>    if (gsi_end_p (i))
>      return NULL;
>  
>    stmt = gsi_stmt (i);
>  
> -  if (gimple_code (stmt) == GIMPLE_COND || gimple_code (stmt) == SWITCH_EXPR)
> +  if (gimple_code (stmt) == GIMPLE_COND || gimple_code (stmt) == GIMPLE_SWITCH)
>      {
> -      gsi_remove (i, true);
> +      gsi_remove (&i, true);
>        e = ssa_redirect_edge (e, target);
>        e->flags = EDGE_FALLTHRU;
>        return e;
> @@ -4433,7 +4422,7 @@ static edge
>  gimple_redirect_edge_and_branch (edge e, basic_block dest)
>  {
>    basic_block bb = e->src;
> -  gimple_stmt_iterator *gsi;
> +  gimple_stmt_iterator gsi;
>    edge ret;
>    gimple stmt;
>  
> @@ -4447,7 +4436,7 @@ gimple_redirect_edge_and_branch (edge e,
>    if (e->dest == dest)
>      return NULL;
>  
> -  gsi = gsi_last (bb_seq (bb));
> +  gsi = gsi_last_bb (bb);
>    stmt = gsi_end_p (gsi) ? NULL : gsi_stmt (gsi);
>  
>    switch (stmt ? gimple_code (stmt) : ERROR_MARK)
> @@ -4463,8 +4452,10 @@ gimple_redirect_edge_and_branch (edge e,
>  
>      case GIMPLE_SWITCH:
>        {
> -        tree cases = get_cases_for_edge (e, stmt);
>  	tree label = gimple_block_label (dest);
> +/* FIXME tuples.  */
> +#if 0
> +        tree cases = get_cases_for_edge (e, stmt);
>  
>  	/* If we have a list of cases associated with E, then use it
>  	   as it's a lot faster than walking the entire case vector.  */
> @@ -4492,6 +4483,7 @@ gimple_redirect_edge_and_branch (edge e,
>  	      }
>  	  }
>  	else
> +#endif
>  	  {
>  	    size_t i, n = gimple_switch_num_labels (stmt);
>  
> @@ -4507,7 +4499,7 @@ gimple_redirect_edge_and_branch (edge e,
>        }
>  
>      case GIMPLE_RETURN:
> -      gsi_remove (gsi, true);
> +      gsi_remove (&gsi, true);
>        e->flags |= EDGE_FALLTHRU;
>        break;
>  
> @@ -4532,7 +4524,6 @@ gimple_redirect_edge_and_branch (edge e,
>  
>    return e;
>  }
> -#endif
>  
>  /* Returns true if it is possible to remove edge E by redirecting
>     it to the destination of the other edge from E->src.  */
> @@ -4548,8 +4539,6 @@ gimple_can_remove_branch_p (const_edge e
>  
>  /* Simple wrapper, as we can always redirect fallthru edges.  */
>  
> -/* FIXME tuples.  */
> -#if 0
>  static basic_block
>  gimple_redirect_edge_and_branch_force (edge e, basic_block dest)
>  {
> @@ -4558,20 +4547,18 @@ gimple_redirect_edge_and_branch_force (e
>  
>    return NULL;
>  }
> -#endif
>  
>  
>  /* Splits basic block BB after statement STMT (but at least after the
>     labels).  If STMT is NULL, BB is split just after the labels.  */
>  
> -/* FIXME tuples.  */
> -#if 0
>  static basic_block
>  gimple_split_block (basic_block bb, void *stmt)
>  {
>    gimple_stmt_iterator gsi;
> -  gimple_stmt_iterator *gsi_tgt;
> -  tree act, list;
> +  gimple_stmt_iterator gsi_tgt;
> +  gimple act;
> +  gimple_seq list;
>    basic_block new_bb;
>    edge e;
>    edge_iterator ei;
> @@ -4588,10 +4575,10 @@ gimple_split_block (basic_block bb, void
>      stmt = NULL;
>  
>    /* Move everything from GSI to the new basic block.  */
> -  for (gsi = gsi_start (bb); !gsi_end_p (gsi); gsi_next (&gsi))
> +  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
>      {
>        act = gsi_stmt (gsi);
> -      if (TREE_CODE (act) == LABEL_EXPR)
> +      if (gimple_code (act) == GIMPLE_LABEL)
>  	continue;
>  
>        if (!stmt)
> @@ -4611,7 +4598,7 @@ gimple_split_block (basic_block bb, void
>       brings ugly quadratic memory consumption in the inliner.  
>       (We are still quadratic since we need to update stmt BB pointers,
>       sadly.)  */
> -  list = gsi_split_seq_before (&gsi.gsi);
> +  list = gsi_split_seq_before (&gsi);
>    set_bb_seq (new_bb, list);
>    for (gsi_tgt = gsi_start (list);
>         !gsi_end_p (gsi_tgt); gsi_next (&gsi_tgt))
> @@ -4619,7 +4606,6 @@ gimple_split_block (basic_block bb, void
>  
>    return new_bb;
>  }
> -#endif
>  
>  
>  /* Moves basic block BB after block AFTER.  */
> @@ -4645,48 +4631,43 @@ gimple_can_duplicate_bb_p (const_basic_b
>    return true;
>  }
>  
> -
>  /* Create a duplicate of the basic block BB.  NOTE: This does not
>     preserve SSA form.  */
>  
> -/* FIXME tuples.  */
> -#if 0
>  static basic_block
>  gimple_duplicate_bb (basic_block bb)
>  {
>    basic_block new_bb;
>    gimple_stmt_iterator gsi, gsi_tgt;
> -  tree phi;
> +  gimple_seq phis = phi_nodes (bb);
> +  gimple phi, stmt, copy;
>  
>    new_bb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
>  
>    /* Copy the PHI nodes.  We ignore PHI node arguments here because
>       the incoming edges have not been setup yet.  */
> -  for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
> +  for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
>      {
> -      tree copy = create_phi_node (PHI_RESULT (phi), new_bb);
> -      create_new_def_for (PHI_RESULT (copy), copy, PHI_RESULT_PTR
> (copy));
> +      phi = gsi_stmt (gsi);
> +      copy = create_phi_node (gimple_phi_result (phi), new_bb);
> +      create_new_def_for (gimple_phi_result (copy), copy,
> +			  gimple_phi_result_ptr (copy));
>      }
>  
> -  /* Keep the chain of PHI nodes in the same order so that they can be
> -     updated by ssa_redirect_edge.  */
> -  set_phi_nodes (new_bb, phi_reverse (phi_nodes (new_bb)));
> -
> -  gsi_tgt = gsi_start (new_bb);
> -  for (gsi = gsi_start (bb); !gsi_end_p (gsi); gsi_next (&gsi))
> +  gsi_tgt = gsi_start_bb (new_bb);
> +  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
>      {
>        def_operand_p def_p;
>        ssa_op_iter op_iter;
> -      tree stmt, copy;
>        int region;
>  
>        stmt = gsi_stmt (gsi);
> -      if (TREE_CODE (stmt) == LABEL_EXPR)
> +      if (gimple_code (stmt) == GIMPLE_LABEL)
>  	continue;
>  
>        /* Create a new copy of STMT and duplicate STMT's virtual
>  	 operands.  */
> -      copy = unshare_expr (stmt);
> +      copy = gimple_copy (stmt);
>        gsi_insert_after (&gsi_tgt, copy, GSI_NEW_STMT);
>        copy_virtual_operands (copy, stmt);
>        region = lookup_stmt_eh_region (stmt);
> @@ -4702,7 +4683,6 @@ gimple_duplicate_bb (basic_block bb)
>  
>    return new_bb;
>  }
> -#endif
>  
>  /* Adds phi node arguments for edge E_COPY after basic block duplication.  */
>  
> @@ -4867,12 +4847,7 @@ gimple_duplicate_sese_region (edge entry
>        free_region_copy = true;
>      }
>  
> -  /* FIXME tuples */
> -#if 0
>    gcc_assert (!need_ssa_update_p ());
> -#else
> -  gimple_unreachable ();
> -#endif
>  
>    /* Record blocks outside the region that are dominated by something
>       inside.  */
> @@ -4942,13 +4917,8 @@ gimple_duplicate_sese_region (edge entry
>    /* Add the other PHI node arguments.  */
>    add_phi_args_after_copy (region_copy, n_region, NULL);
>  
> -  /* FIXME tuples.  */
> -#if 0
>    /* Update the SSA web.  */
>    update_ssa (TODO_update_ssa);
> -#else
> -  gimple_unreachable ();
> -#endif
>  
>    if (free_region_copy)
>      free (region_copy);
> @@ -6529,18 +6499,18 @@ struct cfg_hooks gimple_cfg_hooks = {
>    gimple_verify_flow_info,
>    gimple_dump_bb,		/* dump_bb  */
>    create_bb,			/* create_basic_block  */
> -  0 /* FIXME tuples gimple_redirect_edge_and_branch */,/* redirect_edge_and_branch  */
> -  0 /* FIXME tuples gimple_redirect_edge_and_branch_force */,/* redirect_edge_and_branch_force  */
> +  gimple_redirect_edge_and_branch, /* redirect_edge_and_branch  */
> +  gimple_redirect_edge_and_branch_force, /* redirect_edge_and_branch_force  */
>    gimple_can_remove_branch_p,	/* can_remove_branch_p  */
>    remove_bb,			/* delete_basic_block  */
> -  0 /* FIXME tuples gimple_split_block */,		/* split_block  */
> +  gimple_split_block,		/* split_block  */
>    gimple_move_block_after,	/* move_block_after  */
>    gimple_can_merge_blocks_p,	/* can_merge_blocks_p  */
>    gimple_merge_blocks,		/* merge_blocks  */
>    gimple_predict_edge,		/* predict_edge  */
>    gimple_predicted_by_p,		/* predicted_by_p  */
>    gimple_can_duplicate_bb_p,	/* can_duplicate_block_p  */
> -  0 /* FIXME tuples gimple_duplicate_bb */,		/* duplicate_block  */
> +  gimple_duplicate_bb,		/* duplicate_block  */
>    gimple_split_edge,		/* split_edge  */
>    gimple_make_forwarder_block,	/* make_forward_block  */
>    NULL,				/* tidy_fallthru_edge  */
> Index: tree-ssa-operands.c
> ===================================================================
> --- tree-ssa-operands.c	(revision 132261)
> +++ tree-ssa-operands.c	(working copy)
> @@ -2549,6 +2549,9 @@ copy_virtual_operands (gimple dest, gimp
>    struct voptype_d vuse;
>    struct voptype_d vdef;
>  
> +  if (!gimple_has_mem_ops (src))
> +    return;
> +
>    gimple_set_vdef_ops (dest, NULL);
>    gimple_set_vuse_ops (dest, NULL);
>  
> 
> 


-----
Ray

LegacyUSA 
http://www.bakesmart.net Bakery Software 
http://www.legacyusa.net Print Estimating Software 



