[new-regalloc-branch] Whitespace cleanup for ra* files

Andreas Jaeger aj@suse.de
Fri Jun 21 03:07:00 GMT 2002


The appended patch removes whitespace in the ra* files on the
new-regalloc-branch.

Committed after approval from Michael Matz,

Andreas

2002-06-21  Andreas Jaeger  <aj@suse.de>

	* ra.c: Cleanup whitespace.
	* ra.h: Cleanup whitespace.
	* ra-build.c: Cleanup whitespace.
	* ra-colorize.c: Cleanup whitespace.
	* ra-debug.c: Cleanup whitespace.
	* ra-ranges.c: Cleanup whitespace.
	* ra-rewrite.c: Cleanup whitespace.

============================================================
Index: gcc/ra-build.c
--- gcc/ra-build.c	20 Jun 2002 19:26:31 -0000	1.1.2.1
+++ gcc/ra-build.c	21 Jun 2002 09:34:34 -0000
@@ -381,7 +381,7 @@ union_web_part_roots (r1, r2)
     {
       /* The new root is the smaller (pointerwise) of both.  This is crucial
          to make the construction of webs from web parts work (so, when
-	 scanning all parts, we see the roots before all it's childs).  
+	 scanning all parts, we see the roots before all it's childs).
          Additionally this ensures, that if the web has a def at all, than
          the root is a def (because all def parts are before use parts in the
 	 web_parts[] array), or put another way, as soon, as the root of a
@@ -511,7 +511,7 @@ struct curr_use {
    and a is a multi-word pseudo.  If DEF or USE are hardregs, they are in
    wordmode, so we don't need to check for further hardregs which would result
    from wider references.  We are never called with paradoxical subregs.
- 
+
    This returns:
    0 for no common bits,
    1 if DEF and USE exactly cover the same bytes,
@@ -676,7 +676,7 @@ live_out_1 (df, use, insn)
 		 the web parts.  */
 	      wp = union_web_parts (wp, &web_parts[DF_REF_ID (ref)]);
 	    }
-	  else 
+	  else
 	    {
 	      unsigned HOST_WIDE_INT undef = use->undefined;
 	      if (regno == source_regno)
@@ -1051,7 +1051,7 @@ prune_hardregs_for_mode (s, mode)
      enum machine_mode mode;
 {
   /* We work by first noting _all_ hardregs for which MODE is OK (including
-     it's consecutive regs), and later intersect that with *S.  
+     it's consecutive regs), and later intersect that with *S.
      We are not interested only in the beginning of a multi-reg, but in
      all the hardregs involved.  May be HARD_REGNO_MODE_OK() only ok's
      for beginnings.  */
@@ -1217,8 +1217,8 @@ add_subweb_2 (web, size_word)
      unsigned int size_word;
 {
   /* To get a correct mode for the to be produced subreg, we don't want to
-     simply do a mode_for_size() for the mode_class of the whole web.  
-     Suppose we deal with a CDImode web, but search for a 8 byte part.  
+     simply do a mode_for_size() for the mode_class of the whole web.
+     Suppose we deal with a CDImode web, but search for a 8 byte part.
      Now mode_for_size() would only search in the class MODE_COMPLEX_INT
      and would find CSImode which probably is not what we want.  Instead
      we want DImode, which is in a completely other class.  For this to work
@@ -1361,7 +1361,7 @@ add_conflict_edge (from, to)
       struct sub_conflict *sl;
       struct conflict_link *cl = pfrom->conflict_list;
       int may_delete = 1;
-      
+
       /* This can happen when subwebs of one web conflict with each
 	 other.  In live_out_1() we created such conflicts between yet
 	 undefined webparts and defs of parts which didn't overlap with the
@@ -1433,7 +1433,7 @@ record_conflict (web1, web2)
     return;
   /* Conflicts with hardregs, which are not even a candidate
      for this pseudo are also pointless.  */
-  if ((web1->type == PRECOLORED 
+  if ((web1->type == PRECOLORED
        && ! TEST_HARD_REG_BIT (web2->usable_regs, web1->regno))
       || (web2->type == PRECOLORED
 	  && ! TEST_HARD_REG_BIT (web1->usable_regs, web2->regno)))
@@ -1884,18 +1884,18 @@ reset_conflicts (void)
       if (web->orig_conflict_list)
 	abort ();
       if (web->type != PRECOLORED && !web->old_web)
- 	{
+	{
 	  *pcl = NULL;
- 	  /* Useless conflicts will be rebuilt completely.  */
- 	  if (bitmap_first_set_bit (web->useless_conflicts) >= 0)
- 	    abort ();
- 	}
+	  /* Useless conflicts will be rebuilt completely.  */
+	  if (bitmap_first_set_bit (web->useless_conflicts) >= 0)
+	    abort ();
+	}
       else
 	{
- 	  /* Useless conflicts with new webs will be rebuilt if they
- 	     are still there.  */
- 	  bitmap_operation (web->useless_conflicts, web->useless_conflicts,
- 			    newwebs, BITMAP_AND_COMPL);
+	  /* Useless conflicts with new webs will be rebuilt if they
+	     are still there.  */
+	  bitmap_operation (web->useless_conflicts, web->useless_conflicts,
+			    newwebs, BITMAP_AND_COMPL);
 	  for (cl = web->conflict_list; cl; cl = cl->next)
 	    {
 	      if (cl->t->old_web || cl->t->type == PRECOLORED)
@@ -1941,14 +1941,14 @@ check_conflict_numbers (void)
     }
 }
 
-/* Convert the conflicts between web parts to conflicts between full webs.  
+/* Convert the conflicts between web parts to conflicts between full webs.
 
    This can't be done in parts_to_webs(), because for recording conflicts
    between webs we need to know their final usable_regs set, which is used
-   to discard non-conflicts (between webs having no hard reg in common).  
+   to discard non-conflicts (between webs having no hard reg in common).
    But this is set for spill temporaries only after the webs itself are
    built.  Until then the usable_regs set is based on the pseudo regno used
-   in this web, which may contain far less registers than later determined.  
+   in this web, which may contain far less registers than later determined.
    This would result in us loosing conflicts (due to record_conflict()
    thinking that a web can only be allocated to the current usable_regs,
    whereas later this is extended) leading to colorings, where some regs which
@@ -2014,7 +2014,7 @@ conflicts_between_webs (df)
 	       set/test.  The current approach needs more memory, but
 	       locality is large.  */
 	    pass++;
-	      
+
 	    /* Note, that there are only defs in the conflicts bitset.  */
 	    EXECUTE_IF_SET_IN_BITMAP (
 	      cl->conflicts, 0, j,
@@ -2317,7 +2317,7 @@ want_to_remat (x)
   /* XXX For now we don't allow any clobbers to be added, not just no
      hardreg clobbers.  */
   return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
-	  && (num_clobbers == 0 
+	  && (num_clobbers == 0
 	      /*|| ! added_clobbers_hard_reg_p (icode)*/));
 }
 
@@ -2446,7 +2446,7 @@ determine_web_costs (void)
     }
 }
 
-/* Detect webs which are set in a conditional jump insn (possibly a 
+/* Detect webs which are set in a conditional jump insn (possibly a
    decrement-and-branch type of insn), and mark them not to be
    spillable.  The stores for them would need to be placed on edges,
    which destroys the CFG.  (Somewhen we want to deal with that XXX)  */
@@ -2581,7 +2581,7 @@ handle_asm_insn (df, insn)
     return;
   pat = PATTERN (insn);
   CLEAR_HARD_REG_SET (clobbered);
-  
+
   if (GET_CODE (pat) == PARALLEL)
     for (i = 0; i < XVECLEN (pat, 0); i++)
       {
@@ -2689,10 +2689,10 @@ handle_asm_insn (df, insn)
 	      default:
 		cls =
 		  (int) reg_class_subunion[cls][(int)
-		  				REG_CLASS_FROM_LETTER (c)];
+						REG_CLASS_FROM_LETTER (c)];
 	    }
 	}
-      
+
       /* Now make conflicts between this web, and all hardregs, which
 	 are not allowed by the constraints.  */
       if (nothing_allowed)
@@ -2784,7 +2784,7 @@ ra_build_realloc (df)
   for (i = 0; i < last_def_id + last_use_id; i++)
     {
       struct web_part *dest = &web_parts[i < last_def_id
-	  				 ? i : (df->def_id + i - last_def_id)];
+					 ? i : (df->def_id + i - last_def_id)];
       struct web_part *up;
       *dest = last_web_parts[i];
       up = dest->uplink;
@@ -2865,7 +2865,7 @@ ra_build_free (void)
 {
   struct dlist *d;
   unsigned int i;
-  
+
   /* We must also cycle over subwebs.  */
   for (i = 0; i < num_webs; i++)
     {
@@ -2903,7 +2903,7 @@ ra_build_free (void)
     }
 
   wl_moves = NULL;
-  
+
   free (id2web);
   free (move_handled);
   sbitmap_free (sup_igraph);
============================================================
Index: gcc/ra-colorize.c
--- gcc/ra-colorize.c	20 Jun 2002 19:26:31 -0000	1.1.2.1
+++ gcc/ra-colorize.c	21 Jun 2002 09:34:34 -0000
@@ -484,7 +484,7 @@ merge_moves (u, v)
 {
   regset seen;
   struct move_list *ml;
-  
+
   seen = BITMAP_XMALLOC ();
   for (ml = u->moves; ml; ml = ml->next)
     bitmap_set_bit (seen, INSN_UID (ml->move->insn));
@@ -739,7 +739,7 @@ combine (u, v)
 	    decrement_degree (pweb, 1 + v->add_hardregs);
 	}
     }
-  
+
   /* Now merge the usable_regs together.  */
   /* XXX That merging might normally make it necessary to
      adjust add_hardregs, which also means to adjust neighbors.  This can
@@ -759,7 +759,7 @@ combine (u, v)
      possible hardreg in common), so abort.  */
   if (!u->num_freedom)
     abort();
-  
+
   if (u->num_conflicts >= NUM_REGS (u)
       && (u->type == FREEZE || simplify_p (u->type)))
     {
@@ -787,7 +787,7 @@ coalesce (void)
   struct move *m = DLIST_MOVE (d);
   struct web *source = alias (m->source_web);
   struct web *target = alias (m->target_web);
-  
+
   if (target->type == PRECOLORED)
     {
       struct web *h = source;
@@ -948,7 +948,7 @@ color_usable_p (c, dont_begin_colors, fr
     }
   return 0;
 }
-     
+
 /* Searches in FREE_COLORS for a block of hardregs of the right length
    for MODE.  If it needs more than one hardreg it prefers blocks beginning
    at an even hardreg, and only gives an odd begin reg if no other
@@ -963,7 +963,7 @@ get_free_reg (dont_begin_colors, free_co
   int pref_reg = -1;
   int pref_reg_order = INT_MAX;
   int last_resort_reg_order = INT_MAX;
-  
+
   for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
     if (!TEST_HARD_REG_BIT (dont_begin_colors, c)
 	&& TEST_HARD_REG_BIT (free_colors, c)
@@ -973,10 +973,10 @@ get_free_reg (dont_begin_colors, free_co
 	size = HARD_REGNO_NREGS (c, mode);
 	for (i = 1; i < size && TEST_HARD_REG_BIT (free_colors, c + i); i++);
 	if (i != size)
- 	  {
- 	    c += i;
- 	    continue;
- 	  }
+	  {
+	    c += i;
+	    continue;
+	  }
 	if (i == size)
 	  {
 	    if (size < 2 || (c & 1) == 0)
@@ -1177,7 +1177,7 @@ colorize_one_web (web, hard)
   int best_long_blocks = -1;
   HARD_REG_SET fat_colors;
   HARD_REG_SET bias;
-  
+
   if (web->regno >= max_normal_pseudo)
     hard = 0;
 
@@ -1221,14 +1221,14 @@ colorize_one_web (web, hard)
   while (1)
     {
       HARD_REG_SET call_clobbered;
-	
+
       /* Here we choose a hard-reg for the current web.  For non spill
          temporaries we first search in the hardregs for it's prefered
 	 class, then, if we found nothing appropriate, in those of the
 	 alternate class.  For spill temporaries we only search in
 	 usable_regs of this web (which is probably larger than that of
 	 the preferred or alternate class).  All searches first try to
-	 find a non-call-clobbered hard-reg.  
+	 find a non-call-clobbered hard-reg.
          XXX this should be more finegraned... First look into preferred
          non-callclobbered hardregs, then _if_ the web crosses calls, in
          alternate non-cc hardregs, and only _then_ also in preferred cc
@@ -1266,7 +1266,7 @@ colorize_one_web (web, hard)
       if (c < 0)
 	c = get_biased_reg (dont_begin, bias, web->prefer_colors,
 			  colors, PSEUDO_REGNO_MODE (web->regno));
-      
+
       if (/*!web->use_my_regs &&*/ c < 0)
 	{
 	  if (web->use_my_regs)
@@ -1281,7 +1281,7 @@ colorize_one_web (web, hard)
 #endif
 	  COPY_HARD_REG_SET (call_clobbered, colors);
 	  AND_HARD_REG_SET (call_clobbered, call_used_reg_set);
-	  
+
 	  c = get_biased_reg (dont_begin, bias, web->prefer_colors,
 			    call_clobbered, PSEUDO_REGNO_MODE (web->regno));
 	  if (c < 0)
@@ -1349,7 +1349,7 @@ colorize_one_web (web, hard)
 	 can result in endless iteration spilling the same register again and
 	 again.  That's why we try to find a neighbor, which spans more
 	 instructions that ourself, and got a color, and try to spill _that_.
-	 
+
 	 if (DLIST_WEB (d)->was_spilled < 0)
 	 abort (); */
       if (hard && (!web->was_spilled || web->spill_temp))
@@ -1387,7 +1387,7 @@ colorize_one_web (web, hard)
 		   heavy, that's why only done at the fifth pass.  */
 		if (loop > 3 && web->spill_temp)
 		  w = alias (w);
-	        if (w->type == COLORED 
+	        if (w->type == COLORED
 		    && (!w->spill_temp || (loop > 2 && w->spill_temp == 2))
 		    && (!w->is_coalesced || (loop > 1 && web->spill_temp))
 		    && (w->was_spilled || loop > 0)
@@ -2161,7 +2161,7 @@ restore_conflicts_from_coalesce (web)
     }
 
   /* We must restore usable_regs because record_conflict will use it.  */
-  COPY_HARD_REG_SET (web->usable_regs, web->orig_usable_regs); 
+  COPY_HARD_REG_SET (web->usable_regs, web->orig_usable_regs);
   /* We might have deleted some conflicts above, which really are still
      there (diamond pattern coalescing).  This is because we don't reference
      count interference edges but some of them were the result of different
@@ -2387,7 +2387,7 @@ aggressive_coalesce (void)
 	      {
 	        put_move (m, MV_COALESCED);
 		add_web_pair_cost (s, t, BLOCK_FOR_INSN (m->insn)->frequency,
-				   0); 
+				   0);
 	      }
 	    else if (s->type == PRECOLORED)
 	      /* It is !ok(t, s).  But later when coloring the graph it might
@@ -2577,7 +2577,7 @@ extended_coalesce_2 (void)
 				    source->id * num_webs + dest->id)
 		      && hard_regs_intersect_p (&source->usable_regs,
 						&dest->usable_regs))
-		    add_web_pair_cost (dest, source, 
+		    add_web_pair_cost (dest, source,
 				       BLOCK_FOR_INSN (insn)->frequency,
 				       dest->num_conflicts
 				       + source->num_conflicts);
============================================================
Index: gcc/ra-debug.c
--- gcc/ra-debug.c	20 Jun 2002 19:26:31 -0000	1.1.2.1
+++ gcc/ra-debug.c	21 Jun 2002 09:34:34 -0000
@@ -66,7 +66,7 @@ ra_debug_msg VPARAMS ((unsigned int leve
 #ifndef ANSI_PROTOTYPES
       format = va_arg (ap, const char *);
 #endif
-      
+
       vfprintf (rtl_dump_file, format, ap);
       va_end (ap);
     }
@@ -465,7 +465,7 @@ ra_print_rtx (file, x, with_pn)
 
       /* Different things of class 'x' */
       case SUBREG: ra_print_rtx_object (file, x); break;
-      case STRICT_LOW_PART: 
+      case STRICT_LOW_PART:
 		   fputs ("low(", file);
 		   ra_print_rtx (file, XEXP (x, 0), 0);
 		   fputs (")", file);
@@ -695,7 +695,7 @@ void
 dump_igraph_machine (void)
 {
   unsigned int i;
-  
+
   if (!rtl_dump_file || (debug_new_regalloc & DUMP_IGRAPH_M) == 0)
     return;
   ra_debug_msg (DUMP_IGRAPH_M, "g %d %d\n", num_webs - num_subwebs,
@@ -829,7 +829,7 @@ dump_ra (df)
   struct dlist *d;
   if (!rtl_dump_file || (debug_new_regalloc & DUMP_RESULTS) == 0)
     return;
-    
+
   ra_debug_msg (DUMP_RESULTS, "\nColored:\n");
   for (d = WEBS(COLORED); d; d = d->next)
     {
@@ -885,7 +885,7 @@ dump_static_insn_cost (file, message, pr
 	     sets only.  */
 	  if (INSN_P (insn) && ((set = single_set (insn)) != NULL))
 	    {
-	      rtx src = SET_SRC (set); 
+	      rtx src = SET_SRC (set);
 	      rtx dest = SET_DEST (set);
 	      struct cost *pcost = NULL;
 	      overall.cost += block_cost;
@@ -944,13 +944,13 @@ web_conflicts_p (web1, web2)
 {
   if (web1->type == PRECOLORED && web2->type == PRECOLORED)
     return 0;
-  
+
   if (web1->type == PRECOLORED)
     return TEST_HARD_REG_BIT (web2->usable_regs, web1->regno);
 
   if (web2->type == PRECOLORED)
     return TEST_HARD_REG_BIT (web1->usable_regs, web2->regno);
-    
+
   return hard_regs_intersect_p (&web1->usable_regs, &web2->usable_regs);
 }
 
@@ -960,13 +960,13 @@ dump_web_insns (web)
      struct web* web;
 {
   unsigned int i;
-  
+
   ra_debug_msg (DUMP_EVER, "Web: %i(%i)+%i class: %s freedom: %i degree %i\n",
 	     web->id, web->regno, web->add_hardregs,
 	     reg_class_names[web->regclass],
 	     web->num_freedom, web->num_conflicts);
   ra_debug_msg (DUMP_EVER, "   def insns:");
-  
+
   for (i = 0; i < web->num_defs; ++i)
     {
       ra_debug_msg (DUMP_EVER, " %d ", INSN_UID (web->defs[i]->insn));
@@ -993,14 +993,14 @@ dump_web_conflicts (web)
 	     web->id, web->regno, web->add_hardregs,
 	     reg_class_names[web->regclass],
 	     web->num_freedom, web->num_conflicts);
-  
+
   for (def2 = 0; def2 < num_webs; def2++)
     if (TEST_BIT (igraph, igraph_index (web->id, def2)) && web->id != def2)
       {
 	if ((num % 9) == 5)
 	  ra_debug_msg (DUMP_EVER, "\n             ");
 	num++;
-	
+
 	ra_debug_msg (DUMP_EVER, " %d(%d)", def2, id2web[def2]->regno);
 	if (id2web[def2]->add_hardregs)
 	  ra_debug_msg (DUMP_EVER, "+%d", id2web[def2]->add_hardregs);
@@ -1010,7 +1010,7 @@ dump_web_conflicts (web)
 
 	if (id2web[def2]->type == SELECT)
 	  ra_debug_msg (DUMP_EVER, "/s");
-	  
+
 	if (id2web[def2]->type == COALESCED)
 	  ra_debug_msg (DUMP_EVER,"/c/%d", alias (id2web[def2])->id);
       }
@@ -1028,7 +1028,7 @@ dump_web_conflicts (web)
 	ra_debug_msg (DUMP_EVER, "%d(%d)%s ", w->id, w->regno,
 		   web_conflicts_p (web, w) ? "+" : "");
       }
-    ra_debug_msg (DUMP_EVER, "\n");  
+    ra_debug_msg (DUMP_EVER, "\n");
   }
 }
 
@@ -1048,4 +1048,3 @@ debug_hard_reg_set (set)
     }
   fprintf (stderr, "\n");
 }
-
============================================================
Index: gcc/ra-ranges.c
--- gcc/ra-ranges.c	20 Jun 2002 19:26:32 -0000	1.1.2.1
+++ gcc/ra-ranges.c	21 Jun 2002 09:34:34 -0000
@@ -82,7 +82,7 @@ void calculate_pre_post ()
   pre = (int *) xcalloc (last_basic_block+1, sizeof (int));
   post = (int *) xcalloc (last_basic_block+1, sizeof (int));
   pre_inverse = (int *) xcalloc (last_basic_block+1, sizeof (int));
-  
+
   /* Allocate stack for back-tracking up CFG.  */
   stack = (edge *) xmalloc ((last_basic_block + 1) * sizeof (edge));
   sp = 0;
@@ -159,7 +159,7 @@ dom_parent (block)
   abort();
 }
 
-  
+
 
 static unsigned int
 walk_dom_tree (block, index, level)
@@ -208,7 +208,7 @@ reach_under (block, head, loop)
 {
   sbitmap worklist;
   edge edge;
-  
+
   worklist = sbitmap_alloc (last_basic_block + 1);
   sbitmap_zero (worklist);
   SET_BIT (worklist, pre[block->index]);
@@ -239,8 +239,8 @@ reach_under (block, head, loop)
   sbitmap_free (worklist);
 }
 
-	      
-      
+
+
 static bool
 dominates (parent, child)
      basic_block parent;
@@ -260,7 +260,7 @@ dfs (i)
   scc_info[i].in_stack = TRUE;
   scc_info[i].next = dfs_stack;
   dfs_stack = i;
-  
+
   visit_successors (i, i);
   if (scc_info[i].low == scc_info[i].dfs_num)
     {
@@ -295,7 +295,7 @@ dfs (i)
 		      node->name = name;
 		      node->next = dj_graph_info[i].children;
 		      dj_graph_info[i].children = node;
-		    }		 
+		    }
 		}
 	    }
 	}
@@ -342,7 +342,7 @@ DFS_DJ_graph (block, index)
   edge succ;
   int child;
   SET_BIT (visited, block->index);
-  dj_graph_info[preorder_index].dfs_index = (*index)++; 
+  dj_graph_info[preorder_index].dfs_index = (*index)++;
   if (dom_node_for_block (domtree, block->index) < 0)
     child = -1;
   else
@@ -368,7 +368,7 @@ DFS_DJ_graph (block, index)
     }
   return dj_graph_info[preorder_index].dfs_size = size;
 }
-static void 
+static void
 find_nesting_depths()
 {
   unsigned int *depthtemp;
@@ -377,7 +377,7 @@ find_nesting_depths()
   int i;
   int *idom = (int *)alloca (last_basic_block * sizeof (int));
   sbitmap loop;
-  
+
   memset (idom, -1, (size_t) last_basic_block * sizeof (int));
   calculate_pre_post ();
   depths = (unsigned int *)xcalloc (last_basic_block + 1, sizeof(unsigned int));
@@ -386,7 +386,7 @@ find_nesting_depths()
   domtree = dom_tree_from_idoms (idom);
   dj_graph_info = (struct dj_graph_info *) ggc_alloc_cleared
     ((last_basic_block + 1) * sizeof (struct dj_graph_info));
-  walk_dom_tree (BASIC_BLOCK (0), &index,  level);    
+  walk_dom_tree (BASIC_BLOCK (0), &index,  level);
   levels = (struct linked_list **) ggc_alloc_cleared ((max_level + 1) * sizeof (struct linked_list *));
   for (i = 0; i < last_basic_block; i++)
     {
@@ -406,7 +406,7 @@ find_nesting_depths()
   DFS_DJ_graph (BASIC_BLOCK (0), &index);
   loop = sbitmap_alloc (last_basic_block + 1);
   sbitmap_zero (loop);
-  
+
   for (i = max_level; i >= 0; i--)
     {
       bool irreducible_loop = FALSE;
@@ -431,7 +431,7 @@ find_nesting_depths()
 	    {
 	      int block_index = pre [block->index];
 	      unsigned int i;
-	      EXECUTE_IF_SET_IN_SBITMAP (loop, 0, i, 
+	      EXECUTE_IF_SET_IN_SBITMAP (loop, 0, i,
 	      {
 		unsigned int depth = ++(depths[i]);
 		if (depth > max_depth) max_depth = depth;
@@ -491,7 +491,7 @@ static struct split_range *split_ranges;
 static struct linked_list **split_around;
 static struct linked_list **neighbors_with_color;
 static inline double powraise PARAMS ((unsigned int));
-static inline double 
+static inline double
 powraise (power)
      unsigned int power;
 {
@@ -500,14 +500,14 @@ powraise (power)
       i *= 10.0;
   return i;
 }
-static bool 
+static bool
 find_splits (name, colors)
      unsigned int name ATTRIBUTE_UNUSED;
      int *colors ATTRIBUTE_UNUSED;
 {
   return FALSE;
 }
-static void 
+static void
 splits_init ()
 {
   if (split_live_ranges)
@@ -527,4 +527,3 @@ splits_init ()
       any_splits_found = FALSE;
     }
 }
-
============================================================
Index: gcc/ra-rewrite.c
--- gcc/ra-rewrite.c	20 Jun 2002 19:26:32 -0000	1.1.2.1
+++ gcc/ra-rewrite.c	21 Jun 2002 09:34:34 -0000
@@ -58,7 +58,7 @@ static int slots_overlap_p PARAMS ((rtx,
 static void delete_overlapping_slots PARAMS ((struct rtx_list **, rtx));
 static int slot_member_p PARAMS ((struct rtx_list *, rtx));
 static void insert_stores PARAMS ((bitmap));
-static int spill_same_color_p PARAMS ((struct web *, struct web *)); 
+static int spill_same_color_p PARAMS ((struct web *, struct web *));
 static int is_partly_live_1 PARAMS ((sbitmap, struct web *));
 static void update_spill_colors PARAMS ((HARD_REG_SET *, struct web *, int));
 static int spill_is_free PARAMS ((HARD_REG_SET *, struct web *));
@@ -369,7 +369,7 @@ rewrite_program (new_deaths)
 	rtx slot;
 	if (aweb->type != SPILLED)
 	  continue;
-      
+
 	if (flag_ra_spill_every_use)
 	  {
 	    bitmap_clear (b);
@@ -409,12 +409,12 @@ rewrite_program (new_deaths)
 		    set_block_for_insn (insn, bb);
 		    df_insn_modify (df, bb, insn);
 		  }
-		
+
 		emitted_spill_loads++;
 		spill_load_cost += bb->frequency + 1;
 	      }
 	  }
-	
+
 	/* If any uses were loaded from stackslots (compared to
 	   rematerialized or not reloaded due to IR spilling),
 	   aweb->stack_slot will be set.  If not, we don't need to emit
@@ -458,7 +458,7 @@ rewrite_program (new_deaths)
 		  else*/
 		    ra_emit_move_insn (dest, source);
 		}
-		
+
 	      insns = get_insns ();
 	      end_sequence ();
 	      if (insns)
@@ -945,7 +945,7 @@ detect_deaths_in_bb (bb, live, new_death
   head_prev = PREV_INSN (bb->head);
   sbitmap_zero (live);
   EXECUTE_IF_SET_IN_BITMAP (live_at_end[bb->index], 0, j,
-    { 
+    {
       struct web *web = use2web[j];
       struct web *aweb = alias (find_web_for_subweb (web));
       /* See below in rewrite_program2() for a comment which webs we
@@ -1016,7 +1016,7 @@ detect_deaths_in_bb (bb, live, new_death
 	      break;
 	    }
 	}
-      
+
       for (n = 0; n < info.num_uses; n++)
 	{
 	  struct web *web = use2web[DF_REF_ID (info.uses[n])];
@@ -1141,7 +1141,7 @@ rewrite_program2 (new_deaths)
       sbitmap_zero (ri.live);
       CLEAR_HARD_REG_SET (ri.colors_in_use);
       EXECUTE_IF_SET_IN_BITMAP (live_at_end[i - 2], 0, j,
-	{ 
+	{
 	  struct web *web = use2web[j];
 	  struct web *aweb = alias (find_web_for_subweb (web));
 	  /* A web is only live at end, if it isn't spilled.  If we wouldn't
@@ -1197,7 +1197,7 @@ rewrite_program2 (new_deaths)
 	    {
 	      int index = BLOCK_FOR_INSN (insn)->index + 2;
 	      EXECUTE_IF_SET_IN_BITMAP (live_at_end[index - 2], 0, j,
-		{ 
+		{
 		  struct web *web = use2web[j];
 		  struct web *aweb = alias (find_web_for_subweb (web));
 		  if (aweb->type != SPILLED)
@@ -1312,7 +1312,7 @@ rewrite_program2 (new_deaths)
 	  /* CALL_INSNs are not really deaths, but still more registers
 	     are free after a call, than before.
 	     XXX Note, that sometimes reload barfs when we emit insns between
-	     a call and the insn which copies the return register into a 
+	     a call and the insn which copies the return register into a
 	     pseudo.  */
 	  if (GET_CODE (insn) == CALL_INSN)
 	    ri.need_load = 1;
@@ -1334,7 +1334,7 @@ rewrite_program2 (new_deaths)
 		    break;
 		  }
 	      }
-	  
+
 	  if (INSN_P (insn) && ri.num_reloads)
 	    {
               int old_num_reloads = ri.num_reloads;
@@ -1365,7 +1365,7 @@ rewrite_program2 (new_deaths)
 		if (aweb->type != SPILLED)
 		  update_spill_colors (&(ri.colors_in_use), web, 1);
 	      }
-	  
+
 	  ri.any_spilltemps_spilled = 0;
 	  if (INSN_P (insn))
 	    for (n = 0; n < info.num_uses; n++)
@@ -1408,7 +1408,7 @@ rewrite_program2 (new_deaths)
 #endif
 	    break;
 	}
-      
+
       nl_first_reload = ri.nl_size;
       if (ri.num_reloads)
 	{
@@ -1422,7 +1422,7 @@ rewrite_program2 (new_deaths)
 	      int j;
 	      CLEAR_HARD_REG_SET (colors);
 	      EXECUTE_IF_SET_IN_BITMAP (live_at_end[e->src->index], 0, j,
-		{ 
+		{
 		  struct web *web = use2web[j];
 		  struct web *aweb = alias (find_web_for_subweb (web));
 		  if (aweb->type != SPILLED)
@@ -1432,7 +1432,7 @@ rewrite_program2 (new_deaths)
 	    }
 	  if (num == 5)
 	    in_ir = 1;
-	  
+
 	  bitmap_clear (ri.scratch);
 	  EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j,
 	    {
@@ -1865,8 +1865,8 @@ delete_moves (void)
        Additionally: s->type != PRECOLORED && t->type != PRECOLORED, in case
        we want to prevent deletion of "special" copies.  */
     if (ml->move
-       	&& (s = alias (ml->move->source_web))->reg_rtx
-       	    == (t = alias (ml->move->target_web))->reg_rtx
+	&& (s = alias (ml->move->source_web))->reg_rtx
+	    == (t = alias (ml->move->target_web))->reg_rtx
 	&& s->type != PRECOLORED && t->type != PRECOLORED)
       {
 	basic_block bb = BLOCK_FOR_INSN (ml->move->insn);
============================================================
Index: gcc/ra.c
--- gcc/ra.c	20 Jun 2002 19:26:27 -0000	1.1.2.59
+++ gcc/ra.c	21 Jun 2002 09:34:34 -0000
@@ -52,12 +52,12 @@
 */
 
 /* TODO
- 
+
    * Lattice based rematerialization
    * do lots of commenting
    * look through all XXX's and do something about them
    * handle REG_NO_CONFLICTS blocks correctly (the current ad hoc approach
-     might miss some conflicts due to insns which only seem to be in a 
+     might miss some conflicts due to insns which only seem to be in a
      REG_NO_CONLICTS block)
      -- Don't necessary anymore, I believe, because SUBREG tracking is
      implemented.
@@ -123,7 +123,7 @@ bitmap *live_at_end;
 int ra_pass;
 unsigned int max_normal_pseudo;
 int an_unusable_color;
- 
+
 /* The different lists on which a web can be (based on the type).  */
 struct dlist *web_lists[(int) LAST_NODE_TYPE];
 
@@ -470,7 +470,7 @@ init_ra (void)
 
   for (i = HARD_REGNO_NREGS (ARG_POINTER_REGNUM, Pmode); i--;)
     SET_HARD_REG_BIT (never_use_colors, ARG_POINTER_REGNUM + i);
-	
+
   for (i = 0; i < 256; i++)
     {
       unsigned char byte = ((unsigned) i) & 0xFF;
@@ -483,7 +483,7 @@ init_ra (void)
 	}
       byte2bitcount[i] = count;
     }
-  
+
   for (i = 0; i < N_REG_CLASSES; i++)
     {
       int size;
@@ -509,7 +509,7 @@ init_ra (void)
 	  }
       COPY_HARD_REG_SET (hardregs_for_mode[i], rs);
     }
-  
+
   for (an_unusable_color = 0; an_unusable_color < FIRST_PSEUDO_REGISTER;
        an_unusable_color++)
     if (TEST_HARD_REG_BIT (never_use_colors, an_unusable_color))
@@ -545,7 +545,7 @@ check_df (df)
   for (ui = 0; ui < df->use_id; ui++)
     if (!df->uses[ui])
       bitmap_set_bit (empty_uses, ui);
-  
+
   for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
     if (INSN_P (insn))
       {
@@ -556,7 +556,7 @@ check_df (df)
 	    abort ();
 	  else
 	    bitmap_set_bit (b, DF_REF_ID (link->ref));
-			    
+
 	bitmap_clear (b);
 	for (link = DF_INSN_USES (df, insn); link; link = link->next)
 	  if (!link->ref || bitmap_bit_p (empty_uses, DF_REF_ID (link->ref))
@@ -584,7 +584,7 @@ check_df (df)
 	else
 	  bitmap_set_bit (b, DF_REF_ID (link->ref));
     }
-  
+
   BITMAP_XFREE (empty_uses);
   BITMAP_XFREE (empty_defs);
   BITMAP_XFREE (b);
@@ -731,7 +731,7 @@ reg_alloc (void)
   }
   ra_info_free (ra_info);
   free (ra_info);*/
-  
+
   /* XXX the REG_EQUIV notes currently are screwed up, when pseudos are
      coalesced, which have such notes.  In that case, the whole combined
      web gets that note too, which is wrong.  */
@@ -791,14 +791,14 @@ reg_alloc (void)
 	death_insns_max_uid = orig_max_uid;
       }
 #endif
-      
+
       df_analyse (df, (ra_pass == 1) ? 0 : (bitmap) -1,
 		  DF_HARD_REGS | DF_RD_CHAIN | DF_RU_CHAIN
 #ifndef NO_REMAT
 		  | DF_DU_CHAIN | DF_UD_CHAIN
 #endif
 		 );
-      
+
       /* FIXME denisc@overta.ru
 	 Example of usage ra_info ... routines */
 #if 0
@@ -818,12 +818,12 @@ reg_alloc (void)
       alloc_mem (df);
       /*ra_debug_msg (DUMP_EVER, "before one_pass()\n");
       if (rtl_dump_file)
-	print_rtl_with_bb (rtl_dump_file, get_insns ()); 
+	print_rtl_with_bb (rtl_dump_file, get_insns ());
       verify_flow_info ();*/
       changed = one_pass (df, ra_pass > 1);
       /*ra_debug_msg (DUMP_EVER, "after one_pass()\n");
       if (rtl_dump_file)
-        print_rtl_with_bb (rtl_dump_file, get_insns ()); 
+        print_rtl_with_bb (rtl_dump_file, get_insns ());
       verify_flow_info ();*/
       /* FIXME denisc@overta.ru
 	 Example of usage ra_info ... routines */
@@ -832,7 +832,7 @@ reg_alloc (void)
       free (df2ra.def2def);
       free (df2ra.use2use);
       free (ra_info);
-#endif 
+#endif
       if (!changed)
 	{
           emit_colors (df);
@@ -879,8 +879,8 @@ reg_alloc (void)
   /*ra_debug_msg (DUMP_COSTS, "ticks for build-phase: %ld\n", ticks_build);
   ra_debug_msg (DUMP_COSTS, "ticks for rebuild-phase: %ld\n", ticks_rebuild);*/
   if ((debug_new_regalloc & (DUMP_FINAL_RTL | DUMP_RTL)) != 0)
-    ra_print_rtl_with_bb (rtl_dump_file, get_insns ()); 
-  
+    ra_print_rtl_with_bb (rtl_dump_file, get_insns ());
+
   if ((debug_new_regalloc & DUMP_SM) == 0)
     rtl_dump_file = NULL;
   no_new_pseudos = 0;
@@ -898,17 +898,17 @@ reg_alloc (void)
   find_basic_blocks (get_insns (), max_reg_num (), rtl_dump_file);*/
   /*compute_bb_for_insn ();*/
   /*clear_log_links (get_insns ());*/
-  life_analysis (get_insns (), rtl_dump_file, 
+  life_analysis (get_insns (), rtl_dump_file,
 		 PROP_DEATH_NOTES | PROP_LOG_LINKS  | PROP_REG_INFO);
 /*  recompute_reg_usage (get_insns (), TRUE);
-  life_analysis (get_insns (), rtl_dump_file, 
+  life_analysis (get_insns (), rtl_dump_file,
 		 PROP_SCAN_DEAD_CODE | PROP_KILL_DEAD_CODE); */
   cleanup_cfg (CLEANUP_EXPENSIVE);
   recompute_reg_usage (get_insns (), TRUE);
 /*  delete_trivially_dead_insns (get_insns (), max_reg_num ());*/
   if (rtl_dump_file)
     dump_flow_info (rtl_dump_file);
-	  
+
   /* XXX: reg_scan screws up reg_renumber, and without reg_scan, we can't do
      regclass. */
   /*reg_scan (get_insns (), max_reg_num (), 1);
@@ -931,7 +931,7 @@ reg_alloc (void)
 
   remove_suspicious_death_notes ();
   if ((debug_new_regalloc & DUMP_LAST_RTL) != 0)
-    ra_print_rtl_with_bb (rtl_dump_file, get_insns ()); 
+    ra_print_rtl_with_bb (rtl_dump_file, get_insns ());
   dump_static_insn_cost (rtl_dump_file,
 			 "after allocation/spilling, before reload", NULL);
 
@@ -982,7 +982,7 @@ web_class ()
 
       if (web->type == PRECOLORED)
 	continue;
-      
+
       for (i = 0; i < LIM_REG_CLASSES; ++i)
 	class[i] = 0;
 
@@ -1014,7 +1014,7 @@ web_class ()
 	      }
 	    else if (!reg_class_subset_p (best, i))
 	      best = NO_REGS;
-/*  	    fprintf (stderr, "%s: %d ", reg_class_names[i], class[i]); */
+/*	    fprintf (stderr, "%s: %d ", reg_class_names[i], class[i]); */
 	  }
 /*    fprintf (stderr, " BEST: %s\n", reg_class_names[best]); */
       if (best == NO_REGS)
============================================================
Index: gcc/ra.h
--- gcc/ra.h	20 Jun 2002 19:26:31 -0000	1.1.2.1
+++ gcc/ra.h	21 Jun 2002 09:34:34 -0000
@@ -98,7 +98,7 @@ struct web
      involved in coalescing in some way.  */
   unsigned int is_coalesced:1;
   unsigned int artificial:1;
-  		    /* != 0 : there is no rtl in the code which corresponds
+		    /* != 0 : there is no rtl in the code which corresponds
                        to this web.  Happens e.g. with conflicts to a web,
                        of which only a part was still undefined at the point
                        of that conflict.  In this case we construct a subweb
@@ -274,7 +274,7 @@ extern bitmap *live_at_end;
 extern int ra_pass;
 extern unsigned int max_normal_pseudo;
 extern int an_unusable_color;
- 
+
 extern int *number_seen;
 
 /* The different lists on which a web can be (based on the type).  */

-- 
 Andreas Jaeger
  SuSE Labs aj@suse.de
   private aj@arthur.inka.de
    http://www.suse.de/~aj



More information about the Gcc-patches mailing list