This is the mail archive of the
gcc-patches@gcc.gnu.org
mailing list for the GCC project.
[ira] bug fixes
- From: Vladimir Makarov <vmakarov at redhat dot com>
- To: gcc-patches <gcc-patches at gcc dot gnu dot org>
- Date: Wed, 12 Mar 2008 13:32:29 -0400
- Subject: [ira] bug fixes
The patch mainly fixes an IRA gcc-torture regression, and several gcc
bootstrap bugs (in spill/restore optimization, dealing with read only
equivalence memory, sharing caller save stack slots). Thanks to
Serge Belyshev for reporting the bugs.
2008-03-12 Vladimir Makarov <vmakarov@redhat.com>
* ira-int.h (allocno_pool, copy_pool, allocno_live_range_pool):
Remove external definitions.
(free_allocno_updated_costs, allocate_cost_vector,
free_cost_vector): New prototypes.
(allocate_and_set_costs, allocate_and_copy_costs,
allocate_and_set_or_copy_costs): Change prototypes. Use
allocate_cost_vector instead of ira_allocate.
* ira-conflicts.c (add_insn_allocno_copies): Pass register class
to allocate_and_set_costs.
* ira-color.c (update_copy_costs_1): Pass register class to
allocate_and_set_or_copy_costs.
(assign_hard_reg): Pass register class to allocate_and_copy_costs.
Free updated costs.
(pop_allocnos_from_stack, color_allocnos): Check updated costs.
(color_pass): Pass register class to allocate_and_set_costs. Free
updated costs.
(move_spill_restore): Check reg equivalences.
(setup_curr_costs): Pass register class to
allocate_and_set_or_copy_costs.
(reassign_conflict_allocnos, allocno_reload_assign): Check updated
costs.
(calculate_spill_cost): Use REG_N_REFS after checking regno.
* ira-lives.c (process_single_reg_class_operands): Pass register
class to allocate_and_set_costs.
* ira-build.c (allocno_pool, allocno_live_range_pool): New
variables.
(initiate_allocnos): Initiate the pools.
(free_allocno_updated_costs): New function.
(finish_allocno): Use free_cost_vector. Free the pools.
(copy_pool): New variable.
(initiate_copies): Initiate the pool.
(cost_vector_pool): New variable.
(initiate_cost_vectors, allocate_cost_vector, free_cost_vector,
finish_cost_vectors): New functions.
(ira_flattening): Call free_allocno_updated_costs. Check updated
costs.
(ira_build): Call initiate_cost_vectors.
(ira_destroy): Call finish_cost_vectors.
* ira.c (find_reg_equiv_invariant_const): Check read only memory.
(setup_reg_renumber, setup_allocno_assignment_flags): Call
free_allocno_updated_costs.
(ira): Remove initializations and freeing allocno_pool, copy_pool,
and allocno_live_range_pool. Call generate_setjmp_warnings.
* ira-costs.c (process_bb_node_for_hard_reg_moves,
tune_allocno_costs_and_cover_classes): Pass register class to
allocate_and_set_costs.
* caller-save.c (save_slots_num, save_slots): New variables.
(init_save_areas): Initiate save_slots_num.
(setup_save_areas): Use stack slots from the previous reload
iteration independently of which slot was used on the previous pass.
Index: ira-conflicts.c
===================================================================
--- ira-conflicts.c (revision 132993)
+++ ira-conflicts.c (working copy)
@@ -367,7 +367,7 @@ add_insn_allocno_copies (rtx insn)
rtx set, operand, dup;
const char *str;
int commut_p, bound_p;
- int i, j, freq, hard_regno, cost, index, hard_regs_num;
+ int i, j, freq, hard_regno, cost, index;
copy_t cp;
allocno_t a;
enum reg_class class, cover_class;
@@ -412,12 +412,11 @@ add_insn_allocno_copies (rtx insn)
cost = register_move_cost [mode] [cover_class] [class] * freq;
else
cost = register_move_cost [mode] [class] [cover_class] * freq;
- hard_regs_num = class_hard_regs_num [cover_class];
allocate_and_set_costs
- (&ALLOCNO_HARD_REG_COSTS (a), hard_regs_num,
+ (&ALLOCNO_HARD_REG_COSTS (a), cover_class,
ALLOCNO_COVER_CLASS_COST (a));
allocate_and_set_costs
- (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a), hard_regs_num, 0);
+ (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a), cover_class, 0);
ALLOCNO_HARD_REG_COSTS (a) [index] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a) [index] -= cost;
}
@@ -478,13 +477,12 @@ add_insn_allocno_copies (rtx insn)
cost
= register_move_cost [mode] [class] [cover_class];
cost *= freq;
- hard_regs_num = class_hard_regs_num [cover_class];
allocate_and_set_costs
- (&ALLOCNO_HARD_REG_COSTS (a), hard_regs_num,
+ (&ALLOCNO_HARD_REG_COSTS (a), cover_class,
ALLOCNO_COVER_CLASS_COST (a));
allocate_and_set_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
- hard_regs_num, 0);
+ cover_class, 0);
ALLOCNO_HARD_REG_COSTS (a) [index] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a) [index] -= cost;
bound_p = TRUE;
@@ -544,13 +542,12 @@ add_insn_allocno_copies (rtx insn)
cost
= register_move_cost [mode] [class] [cover_class];
cost *= (freq < 8 ? 1 : freq / 8);
- hard_regs_num = class_hard_regs_num [cover_class];
allocate_and_set_costs
- (&ALLOCNO_HARD_REG_COSTS (a), hard_regs_num,
+ (&ALLOCNO_HARD_REG_COSTS (a), cover_class,
ALLOCNO_COVER_CLASS_COST (a));
allocate_and_set_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
- hard_regs_num, 0);
+ cover_class, 0);
ALLOCNO_HARD_REG_COSTS (a) [index] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a) [index] -= cost;
}
Index: caller-save.c
===================================================================
--- caller-save.c (revision 132993)
+++ caller-save.c (working copy)
@@ -68,6 +68,12 @@ static enum machine_mode
static rtx
regno_save_mem[FIRST_PSEUDO_REGISTER][MAX_MOVE_MAX / MIN_UNITS_PER_WORD + 1];
+/* The number of elements in the subsequent array. */
+static int save_slots_num;
+
+/* Allocated slots so far. */
+static rtx save_slots [FIRST_PSEUDO_REGISTER];
+
/* We will only make a register eligible for caller-save if it can be
saved in its widest mode with a simple SET insn as long as the memory
address is valid. We record the INSN_CODE is those insns here since
@@ -289,6 +295,8 @@ init_save_areas (void)
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
for (j = 1; j <= MOVE_MAX_WORDS; j++)
regno_save_mem[i][j] = 0;
+ save_slots_num = 0;
+
}
/* The structure represents a hard register which should be saved
@@ -447,6 +455,9 @@ setup_save_areas (void)
struct saved_hard_reg *call_saved_regs [FIRST_PSEUDO_REGISTER];
HARD_REG_SET hard_regs_to_save, used_regs, this_insn_sets;
reg_set_iterator rsi;
+ int best_slot_num;
+ int prev_save_slots_num;
+ rtx prev_save_slots [FIRST_PSEUDO_REGISTER];
initiate_saved_hard_regs ();
/* Create hard reg saved regs. */
@@ -504,6 +515,7 @@ setup_save_areas (void)
hard_reg_map [r]->call_freq += freq;
else
saved_reg = new_saved_hard_reg (r, freq);
+ SET_HARD_REG_BIT (hard_regs_to_save, r);
}
}
}
@@ -576,10 +588,16 @@ setup_save_areas (void)
/* Sort saved hard regs. */
qsort (all_saved_regs, saved_regs_num, sizeof (struct saved_hard_reg *),
saved_hard_reg_compare_func);
+ /* Initiate slots available from the previous reload
+ iteration. */
+ prev_save_slots_num = save_slots_num;
+ memcpy (prev_save_slots, save_slots, save_slots_num * sizeof (rtx));
+ save_slots_num = 0;
/* Allocate stack slots for the saved hard registers. */
for (i = 0; i < saved_regs_num; i++)
{
saved_reg = all_saved_regs [i];
+ regno = saved_reg->hard_regno;
for (j = 0; j < i; j++)
{
saved_reg2 = all_saved_regs [j];
@@ -595,44 +613,56 @@ setup_save_areas (void)
break;
}
if (k < 0
- && (GET_MODE_SIZE (regno_save_mode
- [saved_reg->hard_regno] [1])
+ && (GET_MODE_SIZE (regno_save_mode [regno] [1])
<= GET_MODE_SIZE (regno_save_mode
[saved_reg2->hard_regno] [1])))
{
saved_reg->slot = slot;
- regno_save_mem [saved_reg->hard_regno] [1] = slot;
+ regno_save_mem [regno] [1] = slot;
saved_reg->next = saved_reg2->next;
saved_reg2->next = i;
if (dump_file != NULL)
fprintf (dump_file, "%d uses slot of %d\n",
- saved_reg->hard_regno, saved_reg2->hard_regno);
+ regno, saved_reg2->hard_regno);
break;
}
}
if (j == i)
{
saved_reg->first_p = TRUE;
- if (regno_save_mem [saved_reg->hard_regno] [1] != NULL_RTX)
+ for (best_slot_num = -1, j = 0; j < prev_save_slots_num; j++)
{
- saved_reg->slot = regno_save_mem [saved_reg->hard_regno] [1];
+ slot = prev_save_slots [j];
+ if (slot == NULL_RTX)
+ continue;
+ if (GET_MODE_SIZE (regno_save_mode [regno] [1])
+ <= GET_MODE_SIZE (GET_MODE (slot))
+ && best_slot_num < 0)
+ best_slot_num = j;
+ if (GET_MODE (slot) == regno_save_mode [regno] [1])
+ break;
+ }
+ if (best_slot_num >= 0)
+ {
+ saved_reg->slot = prev_save_slots [best_slot_num];
if (dump_file != NULL)
fprintf (dump_file,
- "%d uses slot from prev iteration\n",
- saved_reg->hard_regno);
+ "%d uses a slot from prev iteration\n", regno);
+ prev_save_slots [best_slot_num] = NULL_RTX;
+ if (best_slot_num + 1 == prev_save_slots_num)
+ prev_save_slots_num--;
}
else
{
saved_reg->slot
= assign_stack_local
- (regno_save_mode [saved_reg->hard_regno] [1],
- GET_MODE_SIZE (regno_save_mode
- [saved_reg->hard_regno] [1]), 0);
- regno_save_mem [saved_reg->hard_regno] [1] = saved_reg->slot;
+ (regno_save_mode [regno] [1],
+ GET_MODE_SIZE (regno_save_mode [regno] [1]), 0);
if (dump_file != NULL)
- fprintf (dump_file, "%d uses a new slot\n",
- saved_reg->hard_regno);
+ fprintf (dump_file, "%d uses a new slot\n", regno);
}
+ regno_save_mem [regno] [1] = saved_reg->slot;
+ save_slots [save_slots_num++] = saved_reg->slot;
}
}
free (saved_reg_conflicts);
@@ -1335,11 +1365,9 @@ save_call_clobbered_regs (void)
free_aux_for_blocks ();
}
-/* Here from note_stores, or directly from save_call_clobbered_regs, when
- an insn stores a value in a register.
- Set the proper bit or bits in this_insn_sets. All pseudos that have
- been assigned hard regs have had their register number changed already,
- so we can ignore pseudos. */
+/* Here from note_stores, or directly from save_call_clobbered_regs,
+ when an insn stores a value in a register. Set the proper bit or
+ bits in this_insn_sets. */
static void
mark_set_regs (rtx reg, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
@@ -1349,16 +1377,34 @@ mark_set_regs (rtx reg, const_rtx setter
if (GET_CODE (reg) == SUBREG)
{
rtx inner = SUBREG_REG (reg);
- if (!REG_P (inner) || REGNO (inner) >= FIRST_PSEUDO_REGISTER)
+ if (!REG_P (inner))
return;
- regno = subreg_regno (reg);
- endregno = regno + subreg_nregs (reg);
+ if ((regno = REGNO (inner)) >= FIRST_PSEUDO_REGISTER)
+ {
+ if (reg_renumber [regno] < 0)
+ return;
+ regno = reg_renumber [regno];
+ endregno
+ = hard_regno_nregs [regno] [PSEUDO_REGNO_MODE (REGNO (inner))];
+ }
+ else
+ {
+ regno = subreg_regno (reg);
+ endregno = regno + subreg_nregs (reg);
+ }
}
- else if (REG_P (reg)
- && REGNO (reg) < FIRST_PSEUDO_REGISTER)
+ else if (REG_P (reg))
{
- regno = REGNO (reg);
- endregno = END_HARD_REGNO (reg);
+ if ((regno = REGNO (reg)) < FIRST_PSEUDO_REGISTER)
+ endregno = END_HARD_REGNO (reg);
+ else
+ {
+ if (reg_renumber [regno] < 0)
+ return;
+ regno = reg_renumber [regno];
+ endregno
+ = hard_regno_nregs [regno] [PSEUDO_REGNO_MODE (REGNO (reg))];
+ }
}
else
return;
@@ -1405,7 +1451,7 @@ add_stored_regs (rtx reg, const_rtx sett
SET_REGNO_REG_SET ((regset) data, i);
}
-/* Walk X and record all referenced registers in REFERENCED_REGS. */
+/* Walk X and record all referenced registers in REFERENCED_REG. */
static void
mark_referenced_regs (rtx x)
{
Index: ira-int.h
===================================================================
--- ira-int.h (revision 132993)
+++ ira-int.h (working copy)
@@ -53,9 +53,6 @@ extern int internal_flag_ira_verbose;
/* Dump file of the allocator if it is not NULL. */
extern FILE *ira_dump_file;
-/* Pools for allocnos, copies, allocno live ranges. */
-extern alloc_pool allocno_pool, copy_pool, allocno_live_range_pool;
-
/* Allocno live range, allocno, and copy of allocnos. */
typedef struct loop_tree_node *loop_tree_node_t;
typedef struct allocno_live_range *allocno_live_range_t;
@@ -626,6 +623,7 @@ extern void print_expanded_allocno (allo
extern allocno_live_range_t create_allocno_live_range (allocno_t, int, int,
allocno_live_range_t);
extern void finish_allocno_live_range (allocno_live_range_t);
+extern void free_allocno_updated_costs (allocno_t);
extern copy_t create_copy (allocno_t, allocno_t, int, rtx, loop_tree_node_t);
extern void add_allocno_copy_to_list (copy_t);
extern void swap_allocno_copy_ends_if_necessary (copy_t);
@@ -633,6 +631,9 @@ extern void remove_allocno_copy_from_lis
extern copy_t add_allocno_copy (allocno_t, allocno_t, int, rtx,
loop_tree_node_t);
+extern int *allocate_cost_vector (enum reg_class);
+extern void free_cost_vector (int *, enum reg_class);
+
extern void ira_flattening (int, int);
extern int ira_build (int);
extern void ira_destroy (void);
@@ -696,13 +697,15 @@ hard_reg_not_in_set_p (int hard_regno, e
/* Allocate cost vector *VEC and initialize the elements by VAL if it
is necessary */
static inline void
-allocate_and_set_costs (int **vec, int len, int val)
+allocate_and_set_costs (int **vec, enum reg_class cover_class, int val)
{
int i, *reg_costs;
+ int len;
if (*vec != NULL)
return;
- *vec = reg_costs = ira_allocate (sizeof (int) * len);
+ *vec = reg_costs = allocate_cost_vector (cover_class);
+ len = class_hard_regs_num [cover_class];
for (i = 0; i < len; i++)
reg_costs [i] = val;
}
@@ -710,24 +713,30 @@ allocate_and_set_costs (int **vec, int l
/* Allocate cost vector *VEC and copy values of SRC into the vector if
it is necessary */
static inline void
-allocate_and_copy_costs (int **vec, int len, int *src)
+allocate_and_copy_costs (int **vec, enum reg_class cover_class, int *src)
{
+ int len;
+
if (*vec != NULL || src == NULL)
return;
- *vec = ira_allocate (sizeof (int) * len);
+ *vec = allocate_cost_vector (cover_class);
+ len = class_hard_regs_num [cover_class];
memcpy (*vec, src, sizeof (int) * len);
}
/* Allocate cost vector *VEC and copy values of SRC into the vector or
initialize it by VAL (if SRC is null). */
static inline void
-allocate_and_set_or_copy_costs (int **vec, int len, int val, int *src)
+allocate_and_set_or_copy_costs (int **vec, enum reg_class cover_class,
+ int val, int *src)
{
int i, *reg_costs;
+ int len;
if (*vec != NULL)
return;
- *vec = reg_costs = ira_allocate (sizeof (int) * len);
+ *vec = reg_costs = allocate_cost_vector (cover_class);
+ len = class_hard_regs_num [cover_class];
if (src != NULL)
memcpy (reg_costs, src, sizeof (int) * len);
else
Index: ira-color.c
===================================================================
--- ira-color.c (revision 132993)
+++ ira-color.c (working copy)
@@ -146,19 +146,20 @@ static void
update_copy_costs_1 (allocno_t allocno, int hard_regno,
int decr_p, int divisor)
{
- int i, cost, update_cost, hard_regs_num;
+ int i, cost, update_cost;
enum machine_mode mode;
- enum reg_class class;
+ enum reg_class class, cover_class;
allocno_t another_allocno;
copy_t cp, next_cp;
- if (ALLOCNO_COVER_CLASS (allocno) == NO_REGS)
+ cover_class = ALLOCNO_COVER_CLASS (allocno);
+ if (cover_class == NO_REGS)
return;
if (allocno_update_cost_check [ALLOCNO_NUM (allocno)] == update_cost_check)
return;
allocno_update_cost_check [ALLOCNO_NUM (allocno)] = update_cost_check;
ira_assert (hard_regno >= 0);
- i = class_hard_reg_index [ALLOCNO_COVER_CLASS (allocno)] [hard_regno];
+ i = class_hard_reg_index [cover_class] [hard_regno];
ira_assert (i >= 0);
class = REGNO_REG_CLASS (hard_regno);
mode = ALLOCNO_MODE (allocno);
@@ -176,11 +177,10 @@ update_copy_costs_1 (allocno_t allocno,
}
else
gcc_unreachable ();
- if (ALLOCNO_COVER_CLASS (allocno)
+ if (cover_class
!= ALLOCNO_COVER_CLASS (another_allocno)
|| ALLOCNO_ASSIGNED_P (another_allocno))
continue;
- hard_regs_num = class_hard_regs_num [ALLOCNO_COVER_CLASS (allocno)];
cost = (cp->second == allocno
? register_move_cost [mode] [class]
[ALLOCNO_COVER_CLASS (another_allocno)]
@@ -189,12 +189,12 @@ update_copy_costs_1 (allocno_t allocno,
if (decr_p)
cost = -cost;
allocate_and_set_or_copy_costs
- (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), hard_regs_num,
+ (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), cover_class,
ALLOCNO_COVER_CLASS_COST (another_allocno),
ALLOCNO_HARD_REG_COSTS (another_allocno));
allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
- hard_regs_num, 0,
+ cover_class, 0,
ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
update_cost = cp->freq * cost / divisor;
ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno) [i] += update_cost;
@@ -298,7 +298,7 @@ assign_hard_reg (allocno_t allocno, int
IOR_HARD_REG_SET (conflicting_regs,
ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
- class_size, ALLOCNO_HARD_REG_COSTS (a));
+ cover_class, ALLOCNO_HARD_REG_COSTS (a));
a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
#ifdef STACK_REGS
no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
@@ -348,7 +348,7 @@ assign_hard_reg (allocno_t allocno, int
{
allocate_and_copy_costs
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_allocno),
- class_size,
+ cover_class,
ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_allocno));
conflict_costs
= ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_allocno);
@@ -382,7 +382,7 @@ assign_hard_reg (allocno_t allocno, int
continue;
allocate_and_copy_costs
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
- class_size, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
+ cover_class, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
conflict_costs
= ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
if (conflict_costs != NULL
@@ -470,6 +470,9 @@ assign_hard_reg (allocno_t allocno, int
ALLOCNO_ASSIGNED_P (a) = TRUE;
if (best_hard_regno >= 0)
update_copy_costs (a, TRUE);
+ /* We don't need updated costs anymore: */
+ ira_assert (ALLOCNO_COVER_CLASS (a) == cover_class);
+ free_allocno_updated_costs (a);
if (a == allocno)
break;
}
@@ -926,6 +929,9 @@ pop_allocnos_from_stack (void)
{
ALLOCNO_HARD_REGNO (allocno) = -1;
ALLOCNO_ASSIGNED_P (allocno) = TRUE;
+ ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
+ ira_assert
+ (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file, "assign memory\n");
}
@@ -1288,6 +1294,8 @@ color_allocnos (void)
{
ALLOCNO_HARD_REGNO (a) = -1;
ALLOCNO_ASSIGNED_P (a) = TRUE;
+ ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
+ ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
{
fprintf (ira_dump_file, " Spill");
@@ -1358,12 +1366,12 @@ print_loop_title (loop_tree_node_t loop_
static void
color_pass (loop_tree_node_t loop_tree_node)
{
- int regno, hard_regno, hard_regs_num, index = -1;
+ int regno, hard_regno, index = -1;
int cost, exit_freq, enter_freq;
unsigned int j;
bitmap_iterator bi;
enum machine_mode mode;
- enum reg_class class;
+ enum reg_class class, cover_class;
allocno_t a, subloop_allocno;
loop_tree_node_t subloop_node;
@@ -1420,6 +1428,8 @@ color_pass (loop_tree_node_t loop_tree_n
ALLOCNO_ASSIGNED_P (subloop_allocno) = TRUE;
if (hard_regno >= 0)
update_copy_costs (subloop_allocno, TRUE);
+ /* We don't need updated costs anymore: */
+ free_allocno_updated_costs (subloop_allocno);
}
continue;
}
@@ -1435,6 +1445,8 @@ color_pass (loop_tree_node_t loop_tree_n
ALLOCNO_ASSIGNED_P (subloop_allocno) = TRUE;
if (hard_regno >= 0)
update_copy_costs (subloop_allocno, TRUE);
+ /* We don't need updated costs anymore: */
+ free_allocno_updated_costs (subloop_allocno);
}
}
else if (hard_regno < 0)
@@ -1445,15 +1457,13 @@ color_pass (loop_tree_node_t loop_tree_n
}
else
{
- hard_regs_num
- = class_hard_regs_num [ALLOCNO_COVER_CLASS
- (subloop_allocno)];
+ cover_class = ALLOCNO_COVER_CLASS (subloop_allocno);
allocate_and_set_costs
- (&ALLOCNO_HARD_REG_COSTS (subloop_allocno), hard_regs_num,
+ (&ALLOCNO_HARD_REG_COSTS (subloop_allocno), cover_class,
ALLOCNO_COVER_CLASS_COST (subloop_allocno));
allocate_and_set_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno),
- hard_regs_num, 0);
+ cover_class, 0);
cost = (register_move_cost [mode] [class] [class]
* (exit_freq + enter_freq));
ALLOCNO_HARD_REG_COSTS (subloop_allocno) [index] -= cost;
@@ -1486,6 +1496,8 @@ color_pass (loop_tree_node_t loop_tree_n
ALLOCNO_ASSIGNED_P (subloop_allocno) = TRUE;
if (hard_regno >= 0)
update_copy_costs (subloop_allocno, TRUE);
+ /* We don't need updated costs anymore: */
+ free_allocno_updated_costs (subloop_allocno);
}
}
else if (flag_ira_propagate_cost && hard_regno >= 0)
@@ -1494,15 +1506,13 @@ color_pass (loop_tree_node_t loop_tree_n
enter_freq = loop_edge_freq (subloop_node, -1, FALSE);
cost = (register_move_cost [mode] [class] [class]
* (exit_freq + enter_freq));
- hard_regs_num
- = class_hard_regs_num [ALLOCNO_COVER_CLASS
- (subloop_allocno)];
+ cover_class = ALLOCNO_COVER_CLASS (subloop_allocno);
allocate_and_set_costs
- (&ALLOCNO_HARD_REG_COSTS (subloop_allocno), hard_regs_num,
+ (&ALLOCNO_HARD_REG_COSTS (subloop_allocno), cover_class,
ALLOCNO_COVER_CLASS_COST (subloop_allocno));
allocate_and_set_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno),
- hard_regs_num, 0);
+ cover_class, 0);
ALLOCNO_HARD_REG_COSTS (subloop_allocno) [index] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno) [index]
-= cost;
@@ -1613,7 +1623,12 @@ move_spill_restore (void)
loop_node = ALLOCNO_LOOP_TREE_NODE (a);
if (ALLOCNO_CAP_MEMBER (a) != NULL
|| (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0
- || loop_node->inner == NULL)
+ || loop_node->inner == NULL
+ /* don't do the optimization because it can create
+ copies and reload can spill an allocno set by copy
+ although the allocno will not get memory slot. */
+ || reg_equiv_invariant_p [regno]
+ || reg_equiv_const [regno] != NULL_RTX)
continue;
mode = ALLOCNO_MODE (a);
class = ALLOCNO_COVER_CLASS (a);
@@ -1632,6 +1647,9 @@ move_spill_restore (void)
subloop_allocno = subloop_node->regno_allocno_map [regno];
if (subloop_allocno == NULL)
continue;
+ /* We have accumulated cost. To get a real cost of
+ allocno usage in the loop we should subtract costs of
+ the subloop allocnos. */
cost -= (ALLOCNO_MEMORY_COST (subloop_allocno)
- (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
? ALLOCNO_COVER_CLASS_COST (subloop_allocno)
@@ -1693,7 +1711,7 @@ move_spill_restore (void)
static void
setup_curr_costs (allocno_t a)
{
- int i, hard_regno, cost, hard_regs_num;
+ int i, hard_regno, cost;
enum machine_mode mode;
enum reg_class cover_class, class;
allocno_t another_a;
@@ -1703,9 +1721,6 @@ setup_curr_costs (allocno_t a)
cover_class = ALLOCNO_COVER_CLASS (a);
if (cover_class == NO_REGS)
return;
- hard_regs_num = class_hard_regs_num [cover_class];
- if (hard_regs_num == 0)
- return;
mode = ALLOCNO_MODE (a);
for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
{
@@ -1733,11 +1748,11 @@ setup_curr_costs (allocno_t a)
: register_move_cost [mode] [cover_class] [class]);
allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
- hard_regs_num, ALLOCNO_COVER_CLASS_COST (a),
+ cover_class, ALLOCNO_COVER_CLASS_COST (a),
ALLOCNO_HARD_REG_COSTS (a));
allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
- hard_regs_num, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
+ cover_class, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
ALLOCNO_UPDATED_HARD_REG_COSTS (a) [i] -= cp->freq * cost;
ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) [i] -= cp->freq * cost;
}
@@ -1770,6 +1785,8 @@ reassign_conflict_allocnos (int start_re
{
ALLOCNO_ASSIGNED_P (a) = TRUE;
ALLOCNO_HARD_REGNO (a) = -1;
+ ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
+ ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
}
bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
}
@@ -1798,6 +1815,8 @@ reassign_conflict_allocnos (int start_re
{
a = sorted_allocnos [i];
ALLOCNO_ASSIGNED_P (a) = FALSE;
+ ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
+ ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
setup_curr_costs (a);
}
for (i = 0; i < allocnos_to_color_num; i++)
@@ -2157,6 +2176,8 @@ allocno_reload_assign (allocno_t a, HARD
if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a), call_used_reg_set);
ALLOCNO_ASSIGNED_P (a) = FALSE;
+ ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
+ ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
cover_class = ALLOCNO_COVER_CLASS (a);
setup_curr_costs (a);
assign_hard_reg (a, TRUE);
@@ -2187,12 +2208,11 @@ allocno_reload_assign (allocno_t a, HARD
if (reg_renumber[regno] >= 0)
{
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
- fprintf (ira_dump_file, ": reassign to %d", reg_renumber[regno]);
+ fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
mark_home_live (regno);
}
-
- if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file, "\n");
return reg_renumber[regno] >= 0;
@@ -2472,9 +2492,9 @@ calculate_spill_cost (int *regnos, rtx i
for (length = count = cost = i = 0;; i++)
{
regno = regnos [i];
- *nrefs += REG_N_REFS (regno);
if (regno < 0)
break;
+ *nrefs += REG_N_REFS (regno);
hard_regno = reg_renumber [regno];
ira_assert (hard_regno >= 0);
a = regno_allocno_map [regno];
Index: ira-lives.c
===================================================================
--- ira-lives.c (revision 132993)
+++ ira-lives.c (working copy)
@@ -587,8 +587,7 @@ process_single_reg_class_operands (int i
? register_move_cost [mode] [cover_class] [cl]
: register_move_cost [mode] [cl] [cover_class]);
allocate_and_set_costs
- (&ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a),
- class_hard_regs_num [cover_class], 0);
+ (&ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a), cover_class, 0);
ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a)
[class_hard_reg_index [cover_class] [class_hard_regs [cl] [0]]]
-= cost;
Index: ira-build.c
===================================================================
--- ira-build.c (revision 132993)
+++ ira-build.c (working copy)
@@ -73,6 +73,9 @@ static void print_allocno_copies (FILE *
static void finish_copy (copy_t);
static void finish_copies (void);
+static void initiate_cost_vectors (void);
+static void finish_cost_vectors (void);
+
static void create_insn_allocnos (rtx, int);
static void create_bb_allocnos (loop_tree_node_t);
static void create_loop_allocnos (edge);
@@ -455,6 +458,9 @@ finish_calls (void)
+/* Pools for allocnos and allocno live ranges. */
+static alloc_pool allocno_pool, allocno_live_range_pool;
+
/* Varray containing references to all created allocnos. It is a
container of array allocnos. */
static varray_type allocno_varray;
@@ -463,6 +469,10 @@ static varray_type allocno_varray;
static void
initiate_allocnos (void)
{
+ allocno_live_range_pool
+ = create_alloc_pool ("allocno live ranges",
+ sizeof (struct allocno_live_range), 100);
+ allocno_pool = create_alloc_pool ("allocnos", sizeof (struct allocno), 100);
VARRAY_GENERIC_PTR_NOGC_INIT
(allocno_varray, max_reg_num () * 2, "allocnos");
allocnos = NULL;
@@ -705,7 +715,8 @@ create_cap_allocno (allocno_t a)
static void
propagate_info_to_cap (allocno_t cap)
{
- int i, regno, hard_regs_num, conflicts_num;
+ int i, regno, conflicts_num;
+ enum reg_class cover_class;
allocno_t a, conflict_allocno, conflict_father_allocno;
allocno_t another_a, father_a;
allocno_t *allocno_vec;
@@ -718,12 +729,11 @@ propagate_info_to_cap (allocno_t cap)
&& ALLOCNO_CALLS_CROSSED_NUM (cap) == 0);
a = ALLOCNO_CAP_MEMBER (cap);
father = ALLOCNO_LOOP_TREE_NODE (cap);
- hard_regs_num = class_hard_regs_num [ALLOCNO_COVER_CLASS (cap)];
+ cover_class = ALLOCNO_COVER_CLASS (cap);
allocate_and_copy_costs
- (&ALLOCNO_HARD_REG_COSTS (cap), hard_regs_num,
- ALLOCNO_HARD_REG_COSTS (a));
+ (&ALLOCNO_HARD_REG_COSTS (cap), cover_class, ALLOCNO_HARD_REG_COSTS (a));
allocate_and_copy_costs
- (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), hard_regs_num,
+ (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), cover_class,
ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
@@ -865,22 +875,39 @@ finish_allocno_live_range (allocno_live_
pool_free (allocno_live_range_pool, r);
}
+void
+free_allocno_updated_costs (allocno_t a)
+{
+ enum reg_class cover_class;
+
+ cover_class = ALLOCNO_COVER_CLASS (a);
+ if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
+ free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
+ ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
+ if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
+ free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
+ cover_class);
+ ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
+}
+
/* The function frees memory allocated for allocno A. */
static void
finish_allocno (allocno_t a)
{
allocno_live_range_t r, next_r;
+ enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
if (ALLOCNO_CONFLICT_ALLOCNO_VEC (a) != NULL)
ira_free (ALLOCNO_CONFLICT_ALLOCNO_VEC (a));
if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
- ira_free (ALLOCNO_HARD_REG_COSTS (a));
+ free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), cover_class);
if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
- ira_free (ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
+ free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), cover_class);
if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
- ira_free (ALLOCNO_UPDATED_HARD_REG_COSTS (a));
+ free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
- ira_free (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a));
+ free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
+ cover_class);
for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = next_r)
{
next_r = r->next;
@@ -899,10 +926,15 @@ finish_allocnos (void)
finish_allocno (allocnos [i]);
ira_free (regno_allocno_map);
VARRAY_FREE (allocno_varray);
+ free_alloc_pool (allocno_pool);
+ free_alloc_pool (allocno_live_range_pool);
}
+/* Pools for copies. */
+static alloc_pool copy_pool;
+
/* Varray containing references to all created copies. It is a
container of array copies. */
static varray_type copy_varray;
@@ -911,6 +943,7 @@ static varray_type copy_varray;
static void
initiate_copies (void)
{
+ copy_pool = create_alloc_pool ("copies", sizeof (struct allocno_copy), 100);
VARRAY_GENERIC_PTR_NOGC_INIT (copy_varray, get_max_uid (), "copies");
copies = NULL;
copies_num = 0;
@@ -1133,6 +1166,61 @@ finish_copies (void)
for (i = 0; i < copies_num; i++)
finish_copy (copies [i]);
VARRAY_FREE (copy_varray);
+ free_alloc_pool (copy_pool);
+}
+
+
+
+/* Pools for cost vectors. */
+static alloc_pool cost_vector_pool [N_REG_CLASSES];
+
+/* The function initiates work with hard register cost vectors. It
+ creates allocation pool for each cover class. */
+static void
+initiate_cost_vectors (void)
+{
+ int i;
+ enum reg_class cover_class;
+
+ for (i = 0; i < reg_class_cover_size; i++)
+ {
+ cover_class = reg_class_cover [i];
+ cost_vector_pool [cover_class]
+ = create_alloc_pool ("cost vectors",
+ sizeof (int) * class_hard_regs_num [cover_class] + 32,
+ 100);
+ }
+}
+
+/* The function allocates and returns cost vector VEC for
+ COVER_CLASS. */
+int *
+allocate_cost_vector (enum reg_class cover_class)
+{
+ return pool_alloc (cost_vector_pool [cover_class]);
+}
+
+/* The function frees cost vector VEC for COVER_CLASS. */
+void
+free_cost_vector (int *vec, enum reg_class cover_class)
+{
+ ira_assert (vec != NULL);
+ pool_free (cost_vector_pool [cover_class], vec);
+}
+
+/* The function finishes work with hard register cost vectors. It
+ releases allocation pool for each cover class. */
+static void
+finish_cost_vectors (void)
+{
+ int i;
+ enum reg_class cover_class;
+
+ for (i = 0; i < reg_class_cover_size; i++)
+ {
+ cover_class = reg_class_cover [i];
+ free_alloc_pool (cost_vector_pool [cover_class]);
+ }
}
@@ -1899,6 +1987,11 @@ ira_flattening (int max_regno_before_emi
if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Enumerate a%dr%d to a%d\n",
ALLOCNO_NUM (a), REGNO (ALLOCNO_REG (a)), free);
+ ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
+ if (! ALLOCNO_ASSIGNED_P (a))
+ free_allocno_updated_costs (a);
+ ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
+ ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
ALLOCNO_NUM (a) = free;
allocnos [free++] = a;
}
@@ -2004,6 +2097,7 @@ ira_build (int loops_p)
CLEAR_HARD_REG_SET (cfun->emit->call_used_regs);
initiate_calls ();
+ initiate_cost_vectors ();
initiate_allocnos ();
initiate_copies ();
create_loop_tree_nodes (loops_p);
@@ -2061,5 +2155,6 @@ ira_destroy (void)
finish_copies ();
finish_allocnos ();
finish_calls ();
+ finish_cost_vectors ();
finish_allocno_live_ranges ();
}
Index: ira.c
===================================================================
--- ira.c (revision 132993)
+++ ira.c (working copy)
@@ -1161,7 +1161,7 @@ find_reg_equiv_invariant_const (void)
reg_equiv_* arrays were originally legitimate, we
ignore such REG_EQUIV notes. */
if (memory_operand (x, VOIDmode))
- continue;
+ invariant_p = MEM_READONLY_P (x);
else if (function_invariant_p (x))
{
if (GET_CODE (x) == PLUS
@@ -1179,8 +1179,8 @@ find_reg_equiv_invariant_const (void)
-/* The function sets up REG_RENUMBER and CALLER_SAVE_NEEDED used by
- reload from the allocation found by IRA. */
+/* The function sets up REG_RENUMBER and CALLER_SAVE_NEEDED (used by
+ reload) from the allocation found by IRA. */
static void
setup_reg_renumber (void)
{
@@ -1194,8 +1194,10 @@ setup_reg_renumber (void)
/* There are no caps at this point. */
ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
if (! ALLOCNO_ASSIGNED_P (a))
+ /* This can happen if A is not referenced but is partially
+ anticipated somewhere in a region. */
ALLOCNO_ASSIGNED_P (a) = TRUE;
- ira_assert (ALLOCNO_ASSIGNED_P (a));
+ free_allocno_updated_costs (a);
hard_regno = ALLOCNO_HARD_REGNO (a);
regno = (int) REGNO (ALLOCNO_REG (a));
reg_renumber [regno] = (hard_regno < 0 ? -1 : hard_regno);
@@ -1222,6 +1224,10 @@ setup_allocno_assignment_flags (void)
for (i = 0; i < allocnos_num; i++)
{
a = allocnos [i];
+ if (! ALLOCNO_ASSIGNED_P (a))
+ /* This can happen if A is not referenced but is partially
+ anticipated somewhere in a region. */
+ free_allocno_updated_costs (a);
hard_regno = ALLOCNO_HARD_REGNO (a);
/* Don't assign hard registers to allocnos which are destination
of removed store at the end of loop. It has a few sense to
@@ -1555,13 +1561,15 @@ ira (FILE *f)
df_clear_flags (DF_NO_INSN_RESCAN);
- allocno_pool = create_alloc_pool ("allocnos", sizeof (struct allocno), 100);
- copy_pool = create_alloc_pool ("copies", sizeof (struct allocno_copy), 100);
- allocno_live_range_pool
- = create_alloc_pool ("allocno live ranges",
- sizeof (struct allocno_live_range), 100);
regstat_init_n_sets_and_refs ();
regstat_compute_ri ();
+
+ /* If we are not optimizing, then this is the only place before
+ register allocation where dataflow is done. And that is needed
+ to generate these warnings. */
+ if (warn_clobbered)
+ generate_setjmp_warnings ();
+
rebuild_p = update_equiv_regs ();
regstat_free_n_sets_and_refs ();
regstat_free_ri ();
@@ -1713,10 +1721,6 @@ ira (FILE *f)
ira_free (reg_equiv_invariant_p);
ira_free (reg_equiv_const);
-
- free_alloc_pool (allocno_live_range_pool);
- free_alloc_pool (copy_pool);
- free_alloc_pool (allocno_pool);
bitmap_obstack_release (&ira_bitmap_obstack);
#ifndef IRA_NO_OBSTACK
Index: ira-costs.c
===================================================================
--- ira-costs.c (revision 132993)
+++ ira-costs.c (working copy)
@@ -1328,9 +1328,9 @@ find_allocno_class_costs (void)
static void
process_bb_node_for_hard_reg_moves (loop_tree_node_t loop_tree_node)
{
- int i, freq, cost, src_regno, dst_regno, hard_regno, to_p, hard_regs_num;
+ int i, freq, cost, src_regno, dst_regno, hard_regno, to_p;
allocno_t a;
- enum reg_class class, hard_reg_class;
+ enum reg_class class, cover_class, hard_reg_class;
enum machine_mode mode;
basic_block bb;
rtx insn, set, src, dst;
@@ -1380,11 +1380,9 @@ process_bb_node_for_hard_reg_moves (loop
hard_reg_class = REGNO_REG_CLASS (hard_regno);
cost = (to_p ? register_move_cost [mode] [hard_reg_class] [class]
: register_move_cost [mode] [class] [hard_reg_class]) * freq;
- hard_regs_num = class_hard_regs_num [class];
- allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), hard_regs_num,
+ allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), class,
ALLOCNO_COVER_CLASS_COST (a));
- allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
- hard_regs_num, 0);
+ allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a), class, 0);
ALLOCNO_HARD_REG_COSTS (a) [i] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a) [i] -= cost;
ALLOCNO_COVER_CLASS_COST (a) = MIN (ALLOCNO_COVER_CLASS_COST (a),
@@ -1401,12 +1399,12 @@ process_bb_node_for_hard_reg_moves (loop
break;
if ((a = father->regno_allocno_map [regno]) == NULL)
break;
- hard_regs_num = class_hard_regs_num [ALLOCNO_COVER_CLASS (a)];
+ cover_class = ALLOCNO_COVER_CLASS (a);
allocate_and_set_costs
- (&ALLOCNO_HARD_REG_COSTS (a), hard_regs_num,
+ (&ALLOCNO_HARD_REG_COSTS (a), cover_class,
ALLOCNO_COVER_CLASS_COST (a));
allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
- hard_regs_num, 0);
+ cover_class, 0);
ALLOCNO_HARD_REG_COSTS (a) [i] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a) [i] -= cost;
ALLOCNO_COVER_CLASS_COST (a)
@@ -1449,7 +1447,7 @@ setup_allocno_cover_class_and_costs (voi
{
n = class_hard_regs_num [cover_class];
ALLOCNO_HARD_REG_COSTS (a)
- = reg_costs = ira_allocate (n * sizeof (int));
+ = reg_costs = allocate_cost_vector (cover_class);
for (j = n - 1; j >= 0; j--)
{
regno = class_hard_regs [cover_class] [j];
@@ -1588,7 +1586,8 @@ tune_allocno_costs_and_cover_classes (vo
if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
{
allocate_and_set_costs
- (&ALLOCNO_HARD_REG_COSTS (a), n, ALLOCNO_COVER_CLASS_COST (a));
+ (&ALLOCNO_HARD_REG_COSTS (a), cover_class,
+ ALLOCNO_COVER_CLASS_COST (a));
reg_costs = ALLOCNO_HARD_REG_COSTS (a);
for (j = n - 1; j >= 0; j--)
{