a round-robin fashion. */
static int last_spill_reg;
-/* Describes order of preference for putting regs into spill_regs.
- Contains the numbers of all the hard regs, in order most preferred first.
- This order is different for each function.
- It is set up by order_regs_for_reload.
- Empty elements at the end contain -1. */
-static short potential_reload_regs[FIRST_PSEUDO_REGISTER];
-
/* Nonzero if indirect addressing is supported on the machine; this means
that spilling (REG n) does not require reloading it into a register in
order to do (MEM (REG n)) or (MEM (PLUS (REG n) (CONST_INT c))). The
static int spill_stack_slot_width[FIRST_PSEUDO_REGISTER];
/* Record which pseudos needed to be spilled. */
-static regset spilled_pseudos;
+static regset_head spilled_pseudos;
+
+/* Used for communication between order_regs_for_reload and count_pseudo.
+ Used to avoid counting one pseudo twice. */
+static regset_head pseudos_counted;
/* First uid used by insns created by reload in this function.
Used in find_equiv_reg. */
char *reload_startobj;
/* The point after all insn_chain structures. Used to quickly deallocate
- memory used while processing one insn. */
+ memory allocated in copy_reloads during calculate_needs_all_insns. */
char *reload_firstobj;
+/* This points before all local rtl generated by register elimination.
+ Used to quickly free all memory after processing one insn. */
+static char *reload_insn_firstobj;
+
#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
/* Number of labels in the current function. */
static int num_labels;
-
-struct hard_reg_n_uses
-{
- int regno;
- unsigned int uses;
-};
\f
static void maybe_fix_stack_asms PROTO((void));
+static void copy_reloads PROTO((struct insn_chain *));
static void calculate_needs_all_insns PROTO((int));
-static void calculate_needs PROTO((struct insn_chain *));
-static void find_reload_regs PROTO((struct insn_chain *chain,
- FILE *));
-static void find_tworeg_group PROTO((struct insn_chain *, int,
+static int find_reg PROTO((struct insn_chain *, int,
FILE *));
-static void find_group PROTO((struct insn_chain *, int,
- FILE *));
-static int possible_group_p PROTO((struct insn_chain *, int));
-static void count_possible_groups PROTO((struct insn_chain *, int));
-static int modes_equiv_for_class_p PROTO((enum machine_mode,
- enum machine_mode,
- enum reg_class));
+static void find_reload_regs PROTO((struct insn_chain *, FILE *));
+static void select_reload_regs PROTO((FILE *));
static void delete_caller_save_insns PROTO((void));
-static void spill_failure PROTO((rtx));
-static void new_spill_reg PROTO((struct insn_chain *, int, int,
- int, FILE *));
-static void maybe_mark_pseudo_spilled PROTO((int));
+static void spill_failure PROTO((rtx, enum reg_class));
+static void count_spilled_pseudo PROTO((int, int, int));
static void delete_dead_insn PROTO((rtx));
static void alter_reg PROTO((int, int));
static void set_label_offsets PROTO((rtx, rtx, int));
static void elimination_effects PROTO((rtx, enum machine_mode));
static int eliminate_regs_in_insn PROTO((rtx, int));
static void update_eliminable_offsets PROTO((void));
-static void mark_not_eliminable PROTO((rtx, rtx));
+static void mark_not_eliminable PROTO((rtx, rtx, void *));
static void set_initial_elim_offsets PROTO((void));
static void verify_initial_elim_offsets PROTO((void));
static void set_initial_label_offsets PROTO((void));
static int finish_spills PROTO((int, FILE *));
static void ior_hard_reg_set PROTO((HARD_REG_SET *, HARD_REG_SET *));
static void scan_paradoxical_subregs PROTO((rtx));
-static int hard_reg_use_compare PROTO((const PTR, const PTR));
-static void count_pseudo PROTO((struct hard_reg_n_uses *, int));
+static void count_pseudo PROTO((int));
static void order_regs_for_reload PROTO((struct insn_chain *));
static void reload_as_needed PROTO((int));
-static void forget_old_reloads_1 PROTO((rtx, rtx));
+static void forget_old_reloads_1 PROTO((rtx, rtx, void *));
static int reload_reg_class_lower PROTO((const PTR, const PTR));
static void mark_reload_reg_in_use PROTO((int, int, enum reload_type,
enum machine_mode));
static void clear_reload_reg_in_use PROTO((int, int, enum reload_type,
enum machine_mode));
static int reload_reg_free_p PROTO((int, int, enum reload_type));
-static int reload_reg_free_for_value_p PROTO((int, int, enum reload_type, rtx, rtx, int, int));
+static int reload_reg_free_for_value_p PROTO((int, int, enum reload_type,
+ rtx, rtx, int, int));
static int reload_reg_reaches_end_p PROTO((int, int, enum reload_type));
-static int allocate_reload_reg PROTO((struct insn_chain *, int, int,
- int));
+static int allocate_reload_reg PROTO((struct insn_chain *, int, int));
+static void failed_reload PROTO((rtx, int));
+static int set_reload_reg PROTO((int, int));
+static void choose_reload_regs_init PROTO((struct insn_chain *, rtx *));
static void choose_reload_regs PROTO((struct insn_chain *));
static void merge_assigned_reloads PROTO((rtx));
static void emit_reload_insns PROTO((struct insn_chain *));
static void reload_cse_invalidate_regno PROTO((int, enum machine_mode, int));
static int reload_cse_mem_conflict_p PROTO((rtx, rtx));
static void reload_cse_invalidate_mem PROTO((rtx));
-static void reload_cse_invalidate_rtx PROTO((rtx, rtx));
+static void reload_cse_invalidate_rtx PROTO((rtx, rtx, void *));
static int reload_cse_regno_equal_p PROTO((int, rtx, enum machine_mode));
static int reload_cse_noop_set_p PROTO((rtx, rtx));
static int reload_cse_simplify_set PROTO((rtx, rtx));
static int reload_cse_simplify_operands PROTO((rtx));
-static void reload_cse_check_clobber PROTO((rtx, rtx));
+static void reload_cse_check_clobber PROTO((rtx, rtx, void *));
static void reload_cse_record_set PROTO((rtx, rtx));
static void reload_combine PROTO((void));
static void reload_combine_note_use PROTO((rtx *, rtx));
-static void reload_combine_note_store PROTO((rtx, rtx));
+static void reload_combine_note_store PROTO((rtx, rtx, void *));
static void reload_cse_move2add PROTO((rtx));
-static void move2add_note_store PROTO((rtx, rtx));
+static void move2add_note_store PROTO((rtx, rtx, void *));
#ifdef AUTO_INC_DEC
static void add_auto_inc_notes PROTO((rtx, rtx));
#endif
static rtx gen_mode_int PROTO((enum machine_mode,
HOST_WIDE_INT));
+static void failed_reload PROTO((rtx, int));
+static int set_reload_reg PROTO((int, int));
extern void dump_needs PROTO((struct insn_chain *, FILE *));
\f
/* Initialize the reload pass once per compilation. */
/* Initialize obstack for our rtl allocation. */
gcc_obstack_init (&reload_obstack);
reload_startobj = (char *) obstack_alloc (&reload_obstack, 0);
+
+ INIT_REG_SET (&spilled_pseudos);
+ INIT_REG_SET (&pseudos_counted);
}
/* List of insn chains that are currently unused. */
{
c = (struct insn_chain *)
obstack_alloc (&reload_obstack, sizeof (struct insn_chain));
- c->live_before = OBSTACK_ALLOC_REG_SET (&reload_obstack);
- c->live_after = OBSTACK_ALLOC_REG_SET (&reload_obstack);
+ c->live_throughout = OBSTACK_ALLOC_REG_SET (&reload_obstack);
+ c->dead_or_set = OBSTACK_ALLOC_REG_SET (&reload_obstack);
}
else
{
for (insn = first; insn && num_eliminable; insn = NEXT_INSN (insn))
if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
|| GET_CODE (insn) == CALL_INSN)
- note_stores (PATTERN (insn), mark_not_eliminable);
+ note_stores (PATTERN (insn), mark_not_eliminable, NULL);
maybe_fix_stack_asms ();
/* Initialize to -1, which means take the first spill register. */
last_spill_reg = -1;
- spilled_pseudos = ALLOCA_REG_SET ();
-
/* Spill any hard regs that we know we can't eliminate. */
CLEAR_HARD_REG_SET (used_spill_regs);
  for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
{
int something_changed;
int did_spill;
- struct insn_chain *chain;
HOST_WIDE_INT starting_frame_size;
calculate_needs_all_insns (global);
- CLEAR_REG_SET (spilled_pseudos);
+ CLEAR_REG_SET (&spilled_pseudos);
did_spill = 0;
something_changed = 0;
}
}
- CLEAR_HARD_REG_SET (used_spill_regs);
- /* Try to satisfy the needs for each insn. */
- for (chain = insns_need_reload; chain != 0;
- chain = chain->next_need_reload)
- find_reload_regs (chain, dumpfile);
-
+ select_reload_regs (dumpfile);
if (failure)
goto failed;
if (caller_save_needed)
delete_caller_save_insns ();
+
+ obstack_free (&reload_obstack, reload_firstobj);
}
/* If global-alloc was run, notify it of any register eliminations we have
and we decide not to abort about it. */
failed:
+ CLEAR_REG_SET (&spilled_pseudos);
reload_in_progress = 0;
/* Now eliminate all pseudo regs by modifying them into
which are only valid during and after reload. */
reload_completed = 1;
- /* Make a pass over all the insns and delete all USEs which we
- inserted only to tag a REG_EQUAL note on them. Remove all
- REG_DEAD and REG_UNUSED notes. Delete all CLOBBER insns and
- simplify (subreg (reg)) operands. Also remove all REG_RETVAL and
- REG_LIBCALL notes since they are no longer useful or accurate.
- Strip and regenerate REG_INC notes that may have been moved
- around. */
+ /* Make a pass over all the insns and delete all USEs which we inserted
+ only to tag a REG_EQUAL note on them. Remove all REG_DEAD and REG_UNUSED
+ notes. Delete all CLOBBER insns that don't refer to the return value
+ and simplify (subreg (reg)) operands. Also remove all REG_RETVAL and
+ REG_LIBCALL notes since they are no longer useful or accurate. Strip
+ and regenerate REG_INC notes that may have been moved around. */
for (insn = first; insn; insn = NEXT_INSN (insn))
if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
if ((GET_CODE (PATTERN (insn)) == USE
&& find_reg_note (insn, REG_EQUAL, NULL_RTX))
- || GET_CODE (PATTERN (insn)) == CLOBBER)
+ || (GET_CODE (PATTERN (insn)) == CLOBBER
+ && (GET_CODE (XEXP (PATTERN (insn), 0)) != REG
+ || ! REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))))
{
PUT_CODE (insn, NOTE);
NOTE_SOURCE_FILE (insn) = 0;
free (pseudo_previous_regs);
free (pseudo_forbidden_regs);
- FREE_REG_SET (spilled_pseudos);
-
CLEAR_HARD_REG_SET (used_spill_regs);
for (i = 0; i < n_spills; i++)
SET_HARD_REG_BIT (used_spill_regs, spill_regs[i]);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (allowed, i))
{
- CLEAR_REGNO_REG_SET (chain->live_before, i);
- CLEAR_REGNO_REG_SET (chain->live_after, i);
+ CLEAR_REGNO_REG_SET (chain->live_throughout, i);
+ CLEAR_REGNO_REG_SET (chain->dead_or_set, i);
}
}
#endif
}
-
\f
+/* Copy the global variables n_reloads and rld into the corresponding elts
+   of CHAIN. */
+static void
+copy_reloads (chain)
+     struct insn_chain *chain;
+{
+  chain->n_reloads = n_reloads;
+  chain->rld
+    = (struct reload *) obstack_alloc (&reload_obstack,
+				       n_reloads * sizeof (struct reload));
+  memcpy (chain->rld, rld, n_reloads * sizeof (struct reload));
+  /* Record the obstack position after the copied reloads; memory
+     allocated beyond this point is per-insn scratch and is freed once
+     the insn has been processed (see reload_insn_firstobj). */
+  reload_insn_firstobj = (char *) obstack_alloc (&reload_obstack, 0);
+}
+
/* Walk the chain of insns, and determine for each whether it needs reloads
and/or eliminations. Build the corresponding insns_need_reload list, and
set something_needs_elimination as appropriate. */
int global;
{
struct insn_chain **pprev_reload = &insns_need_reload;
- struct insn_chain **pchain;
+ struct insn_chain *chain;
something_needs_elimination = 0;
- for (pchain = &reload_insn_chain; *pchain != 0; pchain = &(*pchain)->next)
+ reload_insn_firstobj = (char *) obstack_alloc (&reload_obstack, 0);
+ for (chain = reload_insn_chain; chain != 0; chain = chain->next)
{
- rtx insn;
- struct insn_chain *chain;
+ rtx insn = chain->insn;
- chain = *pchain;
- insn = chain->insn;
+ /* Clear out the shortcuts. */
+ chain->n_reloads = 0;
+ chain->need_elim = 0;
+ chain->need_reload = 0;
+ chain->need_operand_change = 0;
/* If this is a label, a JUMP_INSN, or has REG_NOTES (which might
include REG_LABEL), we need to see what effects this has on the
if (set && GET_CODE (SET_DEST (set)) == REG
&& reg_renumber[REGNO (SET_DEST (set))] < 0
&& reg_equiv_constant[REGNO (SET_DEST (set))])
- {
- /* Must clear out the shortcuts, in case they were set last
- time through. */
- chain->need_elim = 0;
- chain->need_reload = 0;
- chain->need_operand_change = 0;
- continue;
- }
+ continue;
/* If needed, eliminate any eliminable registers. */
if (num_eliminable || num_eliminable_invariants)
/* Discard any register replacements done. */
if (did_elimination)
{
- obstack_free (&reload_obstack, reload_firstobj);
+ obstack_free (&reload_obstack, reload_insn_firstobj);
PATTERN (insn) = old_body;
INSN_CODE (insn) = old_code;
REG_NOTES (insn) = old_notes;
if (n_reloads != 0)
{
+ copy_reloads (chain);
*pprev_reload = chain;
pprev_reload = &chain->next_need_reload;
-
- calculate_needs (chain);
}
}
}
*pprev_reload = 0;
}
+\f
+/* Comparison function for qsort to decide which of two reloads
+   should be handled first.  *P1 and *P2 are the reload numbers.
+   A negative result means the reload *P1 should be processed first. */

-/* Compute the most additional registers needed by one instruction,
-   given by CHAIN. Collect information separately for each class of regs.
-
-   To compute the number of reload registers of each class needed for an
-   insn, we must simulate what choose_reload_regs can do. We do this by
-   splitting an insn into an "input" and an "output" part. RELOAD_OTHER
-   reloads are used in both. The input part uses those reloads,
-   RELOAD_FOR_INPUT reloads, which must be live over the entire input section
-   of reloads, and the maximum of all the RELOAD_FOR_INPUT_ADDRESS and
-   RELOAD_FOR_OPERAND_ADDRESS reloads, which conflict with the inputs.
-
-   The registers needed for output are RELOAD_OTHER and RELOAD_FOR_OUTPUT,
-   which are live for the entire output portion, and the maximum of all the
-   RELOAD_FOR_OUTPUT_ADDRESS reloads for each operand.
-
-   The total number of registers needed is the maximum of the
-   inputs and outputs. */
-
-static void
-calculate_needs (chain)
-     struct insn_chain *chain;
+static int
+reload_reg_class_lower (r1p, r2p)
+     const PTR r1p;
+     const PTR r2p;
{
-  int i;
-
-  /* Each `struct needs' corresponds to one RELOAD_... type. */
-  struct {
-    struct needs other;
-    struct needs input;
-    struct needs output;
-    struct needs insn;
-    struct needs other_addr;
-    struct needs op_addr;
-    struct needs op_addr_reload;
-    struct needs in_addr[MAX_RECOG_OPERANDS];
-    struct needs in_addr_addr[MAX_RECOG_OPERANDS];
-    struct needs out_addr[MAX_RECOG_OPERANDS];
-    struct needs out_addr_addr[MAX_RECOG_OPERANDS];
-  } insn_needs;
-
-  bzero ((char *) chain->group_size, sizeof chain->group_size);
-  for (i = 0; i < N_REG_CLASSES; i++)
-    chain->group_mode[i] = VOIDmode;
-  bzero ((char *) &insn_needs, sizeof insn_needs);
-
-  /* Count each reload once in every class
-     containing the reload's own class. */
-
-  for (i = 0; i < n_reloads; i++)
-    {
-      register enum reg_class *p;
-      enum reg_class class = rld[i].class;
-      int size;
-      enum machine_mode mode;
-      struct needs *this_needs;
-
-      /* Don't count the dummy reloads, for which one of the
-	 regs mentioned in the insn can be used for reloading.
-	 Don't count optional reloads.
-	 Don't count reloads that got combined with others. */
-      if (rld[i].reg_rtx != 0
-	  || rld[i].optional != 0
-	  || (rld[i].out == 0 && rld[i].in == 0
-	      && ! rld[i].secondary_p))
-	continue;
-
-      mode = rld[i].inmode;
-      if (GET_MODE_SIZE (rld[i].outmode) > GET_MODE_SIZE (mode))
-	mode = rld[i].outmode;
-      size = CLASS_MAX_NREGS (class, mode);
+  register int r1 = *(short *)r1p, r2 = *(short *)r2p;
+  register int t;

-      /* Decide which time-of-use to count this reload for. */
-      switch (rld[i].when_needed)
-	{
-	case RELOAD_OTHER:
-	  this_needs = &insn_needs.other;
-	  break;
-	case RELOAD_FOR_INPUT:
-	  this_needs = &insn_needs.input;
-	  break;
-	case RELOAD_FOR_OUTPUT:
-	  this_needs = &insn_needs.output;
-	  break;
-	case RELOAD_FOR_INSN:
-	  this_needs = &insn_needs.insn;
-	  break;
-	case RELOAD_FOR_OTHER_ADDRESS:
-	  this_needs = &insn_needs.other_addr;
-	  break;
-	case RELOAD_FOR_INPUT_ADDRESS:
-	  this_needs = &insn_needs.in_addr[rld[i].opnum];
-	  break;
-	case RELOAD_FOR_INPADDR_ADDRESS:
-	  this_needs = &insn_needs.in_addr_addr[rld[i].opnum];
-	  break;
-	case RELOAD_FOR_OUTPUT_ADDRESS:
-	  this_needs = &insn_needs.out_addr[rld[i].opnum];
-	  break;
-	case RELOAD_FOR_OUTADDR_ADDRESS:
-	  this_needs = &insn_needs.out_addr_addr[rld[i].opnum];
-	  break;
-	case RELOAD_FOR_OPERAND_ADDRESS:
-	  this_needs = &insn_needs.op_addr;
-	  break;
-	case RELOAD_FOR_OPADDR_ADDR:
-	  this_needs = &insn_needs.op_addr_reload;
-	  break;
-	default:
-	  abort();
-	}
+  /* Consider required reloads before optional ones. */
+  t = rld[r1].optional - rld[r2].optional;
+  if (t != 0)
+    return t;

-      if (size > 1)
-	{
-	  enum machine_mode other_mode, allocate_mode;
-
-	  /* Count number of groups needed separately from
-	     number of individual regs needed. */
-	  this_needs->groups[(int) class]++;
-	  p = reg_class_superclasses[(int) class];
-	  while (*p != LIM_REG_CLASSES)
-	    this_needs->groups[(int) *p++]++;
-
-	  /* Record size and mode of a group of this class. */
-	  /* If more than one size group is needed,
-	     make all groups the largest needed size. */
-	  if (chain->group_size[(int) class] < size)
-	    {
-	      other_mode = chain->group_mode[(int) class];
-	      allocate_mode = mode;
+  /* Count all solitary classes before non-solitary ones. */
+  t = ((reg_class_size[(int) rld[r2].class] == 1)
+       - (reg_class_size[(int) rld[r1].class] == 1));
+  if (t != 0)
+    return t;

-	      chain->group_size[(int) class] = size;
-	      chain->group_mode[(int) class] = mode;
-	    }
-	  else
-	    {
-	      other_mode = mode;
-	      allocate_mode = chain->group_mode[(int) class];
-	    }
+  /* Aside from solitaires, consider all multi-reg groups first. */
+  t = rld[r2].nregs - rld[r1].nregs;
+  if (t != 0)
+    return t;

-	  /* Crash if two dissimilar machine modes both need
-	     groups of consecutive regs of the same class. */
+  /* Consider reloads in order of increasing reg-class number. */
+  t = (int) rld[r1].class - (int) rld[r2].class;
+  if (t != 0)
+    return t;

-	  if (other_mode != VOIDmode && other_mode != allocate_mode
-	      && ! modes_equiv_for_class_p (allocate_mode,
-					    other_mode, class))
-	    fatal_insn ("Two dissimilar machine modes both need groups of consecutive regs of the same class",
-			chain->insn);
-	}
-      else if (size == 1)
-	{
-	  this_needs->regs[(unsigned char)rld[i].nongroup][(int) class] += 1;
-	  p = reg_class_superclasses[(int) class];
-	  while (*p != LIM_REG_CLASSES)
-	    this_needs->regs[(unsigned char)rld[i].nongroup][(int) *p++] += 1;
-	}
-      else
-	abort ();
-    }
+  /* If reloads are equally urgent, sort by reload number,
+     so that the results of qsort leave nothing to chance. */
+  return r1 - r2;
+}
+\f
+/* The cost of spilling each hard reg. */
+static int spill_cost[FIRST_PSEUDO_REGISTER];

-  /* All reloads have been counted for this insn;
-     now merge the various times of use.
-     This sets insn_needs, etc., to the maximum total number
-     of registers needed at any point in this insn. */
+/* When spilling multiple hard registers, we use SPILL_COST for the first
+   spilled hard reg and SPILL_ADD_COST for subsequent regs.  SPILL_ADD_COST
+   is accumulated only on the first hard reg of a multi-reg pseudo. */
+static int spill_add_cost[FIRST_PSEUDO_REGISTER];

-  for (i = 0; i < N_REG_CLASSES; i++)
-    {
-      int j, in_max, out_max;
+/* Update the spill cost arrays, considering that pseudo REG is live. */
+static void
+count_pseudo (reg)
+     int reg;
+{
+  int n_refs = REG_N_REFS (reg);
+  int r = reg_renumber[reg];
+  int nregs;

-      /* Compute normal and nongroup needs. */
-      for (j = 0; j <= 1; j++)
-	{
-	  int k;
-	  for (in_max = 0, out_max = 0, k = 0; k < reload_n_operands; k++)
-	    {
-	      in_max = MAX (in_max,
-			    (insn_needs.in_addr[k].regs[j][i]
-			     + insn_needs.in_addr_addr[k].regs[j][i]));
-	      out_max = MAX (out_max, insn_needs.out_addr[k].regs[j][i]);
-	      out_max = MAX (out_max,
-			     insn_needs.out_addr_addr[k].regs[j][i]);
-	    }
+  /* Count each live pseudo at most once, and never count ones that
+     have already been decided to be spilled. */
+  if (REGNO_REG_SET_P (&pseudos_counted, reg)
+      || REGNO_REG_SET_P (&spilled_pseudos, reg))
+    return;

-      /* RELOAD_FOR_INSN reloads conflict with inputs, outputs,
-	 and operand addresses but not things used to reload
-	 them. Similarly, RELOAD_FOR_OPERAND_ADDRESS reloads
-	 don't conflict with things needed to reload inputs or
-	 outputs. */
+  SET_REGNO_REG_SET (&pseudos_counted, reg);

-      in_max = MAX (MAX (insn_needs.op_addr.regs[j][i],
-			 insn_needs.op_addr_reload.regs[j][i]),
-		    in_max);
+  /* A pseudo counted here is expected to have a hard register; spilled
+     pseudos were excluded above. */
+  if (r < 0)
+    abort ();
+
+  spill_add_cost[r] += n_refs;

-	  out_max = MAX (out_max, insn_needs.insn.regs[j][i]);
+  nregs = HARD_REGNO_NREGS (r, PSEUDO_REGNO_MODE (reg));
+  while (nregs-- > 0)
+    spill_cost[r + nregs] += n_refs;
+}
- insn_needs.input.regs[j][i]
- = MAX (insn_needs.input.regs[j][i]
- + insn_needs.op_addr.regs[j][i]
- + insn_needs.insn.regs[j][i],
- in_max + insn_needs.input.regs[j][i]);
+/* Calculate the SPILL_COST and SPILL_ADD_COST arrays and determine the
+   contents of BAD_SPILL_REGS for the insn described by CHAIN. */
+static void
+order_regs_for_reload (chain)
+     struct insn_chain *chain;
+{
+  register int i, j;

-	  insn_needs.output.regs[j][i] += out_max;
-	  insn_needs.other.regs[j][i]
-	    += MAX (MAX (insn_needs.input.regs[j][i],
-			 insn_needs.output.regs[j][i]),
-		    insn_needs.other_addr.regs[j][i]);
+  COPY_HARD_REG_SET (bad_spill_regs, bad_spill_regs_global);

-	}
+  memset (spill_cost, 0, sizeof spill_cost);
+  memset (spill_add_cost, 0, sizeof spill_add_cost);

-      /* Now compute group needs. */
-      for (in_max = 0, out_max = 0, j = 0; j < reload_n_operands; j++)
-	{
-	  in_max = MAX (in_max, insn_needs.in_addr[j].groups[i]);
-	  in_max = MAX (in_max, insn_needs.in_addr_addr[j].groups[i]);
-	  out_max = MAX (out_max, insn_needs.out_addr[j].groups[i]);
-	  out_max = MAX (out_max, insn_needs.out_addr_addr[j].groups[i]);
-	}
+  /* Count number of uses of each hard reg by pseudo regs allocated to it. */

-      in_max = MAX (MAX (insn_needs.op_addr.groups[i],
-			 insn_needs.op_addr_reload.groups[i]),
-		    in_max);
-      out_max = MAX (out_max, insn_needs.insn.groups[i]);
-
-      insn_needs.input.groups[i]
-	= MAX (insn_needs.input.groups[i]
-	       + insn_needs.op_addr.groups[i]
-	       + insn_needs.insn.groups[i],
-	       in_max + insn_needs.input.groups[i]);
-
-      insn_needs.output.groups[i] += out_max;
-      insn_needs.other.groups[i]
-	+= MAX (MAX (insn_needs.input.groups[i],
-		     insn_needs.output.groups[i]),
-		insn_needs.other_addr.groups[i]);
+  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+    {
+      /* Test the various reasons why we can't use a register for
+	 spilling in this insn. */
+      if (fixed_regs[i]
+	  || REGNO_REG_SET_P (chain->live_throughout, i)
+	  || REGNO_REG_SET_P (chain->dead_or_set, i))
+	SET_HARD_REG_BIT (bad_spill_regs, i);
    }

+  /* Now find out which pseudos are live across this insn, and accumulate
+     their reference counts into the spill cost arrays. */
+  CLEAR_REG_SET (&pseudos_counted);

-  /* Record the needs for later. */
-  chain->need = insn_needs.other;
+  EXECUTE_IF_SET_IN_REG_SET
+    (chain->live_throughout, FIRST_PSEUDO_REGISTER, j,
+     {
+       count_pseudo (j);
+     });
+  EXECUTE_IF_SET_IN_REG_SET
+    (chain->dead_or_set, FIRST_PSEUDO_REGISTER, j,
+     {
+       count_pseudo (j);
+     });
+  CLEAR_REG_SET (&pseudos_counted);
}
\f
-/* Find a group of exactly 2 registers.
-
-   First try to fill out the group by spilling a single register which
-   would allow completion of the group.
-
-   Then try to create a new group from a pair of registers, neither of
-   which are explicitly used.
+/* Vector of reload-numbers showing the order in which the reloads should
+   be processed. */
+static short reload_order[MAX_RELOADS];

-   Then try to create a group from any pair of registers. */
+/* This is used to keep track of the spill regs used in one insn. */
+static HARD_REG_SET used_spill_regs_local;

+/* We decided to spill hard register SPILLED, which has a size of
+   SPILLED_NREGS. Determine how pseudo REG, which is live during the insn,
+   is affected. We will add it to SPILLED_PSEUDOS if necessary, and we will
+   update SPILL_COST/SPILL_ADD_COST. */
static void
-find_tworeg_group (chain, class, dumpfile)
-     struct insn_chain *chain;
-     int class;
-     FILE *dumpfile;
+count_spilled_pseudo (spilled, spilled_nregs, reg)
+     int spilled, spilled_nregs, reg;
{
-  int i;
-  /* First, look for a register that will complete a group. */
-  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-    {
-      int j, other;
-
-      j = potential_reload_regs[i];
-      if (j >= 0 && ! TEST_HARD_REG_BIT (bad_spill_regs, j)
-	  && ((j > 0 && (other = j - 1, spill_reg_order[other] >= 0)
-	       && TEST_HARD_REG_BIT (reg_class_contents[class], j)
-	       && TEST_HARD_REG_BIT (reg_class_contents[class], other)
-	       && HARD_REGNO_MODE_OK (other, chain->group_mode[class])
-	       && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, other)
-	       /* We don't want one part of another group.
-		  We could get "two groups" that overlap! */
-	       && ! TEST_HARD_REG_BIT (chain->counted_for_groups, other))
-	      || (j < FIRST_PSEUDO_REGISTER - 1
-		  && (other = j + 1, spill_reg_order[other] >= 0)
-		  && TEST_HARD_REG_BIT (reg_class_contents[class], j)
-		  && TEST_HARD_REG_BIT (reg_class_contents[class], other)
-		  && HARD_REGNO_MODE_OK (j, chain->group_mode[class])
-		  && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, other)
-		  && ! TEST_HARD_REG_BIT (chain->counted_for_groups, other))))
-	{
-	  register enum reg_class *p;
-
-	  /* We have found one that will complete a group,
-	     so count off one group as provided. */
-	  chain->need.groups[class]--;
-	  p = reg_class_superclasses[class];
-	  while (*p != LIM_REG_CLASSES)
-	    {
-	      if (chain->group_size [(int) *p] <= chain->group_size [class])
-		chain->need.groups[(int) *p]--;
-	      p++;
-	    }
+  int r = reg_renumber[reg];
+  int nregs = HARD_REGNO_NREGS (r, PSEUDO_REGNO_MODE (reg));

-	  /* Indicate both these regs are part of a group. */
-	  SET_HARD_REG_BIT (chain->counted_for_groups, j);
-	  SET_HARD_REG_BIT (chain->counted_for_groups, other);
-	  break;
-	}
-    }
-  /* We can't complete a group, so start one. */
-  if (i == FIRST_PSEUDO_REGISTER)
-    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-      {
-	int j, k;
-	j = potential_reload_regs[i];
-	/* Verify that J+1 is a potential reload reg. */
-	for (k = 0; k < FIRST_PSEUDO_REGISTER; k++)
-	  if (potential_reload_regs[k] == j + 1)
-	    break;
-	if (j >= 0 && j + 1 < FIRST_PSEUDO_REGISTER
-	    && k < FIRST_PSEUDO_REGISTER
-	    && spill_reg_order[j] < 0 && spill_reg_order[j + 1] < 0
-	    && TEST_HARD_REG_BIT (reg_class_contents[class], j)
-	    && TEST_HARD_REG_BIT (reg_class_contents[class], j + 1)
-	    && HARD_REGNO_MODE_OK (j, chain->group_mode[class])
-	    && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, j + 1)
-	    && ! TEST_HARD_REG_BIT (bad_spill_regs, j + 1))
-	  break;
-      }
+  /* Nothing to do if REG was already spilled, or if it does not overlap
+     the hard registers [SPILLED, SPILLED + SPILLED_NREGS). */
+  if (REGNO_REG_SET_P (&spilled_pseudos, reg)
+      || spilled + spilled_nregs <= r || r + nregs <= spilled)
+    return;

-  /* I should be the index in potential_reload_regs
-     of the new reload reg we have found. */
+  SET_REGNO_REG_SET (&spilled_pseudos, reg);

-  new_spill_reg (chain, i, class, 0, dumpfile);
+  /* REG no longer occupies its hard regs, so remove its reference count
+     from the spill cost arrays. */
+  spill_add_cost[r] -= REG_N_REFS (reg);
+  while (nregs-- > 0)
+    spill_cost[r + nregs] -= REG_N_REFS (reg);
}
-/* Find a group of more than 2 registers.
-   Look for a sufficient sequence of unspilled registers, and spill them all
-   at once. */
+/* Find reload register to use for reload number ORDER within the insn
+   described by CHAIN.  Return nonzero on success; in that case the
+   reload's regno and nregs fields are filled in, affected live pseudos
+   are marked spilled, and the spill cost arrays are updated.  Return
+   zero if no usable register was found. */

-static void
-find_group (chain, class, dumpfile)
+static int
+find_reg (chain, order, dumpfile)
     struct insn_chain *chain;
-     int class;
+     int order;
     FILE *dumpfile;
{
-  int i;
+  int rnum = reload_order[order];
+  struct reload *rl = rld + rnum;
+  int best_cost = INT_MAX;
+  int best_reg = -1;
+  int i, j;
+  HARD_REG_SET not_usable;
+  HARD_REG_SET used_by_other_reload;

-  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+  /* A register is unusable if it is bad for spilling in this insn or
+     globally, or if it is not in the reload's class. */
+  COPY_HARD_REG_SET (not_usable, bad_spill_regs);
+  IOR_HARD_REG_SET (not_usable, bad_spill_regs_global);
+  IOR_COMPL_HARD_REG_SET (not_usable, reg_class_contents[rl->class]);
+
+  /* Collect the registers already taken by conflicting reloads that were
+     processed earlier in this ordering. */
+  CLEAR_HARD_REG_SET (used_by_other_reload);
+  for (i = 0; i < order; i++)
    {
-      int j = potential_reload_regs[i];
+      int other = reload_order[i];
+      if (rld[other].regno >= 0 && reloads_conflict (other, rnum))
+	for (j = 0; j < rld[other].nregs; j++)
+	  SET_HARD_REG_BIT (used_by_other_reload, rld[other].regno + j);
+    }

-      if (j >= 0
-	  && j + chain->group_size[class] <= FIRST_PSEUDO_REGISTER
-	  && HARD_REGNO_MODE_OK (j, chain->group_mode[class]))
+  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+    {
+      int regno = i;
+      if (! TEST_HARD_REG_BIT (not_usable, regno)
+	  && ! TEST_HARD_REG_BIT (used_by_other_reload, regno)
+	  && HARD_REGNO_MODE_OK (regno, rl->mode))
	{
-	  int k;
-	  /* Check each reg in the sequence. */
-	  for (k = 0; k < chain->group_size[class]; k++)
-	    if (! (spill_reg_order[j + k] < 0
-		   && ! TEST_HARD_REG_BIT (bad_spill_regs, j + k)
-		   && TEST_HARD_REG_BIT (reg_class_contents[class], j + k)))
-	      break;
-	  /* We got a full sequence, so spill them all. */
-	  if (k == chain->group_size[class])
-	    {
-	      register enum reg_class *p;
-	      for (k = 0; k < chain->group_size[class]; k++)
-		{
-		  int idx;
-		  SET_HARD_REG_BIT (chain->counted_for_groups, j + k);
-		  for (idx = 0; idx < FIRST_PSEUDO_REGISTER; idx++)
-		    if (potential_reload_regs[idx] == j + k)
-		      break;
-		  new_spill_reg (chain, idx, class, 0, dumpfile);
-		}
+	  int this_cost = spill_cost[regno];
+	  int ok = 1;
+	  int this_nregs = HARD_REGNO_NREGS (regno, rl->mode);

-	      /* We have found one that will complete a group,
-		 so count off one group as provided. */
-	      chain->need.groups[class]--;
-	      p = reg_class_superclasses[class];
-	      while (*p != LIM_REG_CLASSES)
-		{
-		  if (chain->group_size [(int) *p]
-		      <= chain->group_size [class])
-		    chain->need.groups[(int) *p]--;
-		  p++;
-		}
-	      return;
+	  /* A multi-register reload must have every register of the
+	     group usable; sum their costs. */
+	  for (j = 1; j < this_nregs; j++)
+	    {
+	      this_cost += spill_add_cost[regno + j];
+	      if ((TEST_HARD_REG_BIT (not_usable, regno + j))
+		  || TEST_HARD_REG_BIT (used_by_other_reload, regno + j))
+		ok = 0;
+	    }
+	  if (! ok)
+	    continue;
+	  /* Slightly prefer a register that already holds the reload's
+	     input or output value. */
+	  if (rl->in && GET_CODE (rl->in) == REG && REGNO (rl->in) == regno)
+	    this_cost--;
+	  if (rl->out && GET_CODE (rl->out) == REG && REGNO (rl->out) == regno)
+	    this_cost--;
+	  if (this_cost < best_cost
+	      /* Among registers with equal cost, prefer caller-saved ones, or
+		 use REG_ALLOC_ORDER if it is defined. */
+	      || (this_cost == best_cost
+#ifdef REG_ALLOC_ORDER
+		  && (inv_reg_alloc_order[regno]
+		      < inv_reg_alloc_order[best_reg])
+#else
+		  && call_used_regs[regno]
+		  && ! call_used_regs[best_reg]
+#endif
+		  ))
+	    {
+	      best_reg = regno;
+	      best_cost = this_cost;
	    }
	}
    }
-  /* There are no groups left. */
-  spill_failure (chain->insn);
-  failure = 1;
-}
+  if (best_reg == -1)
+    return 0;
+  if (dumpfile)
+    fprintf (dumpfile, "Using reg %d for reload %d\n", best_reg, rnum);
+  rl->nregs = HARD_REGNO_NREGS (best_reg, rl->mode);
+  rl->regno = best_reg;

-/* If pseudo REG conflicts with one of our reload registers, mark it as
-   spilled. */
-static void
-maybe_mark_pseudo_spilled (reg)
-     int reg;
-{
-  int i;
-  int r = reg_renumber[reg];
-  int nregs;
+  /* Mark as spilled every live pseudo that overlaps the chosen regs,
+     and take their references out of the cost arrays. */
+  EXECUTE_IF_SET_IN_REG_SET
+    (chain->live_throughout, FIRST_PSEUDO_REGISTER, j,
+     {
+       count_spilled_pseudo (best_reg, rl->nregs, j);
+     });
+  EXECUTE_IF_SET_IN_REG_SET
+    (chain->dead_or_set, FIRST_PSEUDO_REGISTER, j,
+     {
+       count_spilled_pseudo (best_reg, rl->nregs, j);
+     });

-  if (r < 0)
-    abort ();
-  nregs = HARD_REGNO_NREGS (r, PSEUDO_REGNO_MODE (reg));
-  for (i = 0; i < n_spills; i++)
-    if (r <= spill_regs[i] && r + nregs > spill_regs[i])
-      {
-	SET_REGNO_REG_SET (spilled_pseudos, reg);
-	return;
-      }
+  /* After the above, the chosen registers should carry no remaining
+     spill cost; record them as used by this insn. */
+  for (i = 0; i < rl->nregs; i++)
+    {
+      if (spill_cost[best_reg + i] != 0
+	  || spill_add_cost[best_reg + i] != 0)
+	abort ();
+      SET_HARD_REG_BIT (used_spill_regs_local, best_reg + i);
+    }
+  return 1;
}
/* Find more reload regs to satisfy the remaining need of an insn, which
is given by CHAIN.
Do it by ascending class number, since otherwise a reg
might be spilled for a big class and might fail to count
- for a smaller class even though it belongs to that class.
-
- Count spilled regs in `spills', and add entries to
- `spill_regs' and `spill_reg_order'.
-
- ??? Note there is a problem here.
- When there is a need for a group in a high-numbered class,
- and also need for non-group regs that come from a lower class,
- the non-group regs are chosen first. If there aren't many regs,
- they might leave no room for a group.
-
- This was happening on the 386. To fix it, we added the code
- that calls possible_group_p, so that the lower class won't
- break up the last possible group.
-
- Really fixing the problem would require changes above
- in counting the regs already spilled, and in choose_reload_regs.
- It might be hard to avoid introducing bugs there. */
+ for a smaller class even though it belongs to that class. */
static void
find_reload_regs (chain, dumpfile)
struct insn_chain *chain;
FILE *dumpfile;
{
- int i, class;
- short *group_needs = chain->need.groups;
- short *simple_needs = chain->need.regs[0];
- short *nongroup_needs = chain->need.regs[1];
-
- if (dumpfile)
- fprintf (dumpfile, "Spilling for insn %d.\n", INSN_UID (chain->insn));
-
- /* Compute the order of preference for hard registers to spill.
- Store them by decreasing preference in potential_reload_regs. */
-
- order_regs_for_reload (chain);
-
- /* So far, no hard regs have been spilled. */
- n_spills = 0;
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- spill_reg_order[i] = -1;
-
- CLEAR_HARD_REG_SET (chain->used_spill_regs);
- CLEAR_HARD_REG_SET (chain->counted_for_groups);
- CLEAR_HARD_REG_SET (chain->counted_for_nongroups);
+ int i;
- for (class = 0; class < N_REG_CLASSES; class++)
+ /* In order to be certain of getting the registers we need,
+ we must sort the reloads into order of increasing register class.
+ Then our grabbing of reload registers will parallel the process
+ that provided the reload registers. */
+ for (i = 0; i < chain->n_reloads; i++)
{
- /* First get the groups of registers.
- If we got single registers first, we might fragment
- possible groups. */
- while (group_needs[class] > 0)
+ /* Show whether this reload already has a hard reg. */
+ if (chain->rld[i].reg_rtx)
{
- /* If any single spilled regs happen to form groups,
- count them now. Maybe we don't really need
- to spill another group. */
- count_possible_groups (chain, class);
-
- if (group_needs[class] <= 0)
- break;
-
- /* Groups of size 2, the only groups used on most machines,
- are treated specially. */
- if (chain->group_size[class] == 2)
- find_tworeg_group (chain, class, dumpfile);
- else
- find_group (chain, class, dumpfile);
- if (failure)
- return;
+ int regno = REGNO (chain->rld[i].reg_rtx);
+ chain->rld[i].regno = regno;
+ chain->rld[i].nregs = HARD_REGNO_NREGS (regno, GET_MODE (chain->rld[i].reg_rtx));
}
+ else
+ chain->rld[i].regno = -1;
+ reload_order[i] = i;
+ }
- /* Now similarly satisfy all need for single registers. */
-
- while (simple_needs[class] > 0 || nongroup_needs[class] > 0)
- {
- /* If we spilled enough regs, but they weren't counted
- against the non-group need, see if we can count them now.
- If so, we can avoid some actual spilling. */
- if (simple_needs[class] <= 0 && nongroup_needs[class] > 0)
- for (i = 0; i < n_spills; i++)
- {
- int regno = spill_regs[i];
- if (TEST_HARD_REG_BIT (reg_class_contents[class], regno)
- && !TEST_HARD_REG_BIT (chain->counted_for_groups, regno)
- && !TEST_HARD_REG_BIT (chain->counted_for_nongroups, regno)
- && nongroup_needs[class] > 0)
- {
- register enum reg_class *p;
+ n_reloads = chain->n_reloads;
+ memcpy (rld, chain->rld, n_reloads * sizeof (struct reload));
- SET_HARD_REG_BIT (chain->counted_for_nongroups, regno);
- nongroup_needs[class]--;
- p = reg_class_superclasses[class];
- while (*p != LIM_REG_CLASSES)
- nongroup_needs[(int) *p++]--;
- }
- }
+ CLEAR_HARD_REG_SET (used_spill_regs_local);
- if (simple_needs[class] <= 0 && nongroup_needs[class] <= 0)
- break;
+ if (dumpfile)
+ fprintf (dumpfile, "Spilling for insn %d.\n", INSN_UID (chain->insn));
- /* Consider the potential reload regs that aren't
- yet in use as reload regs, in order of preference.
- Find the most preferred one that's in this class. */
+ qsort (reload_order, n_reloads, sizeof (short), reload_reg_class_lower);
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- {
- int regno = potential_reload_regs[i];
- if (regno >= 0
- && TEST_HARD_REG_BIT (reg_class_contents[class], regno)
- /* If this reg will not be available for groups,
- pick one that does not foreclose possible groups.
- This is a kludge, and not very general,
- but it should be sufficient to make the 386 work,
- and the problem should not occur on machines with
- more registers. */
- && (nongroup_needs[class] == 0
- || possible_group_p (chain, regno)))
- break;
- }
+ /* Compute the order of preference for hard registers to spill. */
- /* If we couldn't get a register, try to get one even if we
- might foreclose possible groups. This may cause problems
- later, but that's better than aborting now, since it is
- possible that we will, in fact, be able to form the needed
- group even with this allocation. */
-
- if (i >= FIRST_PSEUDO_REGISTER
- && asm_noperands (chain->insn) < 0)
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (potential_reload_regs[i] >= 0
- && TEST_HARD_REG_BIT (reg_class_contents[class],
- potential_reload_regs[i]))
- break;
+ order_regs_for_reload (chain);
- /* I should be the index in potential_reload_regs
- of the new reload reg we have found. */
+ for (i = 0; i < n_reloads; i++)
+ {
+ int r = reload_order[i];
- new_spill_reg (chain, i, class, 1, dumpfile);
- if (failure)
+ /* Ignore reloads that got marked inoperative. */
+ if ((rld[r].out != 0 || rld[r].in != 0 || rld[r].secondary_p)
+ && ! rld[r].optional
+ && rld[r].regno == -1)
+ if (! find_reg (chain, i, dumpfile))
+ {
+ spill_failure (chain->insn, rld[r].class);
+ failure = 1;
return;
- }
+ }
}
- /* We know which hard regs to use, now mark the pseudos that live in them
- as needing to be kicked out. */
- EXECUTE_IF_SET_IN_REG_SET
- (chain->live_before, FIRST_PSEUDO_REGISTER, i,
- {
- maybe_mark_pseudo_spilled (i);
- });
- EXECUTE_IF_SET_IN_REG_SET
- (chain->live_after, FIRST_PSEUDO_REGISTER, i,
- {
- maybe_mark_pseudo_spilled (i);
- });
+ COPY_HARD_REG_SET (chain->used_spill_regs, used_spill_regs_local);
+ IOR_HARD_REG_SET (used_spill_regs, used_spill_regs_local);
- IOR_HARD_REG_SET (used_spill_regs, chain->used_spill_regs);
+ memcpy (chain->rld, rld, n_reloads * sizeof (struct reload));
}
-void
-dump_needs (chain, dumpfile)
- struct insn_chain *chain;
+static void
+select_reload_regs (dumpfile)
FILE *dumpfile;
{
- static const char * const reg_class_names[] = REG_CLASS_NAMES;
- int i;
- struct needs *n = &chain->need;
+ struct insn_chain *chain;
- for (i = 0; i < N_REG_CLASSES; i++)
- {
- if (n->regs[i][0] > 0)
- fprintf (dumpfile,
- ";; Need %d reg%s of class %s.\n",
- n->regs[i][0], n->regs[i][0] == 1 ? "" : "s",
- reg_class_names[i]);
- if (n->regs[i][1] > 0)
- fprintf (dumpfile,
- ";; Need %d nongroup reg%s of class %s.\n",
- n->regs[i][1], n->regs[i][1] == 1 ? "" : "s",
- reg_class_names[i]);
- if (n->groups[i] > 0)
- fprintf (dumpfile,
- ";; Need %d group%s (%smode) of class %s.\n",
- n->groups[i], n->groups[i] == 1 ? "" : "s",
- GET_MODE_NAME(chain->group_mode[i]),
- reg_class_names[i]);
- }
+ /* Try to satisfy the needs for each insn. */
+ for (chain = insns_need_reload; chain != 0;
+ chain = chain->next_need_reload)
+ find_reload_regs (chain, dumpfile);
}
\f
/* Delete all insns that were inserted by emit_caller_save_insns during
}
}
\f
-/* Nonzero if, after spilling reg REGNO for non-groups,
- it will still be possible to find a group if we still need one. */
-
-static int
-possible_group_p (chain, regno)
- struct insn_chain *chain;
- int regno;
-{
- int i;
- int class = (int) NO_REGS;
-
- for (i = 0; i < (int) N_REG_CLASSES; i++)
- if (chain->need.groups[i] > 0)
- {
- class = i;
- break;
- }
-
- if (class == (int) NO_REGS)
- return 1;
-
- /* Consider each pair of consecutive registers. */
- for (i = 0; i < FIRST_PSEUDO_REGISTER - 1; i++)
- {
- /* Ignore pairs that include reg REGNO. */
- if (i == regno || i + 1 == regno)
- continue;
-
- /* Ignore pairs that are outside the class that needs the group.
- ??? Here we fail to handle the case where two different classes
- independently need groups. But this never happens with our
- current machine descriptions. */
- if (! (TEST_HARD_REG_BIT (reg_class_contents[class], i)
- && TEST_HARD_REG_BIT (reg_class_contents[class], i + 1)))
- continue;
-
- /* A pair of consecutive regs we can still spill does the trick. */
- if (spill_reg_order[i] < 0 && spill_reg_order[i + 1] < 0
- && ! TEST_HARD_REG_BIT (bad_spill_regs, i)
- && ! TEST_HARD_REG_BIT (bad_spill_regs, i + 1))
- return 1;
-
- /* A pair of one already spilled and one we can spill does it
- provided the one already spilled is not otherwise reserved. */
- if (spill_reg_order[i] < 0
- && ! TEST_HARD_REG_BIT (bad_spill_regs, i)
- && spill_reg_order[i + 1] >= 0
- && ! TEST_HARD_REG_BIT (chain->counted_for_groups, i + 1)
- && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, i + 1))
- return 1;
- if (spill_reg_order[i + 1] < 0
- && ! TEST_HARD_REG_BIT (bad_spill_regs, i + 1)
- && spill_reg_order[i] >= 0
- && ! TEST_HARD_REG_BIT (chain->counted_for_groups, i)
- && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, i))
- return 1;
- }
-
- return 0;
-}
-
-/* Count any groups of CLASS that can be formed from the registers recently
- spilled. */
-
-static void
-count_possible_groups (chain, class)
- struct insn_chain *chain;
- int class;
-{
- HARD_REG_SET new;
- int i, j;
-
- /* Now find all consecutive groups of spilled registers
- and mark each group off against the need for such groups.
- But don't count them against ordinary need, yet. */
-
- if (chain->group_size[class] == 0)
- return;
-
- CLEAR_HARD_REG_SET (new);
-
- /* Make a mask of all the regs that are spill regs in class I. */
- for (i = 0; i < n_spills; i++)
- {
- int regno = spill_regs[i];
-
- if (TEST_HARD_REG_BIT (reg_class_contents[class], regno)
- && ! TEST_HARD_REG_BIT (chain->counted_for_groups, regno)
- && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, regno))
- SET_HARD_REG_BIT (new, regno);
- }
-
- /* Find each consecutive group of them. */
- for (i = 0; i < FIRST_PSEUDO_REGISTER && chain->need.groups[class] > 0; i++)
- if (TEST_HARD_REG_BIT (new, i)
- && i + chain->group_size[class] <= FIRST_PSEUDO_REGISTER
- && HARD_REGNO_MODE_OK (i, chain->group_mode[class]))
- {
- for (j = 1; j < chain->group_size[class]; j++)
- if (! TEST_HARD_REG_BIT (new, i + j))
- break;
-
- if (j == chain->group_size[class])
- {
- /* We found a group. Mark it off against this class's need for
- groups, and against each superclass too. */
- register enum reg_class *p;
-
- chain->need.groups[class]--;
- p = reg_class_superclasses[class];
- while (*p != LIM_REG_CLASSES)
- {
- if (chain->group_size [(int) *p] <= chain->group_size [class])
- chain->need.groups[(int) *p]--;
- p++;
- }
-
- /* Don't count these registers again. */
- for (j = 0; j < chain->group_size[class]; j++)
- SET_HARD_REG_BIT (chain->counted_for_groups, i + j);
- }
-
- /* Skip to the last reg in this group. When i is incremented above,
- it will then point to the first reg of the next possible group. */
- i += j - 1;
- }
-}
-\f
-/* ALLOCATE_MODE is a register mode that needs to be reloaded. OTHER_MODE is
- another mode that needs to be reloaded for the same register class CLASS.
- If any reg in CLASS allows ALLOCATE_MODE but not OTHER_MODE, fail.
- ALLOCATE_MODE will never be smaller than OTHER_MODE.
-
- This code used to also fail if any reg in CLASS allows OTHER_MODE but not
- ALLOCATE_MODE. This test is unnecessary, because we will never try to put
- something of mode ALLOCATE_MODE into an OTHER_MODE register. Testing this
- causes unnecessary failures on machines requiring alignment of register
- groups when the two modes are different sizes, because the larger mode has
- more strict alignment rules than the smaller mode. */
-
-static int
-modes_equiv_for_class_p (allocate_mode, other_mode, class)
- enum machine_mode allocate_mode, other_mode;
- enum reg_class class;
-{
- register int regno;
- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- {
- if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno)
- && HARD_REGNO_MODE_OK (regno, allocate_mode)
- && ! HARD_REGNO_MODE_OK (regno, other_mode))
- return 0;
- }
- return 1;
-}
-\f
/* Handle the failure to find a register to spill.
INSN should be one of the insns which needed this particular spill reg. */
static void
-spill_failure (insn)
+spill_failure (insn, class)
rtx insn;
+ enum reg_class class;
{
+ static const char *const reg_class_names[] = REG_CLASS_NAMES;
if (asm_noperands (PATTERN (insn)) >= 0)
- error_for_asm (insn, "`asm' needs too many reloads");
+ error_for_asm (insn, "Can't find a register in class `%s' while reloading `asm'.",
+ reg_class_names[class]);
else
- fatal_insn ("Unable to find a register to spill.", insn);
-}
-
-/* Add a new register to the tables of available spill-registers.
- CHAIN is the insn for which the register will be used; we decrease the
- needs of that insn.
- I is the index of this register in potential_reload_regs.
- CLASS is the regclass whose need is being satisfied.
- NONGROUP is 0 if this register is part of a group.
- DUMPFILE is the same as the one that `reload' got. */
-
-static void
-new_spill_reg (chain, i, class, nongroup, dumpfile)
- struct insn_chain *chain;
- int i;
- int class;
- int nongroup;
- FILE *dumpfile;
-{
- register enum reg_class *p;
- int regno = potential_reload_regs[i];
-
- if (i >= FIRST_PSEUDO_REGISTER)
{
- spill_failure (chain->insn);
- failure = 1;
- return;
- }
-
- if (TEST_HARD_REG_BIT (bad_spill_regs, regno))
- {
- static const char * const reg_class_names[] = REG_CLASS_NAMES;
-
- if (asm_noperands (PATTERN (chain->insn)) < 0)
- {
- /* The error message is still correct - we know only that it wasn't
- an asm statement that caused the problem, but one of the global
- registers declared by the users might have screwed us. */
- error ("fixed or forbidden register %d (%s) was spilled for class %s.",
- regno, reg_names[regno], reg_class_names[class]);
- error ("This may be due to a compiler bug or to impossible asm");
- error ("statements or clauses.");
- fatal_insn ("This is the instruction:", chain->insn);
- }
- error_for_asm (chain->insn, "Invalid `asm' statement:");
- error_for_asm (chain->insn,
- "fixed or forbidden register %d (%s) was spilled for class %s.",
- regno, reg_names[regno], reg_class_names[class]);
- failure = 1;
- return;
+ error ("Unable to find a register to spill in class `%s'.",
+ reg_class_names[class]);
+ fatal_insn ("This is the insn:", insn);
}
-
- /* Make reg REGNO an additional reload reg. */
-
- potential_reload_regs[i] = -1;
- spill_regs[n_spills] = regno;
- spill_reg_order[regno] = n_spills;
- if (dumpfile)
- fprintf (dumpfile, "Spilling reg %d.\n", regno);
- SET_HARD_REG_BIT (chain->used_spill_regs, regno);
-
- /* Clear off the needs we just satisfied. */
-
- chain->need.regs[0][class]--;
- p = reg_class_superclasses[class];
- while (*p != LIM_REG_CLASSES)
- chain->need.regs[0][(int) *p++]--;
-
- if (nongroup && chain->need.regs[1][class] > 0)
- {
- SET_HARD_REG_BIT (chain->counted_for_nongroups, regno);
- chain->need.regs[1][class]--;
- p = reg_class_superclasses[class];
- while (*p != LIM_REG_CLASSES)
- chain->need.regs[1][(int) *p++]--;
- }
-
- n_spills++;
}
\f
/* Delete an unneeded INSN and any previous insns who sole purpose is loading
and one is inside RTL that has been copied while the other is not. */
new_body = old_body;
if (! replace)
- new_body = copy_insn (old_body);
+ {
+ new_body = copy_insn (old_body);
+ if (REG_NOTES (insn))
+ REG_NOTES (insn) = copy_insn_1 (REG_NOTES (insn));
+ }
PATTERN (insn) = new_body;
/* If we had a move insn but now we don't, rerecognize it. This will
the insns of the function. */
static void
-mark_not_eliminable (dest, x)
+mark_not_eliminable (dest, x, data)
rtx dest;
rtx x;
+ void *data ATTRIBUTE_UNUSED;
{
register unsigned int i;
+ HARD_REGNO_NREGS (reg_renumber[i],
PSEUDO_REGNO_MODE (i))
> regno))
- SET_REGNO_REG_SET (spilled_pseudos, i);
+ SET_REGNO_REG_SET (&spilled_pseudos, i);
}
/* I'm getting weird preprocessor errors if I use IOR_HARD_REG_SET
spill_reg_order[i] = -1;
for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
- if (REGNO_REG_SET_P (spilled_pseudos, i))
+ if (REGNO_REG_SET_P (&spilled_pseudos, i))
{
/* Record the current hard register the pseudo is allocated to in
pseudo_previous_regs so we avoid reallocating it to the same
for (chain = insns_need_reload; chain; chain = chain->next_need_reload)
{
EXECUTE_IF_SET_IN_REG_SET
- (chain->live_before, FIRST_PSEUDO_REGISTER, i,
+ (chain->live_throughout, FIRST_PSEUDO_REGISTER, i,
{
ior_hard_reg_set (pseudo_forbidden_regs + i,
&chain->used_spill_regs);
});
EXECUTE_IF_SET_IN_REG_SET
- (chain->live_after, FIRST_PSEUDO_REGISTER, i,
+ (chain->dead_or_set, FIRST_PSEUDO_REGISTER, i,
{
ior_hard_reg_set (pseudo_forbidden_regs + i,
&chain->used_spill_regs);
IOR_HARD_REG_SET (forbidden, pseudo_previous_regs[i]);
retry_global_alloc (i, forbidden);
if (reg_renumber[i] >= 0)
- CLEAR_REGNO_REG_SET (spilled_pseudos, i);
+ CLEAR_REGNO_REG_SET (&spilled_pseudos, i);
}
}
HARD_REG_SET used_by_pseudos;
HARD_REG_SET used_by_pseudos2;
- AND_COMPL_REG_SET (chain->live_before, spilled_pseudos);
- AND_COMPL_REG_SET (chain->live_after, spilled_pseudos);
+ AND_COMPL_REG_SET (chain->live_throughout, &spilled_pseudos);
+ AND_COMPL_REG_SET (chain->dead_or_set, &spilled_pseudos);
/* Mark any unallocated hard regs as available for spills. That
makes inheritance work somewhat better. */
if (chain->need_reload)
{
- REG_SET_TO_HARD_REG_SET (used_by_pseudos, chain->live_before);
- REG_SET_TO_HARD_REG_SET (used_by_pseudos2, chain->live_after);
+ REG_SET_TO_HARD_REG_SET (used_by_pseudos, chain->live_throughout);
+ REG_SET_TO_HARD_REG_SET (used_by_pseudos2, chain->dead_or_set);
IOR_HARD_REG_SET (used_by_pseudos, used_by_pseudos2);
/* Save the old value for the sanity test below. */
COPY_HARD_REG_SET (used_by_pseudos2, chain->used_spill_regs);
- compute_use_by_pseudos (&used_by_pseudos, chain->live_before);
- compute_use_by_pseudos (&used_by_pseudos, chain->live_after);
+ compute_use_by_pseudos (&used_by_pseudos, chain->live_throughout);
+ compute_use_by_pseudos (&used_by_pseudos, chain->dead_or_set);
COMPL_HARD_REG_SET (chain->used_spill_regs, used_by_pseudos);
AND_HARD_REG_SET (chain->used_spill_regs, used_spill_regs);
default:
break;
}
-
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'e')
- scan_paradoxical_subregs (XEXP (x, i));
- else if (fmt[i] == 'E')
- {
- register int j;
- for (j = XVECLEN (x, i) - 1; j >=0; j--)
- scan_paradoxical_subregs (XVECEXP (x, i, j));
- }
- }
-}
-\f
-static int
-hard_reg_use_compare (p1p, p2p)
- const PTR p1p;
- const PTR p2p;
-{
- const struct hard_reg_n_uses *p1 = (const struct hard_reg_n_uses *)p1p;
- const struct hard_reg_n_uses *p2 = (const struct hard_reg_n_uses *)p2p;
- int bad1 = TEST_HARD_REG_BIT (bad_spill_regs, p1->regno);
- int bad2 = TEST_HARD_REG_BIT (bad_spill_regs, p2->regno);
- if (bad1 && bad2)
- return p1->regno - p2->regno;
- if (bad1)
- return 1;
- if (bad2)
- return -1;
- if (p1->uses > p2->uses)
- return 1;
- if (p1->uses < p2->uses)
- return -1;
- /* If regs are equally good, sort by regno,
- so that the results of qsort leave nothing to chance. */
- return p1->regno - p2->regno;
-}
-
-/* Used for communication between order_regs_for_reload and count_pseudo.
- Used to avoid counting one pseudo twice. */
-static regset pseudos_counted;
-
-/* Update the costs in N_USES, considering that pseudo REG is live. */
-static void
-count_pseudo (n_uses, reg)
- struct hard_reg_n_uses *n_uses;
- int reg;
-{
- int r = reg_renumber[reg];
- int nregs;
-
- if (REGNO_REG_SET_P (pseudos_counted, reg))
- return;
- SET_REGNO_REG_SET (pseudos_counted, reg);
-
- if (r < 0)
- abort ();
-
- nregs = HARD_REGNO_NREGS (r, PSEUDO_REGNO_MODE (reg));
- while (nregs-- > 0)
- n_uses[r++].uses += REG_N_REFS (reg);
-}
-/* Choose the order to consider regs for use as reload registers
- based on how much trouble would be caused by spilling one.
- Store them in order of decreasing preference in potential_reload_regs. */
-
-static void
-order_regs_for_reload (chain)
- struct insn_chain *chain;
-{
- register int i;
- register int o = 0;
- struct hard_reg_n_uses hard_reg_n_uses[FIRST_PSEUDO_REGISTER];
-
- pseudos_counted = ALLOCA_REG_SET ();
-
- COPY_HARD_REG_SET (bad_spill_regs, bad_spill_regs_global);
-
- /* Count number of uses of each hard reg by pseudo regs allocated to it
- and then order them by decreasing use. */
-
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- {
- hard_reg_n_uses[i].regno = i;
- hard_reg_n_uses[i].uses = 0;
-
- /* Test the various reasons why we can't use a register for
- spilling in this insn. */
- if (fixed_regs[i]
- || REGNO_REG_SET_P (chain->live_before, i)
- || REGNO_REG_SET_P (chain->live_after, i))
- SET_HARD_REG_BIT (bad_spill_regs, i);
- }
-
- /* Now compute hard_reg_n_uses. */
- CLEAR_REG_SET (pseudos_counted);
-
- EXECUTE_IF_SET_IN_REG_SET
- (chain->live_before, FIRST_PSEUDO_REGISTER, i,
- {
- count_pseudo (hard_reg_n_uses, i);
- });
- EXECUTE_IF_SET_IN_REG_SET
- (chain->live_after, FIRST_PSEUDO_REGISTER, i,
- {
- count_pseudo (hard_reg_n_uses, i);
- });
-
- FREE_REG_SET (pseudos_counted);
-
- /* Prefer registers not so far used, for use in temporary loading.
- Among them, if REG_ALLOC_ORDER is defined, use that order.
- Otherwise, prefer registers not preserved by calls. */
-
-#ifdef REG_ALLOC_ORDER
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- {
- int regno = reg_alloc_order[i];
-
- if (hard_reg_n_uses[regno].uses == 0
- && ! TEST_HARD_REG_BIT (bad_spill_regs, regno))
- potential_reload_regs[o++] = regno;
- }
-#else
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- {
- if (hard_reg_n_uses[i].uses == 0 && call_used_regs[i]
- && ! TEST_HARD_REG_BIT (bad_spill_regs, i))
- potential_reload_regs[o++] = i;
- }
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
- if (hard_reg_n_uses[i].uses == 0 && ! call_used_regs[i]
- && ! TEST_HARD_REG_BIT (bad_spill_regs, i))
- potential_reload_regs[o++] = i;
+ if (fmt[i] == 'e')
+ scan_paradoxical_subregs (XEXP (x, i));
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >=0; j--)
+ scan_paradoxical_subregs (XVECEXP (x, i, j));
+ }
}
-#endif
-
- qsort (hard_reg_n_uses, FIRST_PSEUDO_REGISTER,
- sizeof hard_reg_n_uses[0], hard_reg_use_compare);
-
- /* Now add the regs that are already used,
- preferring those used less often. The fixed and otherwise forbidden
- registers will be at the end of this list. */
-
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (hard_reg_n_uses[i].uses != 0
- && ! TEST_HARD_REG_BIT (bad_spill_regs, hard_reg_n_uses[i].regno))
- potential_reload_regs[o++] = hard_reg_n_uses[i].regno;
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (bad_spill_regs, hard_reg_n_uses[i].regno))
- potential_reload_regs[o++] = hard_reg_n_uses[i].regno;
}
\f
/* Reload pseudo-registers into hard regs around each insn as needed.
int live_known;
{
struct insn_chain *chain;
-#if defined (AUTO_INC_DEC) || defined (INSN_CLOBBERS_REGNO_P)
+#if defined (AUTO_INC_DEC)
register int i;
#endif
rtx x;
bzero ((char *) spill_reg_rtx, sizeof spill_reg_rtx);
bzero ((char *) spill_reg_store, sizeof spill_reg_store);
- reg_last_reload_reg = (rtx *) alloca (max_regno * sizeof (rtx));
- bzero ((char *) reg_last_reload_reg, max_regno * sizeof (rtx));
- reg_has_output_reload = (char *) alloca (max_regno);
+ reg_last_reload_reg = (rtx *) xcalloc (max_regno, sizeof (rtx));
+ reg_has_output_reload = (char *) xmalloc (max_regno);
CLEAR_HARD_REG_SET (reg_reloaded_valid);
set_initial_elim_offsets ();
for this insn in order to be stored in
(obeying register constraints). That is correct; such reload
registers ARE still valid. */
- note_stores (oldpat, forget_old_reloads_1);
+ note_stores (oldpat, forget_old_reloads_1, NULL);
/* There may have been CLOBBER insns placed after INSN. So scan
between INSN and NEXT and use them to forget old reloads. */
for (x = NEXT_INSN (insn); x != old_next; x = NEXT_INSN (x))
if (GET_CODE (x) == INSN && GET_CODE (PATTERN (x)) == CLOBBER)
- note_stores (PATTERN (x), forget_old_reloads_1);
+ note_stores (PATTERN (x), forget_old_reloads_1, NULL);
#ifdef AUTO_INC_DEC
/* Likewise for regs altered by auto-increment in this insn.
reg_has_output_reload[REGNO (XEXP (in_reg, 0))] = 1;
}
else
- forget_old_reloads_1 (XEXP (in_reg, 0), NULL_RTX);
+ forget_old_reloads_1 (XEXP (in_reg, 0), NULL_RTX,
+ NULL);
}
else if ((code == PRE_INC || code == PRE_DEC)
&& TEST_HARD_REG_BIT (reg_reloaded_valid,
break;
if (i == n_reloads)
- forget_old_reloads_1 (XEXP (x, 0), NULL_RTX);
+ forget_old_reloads_1 (XEXP (x, 0), NULL_RTX, NULL);
}
#endif
}
if it is a call-used reg. */
else if (GET_CODE (insn) == CALL_INSN)
AND_COMPL_HARD_REG_SET(reg_reloaded_valid, call_used_reg_set);
-
- /* In case registers overlap, allow certain insns to invalidate
- particular hard registers. */
-
-#ifdef INSN_CLOBBERS_REGNO_P
- for (i = 0 ; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (reg_reloaded_valid, i)
- && INSN_CLOBBERS_REGNO_P (insn, i))
- CLEAR_HARD_REG_BIT (reg_reloaded_valid, i);
-#endif
-
-#ifdef USE_C_ALLOCA
- alloca (0);
-#endif
}
+
+ /* Clean up. */
+ free (reg_last_reload_reg);
+ free (reg_has_output_reload);
}
/* Discard all record of any value reloaded from X,
or it may be a pseudo reg that was reloaded from. */
static void
-forget_old_reloads_1 (x, ignored)
+forget_old_reloads_1 (x, ignored, data)
rtx x;
rtx ignored ATTRIBUTE_UNUSED;
+ void *data ATTRIBUTE_UNUSED;
{
register int regno;
int nr;
reg_last_reload_reg[regno + nr] = 0;
}
\f
-/* Comparison function for qsort to decide which of two reloads
- should be handled first. *P1 and *P2 are the reload numbers. */
-
-static int
-reload_reg_class_lower (r1p, r2p)
- const PTR r1p;
- const PTR r2p;
-{
- register int r1 = *(const short *)r1p, r2 = *(const short *)r2p;
- register int t;
-
- /* Consider required reloads before optional ones. */
- t = rld[r1].optional - rld[r2].optional;
- if (t != 0)
- return t;
-
- /* Count all solitary classes before non-solitary ones. */
- t = ((reg_class_size[(int) rld[r2].class] == 1)
- - (reg_class_size[(int) rld[r1].class] == 1));
- if (t != 0)
- return t;
-
- /* Aside from solitaires, consider all multi-reg groups first. */
- t = rld[r2].nregs - rld[r1].nregs;
- if (t != 0)
- return t;
-
- /* Consider reloads in order of increasing reg-class number. */
- t = (int) rld[r1].class - (int) rld[r2].class;
- if (t != 0)
- return t;
-
- /* If reloads are equally urgent, sort by reload number,
- so that the results of qsort leave nothing to chance. */
- return r1 - r2;
-}
-\f
/* The following HARD_REG_SETs indicate when each hard register is
used for a reload of various parts of the current insn. */
return (r2_type == RELOAD_FOR_INSN || r2_type == RELOAD_FOR_OUTPUT
|| ((r2_type == RELOAD_FOR_OUTPUT_ADDRESS
|| r2_type == RELOAD_FOR_OUTADDR_ADDRESS)
- && r2_opnum >= r1_opnum));
+ && r2_opnum <= r1_opnum));
case RELOAD_FOR_INSN:
return (r2_type == RELOAD_FOR_INPUT || r2_type == RELOAD_FOR_OUTPUT
}
}
\f
-/* Vector of reload-numbers showing the order in which the reloads should
- be processed. */
-short reload_order[MAX_RELOADS];
-
/* Indexed by reload number, 1 if incoming value
inherited from previous insns. */
char reload_inherited[MAX_RELOADS];
int ignore_address_reloads;
{
int time1;
+ /* Set if we see an input reload that must not share its reload register
+ with any new earlyclobber, but might otherwise share the reload
+ register with an output or input-output reload. */
+ int check_earlyclobber = 0;
int i;
int copy = 0;
+ /* ??? reload_reg_used is abused to hold the registers that are not
+ available as spill registers, including hard registers that are
+ earlyclobbered in asms. As a temporary measure, reject anything
+ in reload_reg_used. */
+ if (TEST_HARD_REG_BIT (reload_reg_used, regno))
+ return 0;
+
if (out == const0_rtx)
{
copy = 1;
if (! rld[i].in || ! rtx_equal_p (rld[i].in, value)
|| rld[i].out || out)
{
- int j, time2;
+ int time2;
switch (rld[i].when_needed)
{
case RELOAD_FOR_OTHER_ADDRESS:
break;
case RELOAD_FOR_INPUT:
time2 = rld[i].opnum * 4 + 4;
+ check_earlyclobber = 1;
break;
/* rld[i].opnum * 4 + 4 <= (MAX_RECOG_OPERAND - 1) * 4 + 4
== MAX_RECOG_OPERAND * 4 */
break;
case RELOAD_FOR_OPERAND_ADDRESS:
time2 = MAX_RECOG_OPERANDS * 4 + 2;
+ check_earlyclobber = 1;
break;
case RELOAD_FOR_INSN:
time2 = MAX_RECOG_OPERANDS * 4 + 3;
{
time2 = MAX_RECOG_OPERANDS * 4 + 4;
/* Earlyclobbered outputs must conflict with inputs. */
- for (j = 0; j < n_earlyclobbers; j++)
- if (rld[i].out == reload_earlyclobbers[j])
- time2 = MAX_RECOG_OPERANDS * 4 + 3;
+ if (earlyclobber_operand_p (rld[i].out))
+ time2 = MAX_RECOG_OPERANDS * 4 + 3;
break;
}
}
}
}
+
+ /* Earlyclobbered outputs must conflict with inputs. */
+ if (check_earlyclobber && out && earlyclobber_operand_p (out))
+ return 0;
+
return 1;
}
+/* Give an error message saying we failed to find a reload for INSN,
+ and clear out reload R. */
+static void
+failed_reload (insn, r)
+ rtx insn;
+ int r;
+{
+ if (asm_noperands (PATTERN (insn)) < 0)
+ /* It's the compiler's fault. */
+ fatal_insn ("Could not find a spill register", insn);
+
+ /* It's the user's fault; the operand's mode and constraint
+ don't match. Disable this reload so we don't crash in final. */
+ error_for_asm (insn,
+ "`asm' operand constraint incompatible with operand size");
+ rld[r].in = 0;
+ rld[r].out = 0;
+ rld[r].reg_rtx = 0;
+ rld[r].optional = 1;
+ rld[r].secondary_p = 1;
+}
+
+/* I is the index in SPILL_REG_RTX of the reload register we are to allocate
+ for reload R. If it's valid, get an rtx for it. Return nonzero if
+ successful. */
+static int
+set_reload_reg (i, r)
+ int i, r;
+{
+ int regno;
+ rtx reg = spill_reg_rtx[i];
+
+ if (reg == 0 || GET_MODE (reg) != rld[r].mode)
+ spill_reg_rtx[i] = reg
+ = gen_rtx_REG (rld[r].mode, spill_regs[i]);
+
+ regno = true_regnum (reg);
+
+ /* Detect when the reload reg can't hold the reload mode.
+ This used to be one `if', but Sequent compiler can't handle that. */
+ if (HARD_REGNO_MODE_OK (regno, rld[r].mode))
+ {
+ enum machine_mode test_mode = VOIDmode;
+ if (rld[r].in)
+ test_mode = GET_MODE (rld[r].in);
+ /* If rld[r].in has VOIDmode, it means we will load it
+ in whatever mode the reload reg has: to wit, rld[r].mode.
+ We have already tested that for validity. */
+ /* Aside from that, we need to test that the expressions
+ to reload from or into have modes which are valid for this
+ reload register. Otherwise the reload insns would be invalid. */
+ if (! (rld[r].in != 0 && test_mode != VOIDmode
+ && ! HARD_REGNO_MODE_OK (regno, test_mode)))
+ if (! (rld[r].out != 0
+ && ! HARD_REGNO_MODE_OK (regno, GET_MODE (rld[r].out))))
+ {
+ /* The reg is OK. */
+ last_spill_reg = i;
+
+ /* Mark as in use for this insn the reload regs we use
+ for this. */
+ mark_reload_reg_in_use (spill_regs[i], rld[r].opnum,
+ rld[r].when_needed, rld[r].mode);
+
+ rld[r].reg_rtx = reg;
+ reload_spill_index[r] = spill_regs[i];
+ return 1;
+ }
+ }
+ return 0;
+}
+
/* Find a spill register to use as a reload register for reload R.
LAST_RELOAD is non-zero if this is the last reload for the insn being
processed.
Set rld[R].reg_rtx to the register allocated.
- If NOERROR is nonzero, we return 1 if successful,
- or 0 if we couldn't find a spill reg and we didn't change anything. */
+ We return 1 if successful, or 0 if we couldn't find a spill reg and
+ we didn't change anything. */
static int
-allocate_reload_reg (chain, r, last_reload, noerror)
+allocate_reload_reg (chain, r, last_reload)
struct insn_chain *chain;
int r;
int last_reload;
- int noerror;
{
rtx insn = chain->insn;
- int i, pass, count, regno;
- rtx new;
+ int i, pass, count;
/* If we put this reload ahead, thinking it is a group,
then insist on finding a group. Otherwise we can grab a
/* I is the index in spill_regs.
We advance it round-robin between insns to use all spill regs
equally, so that inherited reloads have a chance
- of leapfrogging each other. Don't do this, however, when we have
- group needs and failure would be fatal; if we only have a relatively
- small number of spill registers, and more than one of them has
- group needs, then by starting in the middle, we may end up
- allocating the first one in such a way that we are not left with
- sufficient groups to handle the rest. */
-
- if (noerror || ! force_group)
- i = last_spill_reg;
- else
- i = -1;
+ of leapfrogging each other. */
+
+ i = last_spill_reg;
for (count = 0; count < n_spills; count++)
{
(on 68000) got us two FP regs. If NR is 1,
we would reject both of them. */
if (force_group)
- nr = CLASS_MAX_NREGS (rld[r].class, rld[r].mode);
+ nr = rld[r].nregs;
/* If we need only one reg, we have already won. */
if (nr == 1)
{
break;
}
/* Otherwise check that as many consecutive regs as we need
- are available here.
- Also, don't use for a group registers that are
- needed for nongroups. */
- if (! TEST_HARD_REG_BIT (chain->counted_for_nongroups, regnum))
- while (nr > 1)
- {
- regno = regnum + nr - 1;
- if (!(TEST_HARD_REG_BIT (reg_class_contents[class], regno)
- && spill_reg_order[regno] >= 0
- && reload_reg_free_p (regno, rld[r].opnum,
- rld[r].when_needed)
- && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups,
- regno)))
- break;
- nr--;
- }
+ are available here. */
+ while (nr > 1)
+ {
+ int regno = regnum + nr - 1;
+ if (!(TEST_HARD_REG_BIT (reg_class_contents[class], regno)
+ && spill_reg_order[regno] >= 0
+ && reload_reg_free_p (regno, rld[r].opnum,
+ rld[r].when_needed)))
+ break;
+ nr--;
+ }
if (nr == 1)
break;
}
if (count < n_spills)
break;
}
-
+
/* We should have found a spill register by now. */
- if (count == n_spills)
- {
- if (noerror)
- return 0;
- goto failure;
- }
+ if (count >= n_spills)
+ return 0;
/* I is the index in SPILL_REG_RTX of the reload register we are to
allocate. Get an rtx for it and find its register number. */
- new = spill_reg_rtx[i];
-
- if (new == 0 || GET_MODE (new) != rld[r].mode)
- spill_reg_rtx[i] = new
- = gen_rtx_REG (rld[r].mode, spill_regs[i]);
-
- regno = true_regnum (new);
-
- /* Detect when the reload reg can't hold the reload mode.
- This used to be one `if', but Sequent compiler can't handle that. */
- if (HARD_REGNO_MODE_OK (regno, rld[r].mode))
- {
- enum machine_mode test_mode = VOIDmode;
- if (rld[r].in)
- test_mode = GET_MODE (rld[r].in);
- /* If rld[r].in has VOIDmode, it means we will load it
- in whatever mode the reload reg has: to wit, rld[r].mode.
- We have already tested that for validity. */
- /* Aside from that, we need to test that the expressions
- to reload from or into have modes which are valid for this
- reload register. Otherwise the reload insns would be invalid. */
- if (! (rld[r].in != 0 && test_mode != VOIDmode
- && ! HARD_REGNO_MODE_OK (regno, test_mode)))
- if (! (rld[r].out != 0
- && ! HARD_REGNO_MODE_OK (regno, GET_MODE (rld[r].out))))
- {
- /* The reg is OK. */
- last_spill_reg = i;
-
- /* Mark as in use for this insn the reload regs we use
- for this. */
- mark_reload_reg_in_use (spill_regs[i], rld[r].opnum,
- rld[r].when_needed, rld[r].mode);
-
- rld[r].reg_rtx = new;
- reload_spill_index[r] = spill_regs[i];
- return 1;
- }
- }
-
- /* The reg is not OK. */
- if (noerror)
- return 0;
-
- failure:
- if (asm_noperands (PATTERN (insn)) < 0)
- /* It's the compiler's fault. */
- fatal_insn ("Could not find a spill register", insn);
-
- /* It's the user's fault; the operand's mode and constraint
- don't match. Disable this reload so we don't crash in final. */
- error_for_asm (insn,
- "`asm' operand constraint incompatible with operand size");
- rld[r].in = 0;
- rld[r].out = 0;
- rld[r].reg_rtx = 0;
- rld[r].optional = 1;
- rld[r].secondary_p = 1;
-
- return 1;
+ return set_reload_reg (i, r);
}
\f
-/* Assign hard reg targets for the pseudo-registers we must reload
- into hard regs for this insn.
- Also output the instructions to copy them in and out of the hard regs.
-
- For machines with register classes, we are responsible for
- finding a reload reg in the proper class. */
-
+/* Initialize all the tables needed to allocate reload registers.
+ CHAIN is the insn currently being processed; SAVE_RELOAD_REG_RTX
+ is the array we use to restore the reg_rtx field for every reload. */
static void
-choose_reload_regs (chain)
+choose_reload_regs_init (chain, save_reload_reg_rtx)
struct insn_chain *chain;
+ rtx *save_reload_reg_rtx;
{
- rtx insn = chain->insn;
- register int i, j;
- int max_group_size = 1;
- enum reg_class group_class = NO_REGS;
- int inheritance;
- int pass;
+ int i;
- rtx save_reload_reg_rtx[MAX_RELOADS];
- char save_reload_inherited[MAX_RELOADS];
- rtx save_reload_inheritance_insn[MAX_RELOADS];
- rtx save_reload_override_in[MAX_RELOADS];
- int save_reload_spill_index[MAX_RELOADS];
- HARD_REG_SET save_reload_reg_used;
- HARD_REG_SET save_reload_reg_used_in_input_addr[MAX_RECOG_OPERANDS];
- HARD_REG_SET save_reload_reg_used_in_inpaddr_addr[MAX_RECOG_OPERANDS];
- HARD_REG_SET save_reload_reg_used_in_output_addr[MAX_RECOG_OPERANDS];
- HARD_REG_SET save_reload_reg_used_in_outaddr_addr[MAX_RECOG_OPERANDS];
- HARD_REG_SET save_reload_reg_used_in_input[MAX_RECOG_OPERANDS];
- HARD_REG_SET save_reload_reg_used_in_output[MAX_RECOG_OPERANDS];
- HARD_REG_SET save_reload_reg_used_in_op_addr;
- HARD_REG_SET save_reload_reg_used_in_op_addr_reload;
- HARD_REG_SET save_reload_reg_used_in_insn;
- HARD_REG_SET save_reload_reg_used_in_other_addr;
- HARD_REG_SET save_reload_reg_used_at_all;
+ for (i = 0; i < n_reloads; i++)
+ rld[i].reg_rtx = save_reload_reg_rtx[i];
bzero (reload_inherited, MAX_RELOADS);
bzero ((char *) reload_inheritance_insn, MAX_RELOADS * sizeof (rtx));
CLEAR_HARD_REG_SET (reg_used_in_insn);
{
HARD_REG_SET tmp;
- REG_SET_TO_HARD_REG_SET (tmp, chain->live_before);
+ REG_SET_TO_HARD_REG_SET (tmp, chain->live_throughout);
IOR_HARD_REG_SET (reg_used_in_insn, tmp);
- REG_SET_TO_HARD_REG_SET (tmp, chain->live_after);
+ REG_SET_TO_HARD_REG_SET (tmp, chain->dead_or_set);
IOR_HARD_REG_SET (reg_used_in_insn, tmp);
- compute_use_by_pseudos (®_used_in_insn, chain->live_before);
- compute_use_by_pseudos (®_used_in_insn, chain->live_after);
+ compute_use_by_pseudos (®_used_in_insn, chain->live_throughout);
+ compute_use_by_pseudos (®_used_in_insn, chain->dead_or_set);
}
for (i = 0; i < reload_n_operands; i++)
{
IOR_COMPL_HARD_REG_SET (reload_reg_used, chain->used_spill_regs);
-#if 0 /* Not needed, now that we can always retry without inheritance. */
- /* See if we have more mandatory reloads than spill regs.
- If so, then we cannot risk optimizations that could prevent
- reloads from sharing one spill register.
+ CLEAR_HARD_REG_SET (reload_reg_used_for_inherit);
- Since we will try finding a better register than reload_reg_rtx
- unless it is equal to reload_in or reload_out, count such reloads. */
+ for (i = 0; i < n_reloads; i++)
+ /* If we have already decided to use a certain register,
+ don't use it in another way. */
+ if (rld[i].reg_rtx)
+ mark_reload_reg_in_use (REGNO (rld[i].reg_rtx), rld[i].opnum,
+ rld[i].when_needed, rld[i].mode);
+}
- {
- int tem = 0;
- for (j = 0; j < n_reloads; j++)
- if (! rld[j].optional
- && (rld[j].in != 0 || rld[j].out != 0 || rld[j].secondary_p)
- && (rld[j].reg_rtx == 0
- || (! rtx_equal_p (rld[j].reg_rtx, rld[j].in)
- && ! rtx_equal_p (rld[j].reg_rtx, rld[j].out))))
- tem++;
- if (tem > n_spills)
- must_reuse = 1;
- }
-#endif
+/* Assign hard reg targets for the pseudo-registers we must reload
+ into hard regs for this insn.
+ Also output the instructions to copy them in and out of the hard regs.
+
+ For machines with register classes, we are responsible for
+ finding a reload reg in the proper class. */
+
+static void
+choose_reload_regs (chain)
+ struct insn_chain *chain;
+{
+ rtx insn = chain->insn;
+ register int i, j;
+ int max_group_size = 1;
+ enum reg_class group_class = NO_REGS;
+ int pass, win, inheritance;
+
+ rtx save_reload_reg_rtx[MAX_RELOADS];
/* In order to be certain of getting the registers we need,
we must sort the reloads into order of increasing register class.
reload_order[j] = j;
reload_spill_index[j] = -1;
- rld[j].mode = ((rld[j].inmode == VOIDmode
- || (GET_MODE_SIZE (rld[j].outmode)
- > GET_MODE_SIZE (rld[j].inmode)))
- ? rld[j].outmode : rld[j].inmode);
-
- rld[j].nregs = CLASS_MAX_NREGS (rld[j].class, rld[j].mode);
-
if (rld[j].nregs > 1)
{
max_group_size = MAX (rld[j].nregs, max_group_size);
}
save_reload_reg_rtx[j] = rld[j].reg_rtx;
- /* If we have already decided to use a certain register,
- don't use it in another way. */
- if (rld[j].reg_rtx)
- mark_reload_reg_in_use (REGNO (rld[j].reg_rtx), rld[j].opnum,
- rld[j].when_needed, rld[j].mode);
}
if (n_reloads > 1)
qsort (reload_order, n_reloads, sizeof (short), reload_reg_class_lower);
- bcopy (reload_inherited, save_reload_inherited, sizeof reload_inherited);
- bcopy ((char *) reload_inheritance_insn,
- (char *) save_reload_inheritance_insn,
- sizeof reload_inheritance_insn);
- bcopy ((char *) reload_override_in, (char *) save_reload_override_in,
- sizeof reload_override_in);
- bcopy ((char *) reload_spill_index, (char *) save_reload_spill_index,
- sizeof reload_spill_index);
- COPY_HARD_REG_SET (save_reload_reg_used, reload_reg_used);
- COPY_HARD_REG_SET (save_reload_reg_used_at_all, reload_reg_used_at_all);
- COPY_HARD_REG_SET (save_reload_reg_used_in_op_addr,
- reload_reg_used_in_op_addr);
-
- COPY_HARD_REG_SET (save_reload_reg_used_in_op_addr_reload,
- reload_reg_used_in_op_addr_reload);
-
- COPY_HARD_REG_SET (save_reload_reg_used_in_insn,
- reload_reg_used_in_insn);
- COPY_HARD_REG_SET (save_reload_reg_used_in_other_addr,
- reload_reg_used_in_other_addr);
-
- for (i = 0; i < reload_n_operands; i++)
- {
- COPY_HARD_REG_SET (save_reload_reg_used_in_output[i],
- reload_reg_used_in_output[i]);
- COPY_HARD_REG_SET (save_reload_reg_used_in_input[i],
- reload_reg_used_in_input[i]);
- COPY_HARD_REG_SET (save_reload_reg_used_in_input_addr[i],
- reload_reg_used_in_input_addr[i]);
- COPY_HARD_REG_SET (save_reload_reg_used_in_inpaddr_addr[i],
- reload_reg_used_in_inpaddr_addr[i]);
- COPY_HARD_REG_SET (save_reload_reg_used_in_output_addr[i],
- reload_reg_used_in_output_addr[i]);
- COPY_HARD_REG_SET (save_reload_reg_used_in_outaddr_addr[i],
- reload_reg_used_in_outaddr_addr[i]);
- }
-
/* If -O, try first with inheritance, then turning it off.
If not -O, don't do inheritance.
Using inheritance when not optimizing leads to paradoxes
with fp on the 68k: fp numbers (not NaNs) fail to be equal to themselves
because one side of the comparison might be inherited. */
-
+ win = 0;
for (inheritance = optimize > 0; inheritance >= 0; inheritance--)
{
+ choose_reload_regs_init (chain, save_reload_reg_rtx);
+
/* Process the reloads in order of preference just found.
Beyond this point, subregs can be found in reload_reg_rtx.
Then make a second pass over the reloads to allocate any reloads
that haven't been given registers yet. */
- CLEAR_HARD_REG_SET (reload_reg_used_for_inherit);
-
for (j = 0; j < n_reloads; j++)
{
register int r = reload_order[j];
|| rld[reload_order[i]].secondary_p)
&& ! rld[reload_order[i]].optional
&& rld[reload_order[i]].reg_rtx == 0)
- allocate_reload_reg (chain, reload_order[i], 0, inheritance);
+ allocate_reload_reg (chain, reload_order[i], 0);
#endif
/* First see if this pseudo is already available as reloaded
&& rld[r].out
&& ! TEST_HARD_REG_BIT (reg_reloaded_dead, i))
/* Don't clobber the frame pointer. */
- || (i == HARD_FRAME_POINTER_REGNUM
- && rld[r].out)
+ || (i == HARD_FRAME_POINTER_REGNUM && rld[r].out)
/* Don't really use the inherited spill reg
if we need it wider than we've got it. */
|| (GET_MODE_SIZE (rld[r].mode)
register, stay with it - that leaves the
inherited register for subsequent reloads. */
|| (rld[r].out && rld[r].reg_rtx
- && rtx_equal_p (rld[r].out,
- rld[r].reg_rtx)))
+ && rtx_equal_p (rld[r].out, rld[r].reg_rtx)))
{
reload_override_in[r] = last_reg;
reload_inheritance_insn[r]
if (i == n_reloads)
continue;
- allocate_reload_reg (chain, r, j == n_reloads - 1, inheritance);
+ allocate_reload_reg (chain, r, j == n_reloads - 1);
#endif
}
if (rld[r].reg_rtx != 0 || rld[r].optional)
continue;
- if (! allocate_reload_reg (chain, r, j == n_reloads - 1, inheritance))
+ if (! allocate_reload_reg (chain, r, j == n_reloads - 1))
break;
}
/* If that loop got all the way, we have won. */
if (j == n_reloads)
- break;
+ {
+ win = 1;
+ break;
+ }
/* Loop around and try without any inheritance. */
+ }
+
+ if (! win)
+ {
/* First undo everything done by the failed attempt
to allocate with inheritance. */
- for (i = 0; i < n_reloads; i++)
- rld[i].reg_rtx = save_reload_reg_rtx[i];
- bcopy ((char *) save_reload_inherited, (char *) reload_inherited,
- sizeof reload_inherited);
- bcopy ((char *) save_reload_inheritance_insn,
- (char *) reload_inheritance_insn,
- sizeof reload_inheritance_insn);
- bcopy ((char *) save_reload_override_in, (char *) reload_override_in,
- sizeof reload_override_in);
- bcopy ((char *) save_reload_spill_index, (char *) reload_spill_index,
- sizeof reload_spill_index);
- COPY_HARD_REG_SET (reload_reg_used, save_reload_reg_used);
- COPY_HARD_REG_SET (reload_reg_used_at_all, save_reload_reg_used_at_all);
- COPY_HARD_REG_SET (reload_reg_used_in_op_addr,
- save_reload_reg_used_in_op_addr);
- COPY_HARD_REG_SET (reload_reg_used_in_op_addr_reload,
- save_reload_reg_used_in_op_addr_reload);
- COPY_HARD_REG_SET (reload_reg_used_in_insn,
- save_reload_reg_used_in_insn);
- COPY_HARD_REG_SET (reload_reg_used_in_other_addr,
- save_reload_reg_used_in_other_addr);
+ choose_reload_regs_init (chain, save_reload_reg_rtx);
- for (i = 0; i < reload_n_operands; i++)
+ /* Some sanity tests to verify that the reloads found in the first
+ pass are identical to the ones we have now. */
+ if (chain->n_reloads != n_reloads)
+ abort ();
+
+ for (i = 0; i < n_reloads; i++)
{
- COPY_HARD_REG_SET (reload_reg_used_in_input[i],
- save_reload_reg_used_in_input[i]);
- COPY_HARD_REG_SET (reload_reg_used_in_output[i],
- save_reload_reg_used_in_output[i]);
- COPY_HARD_REG_SET (reload_reg_used_in_input_addr[i],
- save_reload_reg_used_in_input_addr[i]);
- COPY_HARD_REG_SET (reload_reg_used_in_inpaddr_addr[i],
- save_reload_reg_used_in_inpaddr_addr[i]);
- COPY_HARD_REG_SET (reload_reg_used_in_output_addr[i],
- save_reload_reg_used_in_output_addr[i]);
- COPY_HARD_REG_SET (reload_reg_used_in_outaddr_addr[i],
- save_reload_reg_used_in_outaddr_addr[i]);
+ if (chain->rld[i].regno < 0 || chain->rld[i].reg_rtx != 0)
+ continue;
+ if (chain->rld[i].when_needed != rld[i].when_needed)
+ abort ();
+ for (j = 0; j < n_spills; j++)
+ if (spill_regs[j] == chain->rld[i].regno)
+ if (! set_reload_reg (j, i))
+ failed_reload (chain->insn, i);
}
}
clear any memory of reloaded copies of the pseudo reg.
If this output reload comes from a spill reg,
reg_has_output_reload will make this do nothing. */
- note_stores (pat, forget_old_reloads_1);
+ note_stores (pat, forget_old_reloads_1, NULL);
if (reg_mentioned_p (rld[j].reg_rtx, pat))
{
note_stores; it is ignored. */
static void
-reload_cse_invalidate_rtx (dest, ignore)
+reload_cse_invalidate_rtx (dest, ignore, data)
rtx dest;
rtx ignore ATTRIBUTE_UNUSED;
+ void *data ATTRIBUTE_UNUSED;
{
while (GET_CODE (dest) == STRICT_LOW_PART
|| GET_CODE (dest) == SIGN_EXTRACT
if (GET_CODE (x) == SET)
reload_cse_record_set (x, body);
else
- note_stores (x, reload_cse_invalidate_rtx);
+ note_stores (x, reload_cse_invalidate_rtx, NULL);
}
}
else
- note_stores (body, reload_cse_invalidate_rtx);
+ note_stores (body, reload_cse_invalidate_rtx, NULL);
#ifdef AUTO_INC_DEC
/* Clobber any registers which appear in REG_INC notes. We
for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
if (REG_NOTE_KIND (x) == REG_INC)
- reload_cse_invalidate_rtx (XEXP (x, 0), NULL_RTX);
+ reload_cse_invalidate_rtx (XEXP (x, 0), NULL_RTX, NULL);
}
#endif
for (x = CALL_INSN_FUNCTION_USAGE (insn); x; x = XEXP (x, 1))
if (GET_CODE (XEXP (x, 0)) == CLOBBER)
- reload_cse_invalidate_rtx (XEXP (XEXP (x, 0), 0), NULL_RTX);
+ reload_cse_invalidate_rtx (XEXP (XEXP (x, 0), 0), NULL_RTX,
+ NULL);
}
}
+ /* Clean up. */
+ end_alias_analysis ();
+
/* Free all the temporary structures we created, and go back to the
regular obstacks. */
obstack_free (&reload_obstack, firstobj);
second argument, which is passed by note_stores, is ignored. */
static void
-reload_cse_check_clobber (dest, ignore)
+reload_cse_check_clobber (dest, ignore, data)
rtx dest;
rtx ignore ATTRIBUTE_UNUSED;
+ void *data ATTRIBUTE_UNUSED;
{
if (reg_overlap_mentioned_p (dest, reload_cse_check_src))
reload_cse_check_clobbered = 1;
x = XEXP (x, 0);
if (push_operand (x, GET_MODE (x)))
{
- reload_cse_invalidate_rtx (stack_pointer_rtx, NULL_RTX);
- reload_cse_invalidate_rtx (dest, NULL_RTX);
+ reload_cse_invalidate_rtx (stack_pointer_rtx, NULL_RTX, NULL);
+ reload_cse_invalidate_rtx (dest, NULL_RTX, NULL);
return;
}
|| side_effects_p (src)
|| side_effects_p (dest))
{
- reload_cse_invalidate_rtx (dest, NULL_RTX);
+ reload_cse_invalidate_rtx (dest, NULL_RTX, NULL);
return;
}
if (reg_mentioned_p (cc0_rtx, src)
|| reg_mentioned_p (cc0_rtx, dest))
{
- reload_cse_invalidate_rtx (dest, NULL_RTX);
+ reload_cse_invalidate_rtx (dest, NULL_RTX, NULL);
return;
}
#endif
reload_cse_check_clobbered = 0;
reload_cse_check_src = src;
- note_stores (x, reload_cse_check_clobber);
+ note_stores (x, reload_cse_check_clobber, NULL);
if (reload_cse_check_clobbered)
{
- reload_cse_invalidate_rtx (dest, NULL_RTX);
+ reload_cse_invalidate_rtx (dest, NULL_RTX, NULL);
return;
}
}
}
}
}
- note_stores (PATTERN (insn), reload_combine_note_store);
+ note_stores (PATTERN (insn), reload_combine_note_store, NULL);
if (GET_CODE (insn) == CALL_INSN)
{
rtx link;
update reg_state[regno].store_ruid and reg_state[regno].use_index
accordingly. Called via note_stores from reload_combine. */
static void
-reload_combine_note_store (dst, set)
+reload_combine_note_store (dst, set, data)
rtx dst, set;
+ void *data ATTRIBUTE_UNUSED;
{
int regno = 0;
int i;
}
}
}
- note_stores (PATTERN (insn), move2add_note_store);
+ note_stores (PATTERN (insn), move2add_note_store, NULL);
/* If this is a CALL_INSN, all call used registers are stored with
unknown values. */
if (GET_CODE (insn) == CALL_INSN)
Update reg_set_luid, reg_offset and reg_base_reg accordingly.
Called from reload_cse_move2add via note_stores. */
static void
-move2add_note_store (dst, set)
+move2add_note_store (dst, set, data)
rtx dst, set;
+ void *data ATTRIBUTE_UNUSED;
{
int regno = 0;
int i;