/* Optimize jump instructions, for GNU compiler.
- Copyright (C) 1987, 88, 89, 91-98, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997
+ 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
This file is part of GNU CC.
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-
/* This is the jump-optimization pass of the compiler.
It is run two or three times: once before cse, sometimes once after cse,
and once after reload (before final).
#include "config.h"
#include "system.h"
#include "rtl.h"
+#include "tm_p.h"
#include "flags.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "insn-flags.h"
#include "insn-attr.h"
#include "recog.h"
+#include "function.h"
#include "expr.h"
#include "real.h"
#include "except.h"
static rtx *jump_chain;
-/* List of labels referred to from initializers.
- These can never be deleted. */
-rtx forced_labels;
-
/* Maximum index in jump_chain. */
static int max_jump_chain;
-/* Set nonzero by jump_optimize if control can fall through
- to the end of the function. */
-int can_reach_end;
-
/* Indicates whether death notes are significant in cross jump analysis.
Normally they are not significant, because of A and B jump to C,
and R dies in A, it must die in B. But this might not be true after
static int cross_jump_death_matters = 0;
-static int init_label_info PROTO((rtx));
-static void delete_barrier_successors PROTO((rtx));
-static void mark_all_labels PROTO((rtx, int));
-static rtx delete_unreferenced_labels PROTO((rtx));
-static void delete_noop_moves PROTO((rtx));
-static int calculate_can_reach_end PROTO((rtx, int, int));
-static int duplicate_loop_exit_test PROTO((rtx));
-static void find_cross_jump PROTO((rtx, rtx, int, rtx *, rtx *));
-static void do_cross_jump PROTO((rtx, rtx, rtx));
-static int jump_back_p PROTO((rtx, rtx));
-static int tension_vector_labels PROTO((rtx, int));
-static void mark_jump_label PROTO((rtx, rtx, int));
-static void delete_computation PROTO((rtx));
-static void delete_from_jump_chain PROTO((rtx));
-static int delete_labelref_insn PROTO((rtx, rtx, int));
-static void mark_modified_reg PROTO((rtx, rtx));
-static void redirect_tablejump PROTO((rtx, rtx));
-static void jump_optimize_1 PROTO ((rtx, int, int, int, int));
-#ifndef HAVE_cc0
-static rtx find_insert_position PROTO((rtx, rtx));
-#endif
-
+static int init_label_info PARAMS ((rtx));
+static void delete_barrier_successors PARAMS ((rtx));
+static void mark_all_labels PARAMS ((rtx, int));
+static rtx delete_unreferenced_labels PARAMS ((rtx));
+static void delete_noop_moves PARAMS ((rtx));
+static int duplicate_loop_exit_test PARAMS ((rtx));
+static void find_cross_jump PARAMS ((rtx, rtx, int, rtx *, rtx *));
+static void do_cross_jump PARAMS ((rtx, rtx, rtx));
+static int jump_back_p PARAMS ((rtx, rtx));
+static int tension_vector_labels PARAMS ((rtx, int));
+static void delete_computation PARAMS ((rtx));
+static void redirect_exp_1 PARAMS ((rtx *, rtx, rtx, rtx));
+static int redirect_exp PARAMS ((rtx, rtx, rtx));
+static void invert_exp_1 PARAMS ((rtx));
+static int invert_exp PARAMS ((rtx));
+static void delete_from_jump_chain PARAMS ((rtx));
+static int delete_labelref_insn PARAMS ((rtx, rtx, int));
+static void mark_modified_reg PARAMS ((rtx, rtx, void *));
+static void redirect_tablejump PARAMS ((rtx, rtx));
+static void jump_optimize_1 PARAMS ((rtx, int, int, int, int, int));
+static int returnjump_p_1 PARAMS ((rtx *, void *));
+static void delete_prior_computation PARAMS ((rtx, rtx));
+\f
/* Main external entry point into the jump optimizer. See comments before
jump_optimize_1 for descriptions of the arguments. */
void
int noop_moves;
int after_regscan;
{
- jump_optimize_1 (f, cross_jump, noop_moves, after_regscan, 0);
+ jump_optimize_1 (f, cross_jump, noop_moves, after_regscan, 0, 0);
}
/* Alternate entry into the jump optimizer. This entry point only rebuilds
rebuild_jump_labels (f)
rtx f;
{
- jump_optimize_1 (f, 0, 0, 0, 1);
+ jump_optimize_1 (f, 0, 0, 0, 1, 0);
}
+/* Alternate entry into the jump optimizer. Do only trivial optimizations. */
+
+void
+jump_optimize_minimal (f)
+ rtx f;
+{
+ jump_optimize_1 (f, 0, 0, 0, 0, 1);
+}
\f
/* Delete no-op jumps and optimize jumps to jumps
and jumps around jumps.
just determine whether control drops off the end of the function.
This case occurs when we have -W and not -O.
It works because `delete_insn' checks the value of `optimize'
- and refrains from actually deleting when that is 0. */
+ and refrains from actually deleting when that is 0.
+
+ If MINIMAL is nonzero, then we only perform trivial optimizations:
+
+ * Removal of unreachable code after BARRIERs.
+ * Removal of unreferenced CODE_LABELs.
+ * Removal of a jump to the next instruction.
+ * Removal of a conditional jump followed by an unconditional jump
+ to the same target as the conditional jump.
+ * Simplify a conditional jump around an unconditional jump.
+ * Simplify a jump to a jump.
+ * Delete extraneous line number notes.
+ */
static void
-jump_optimize_1 (f, cross_jump, noop_moves, after_regscan, mark_labels_only)
+jump_optimize_1 (f, cross_jump, noop_moves, after_regscan,
+ mark_labels_only, minimal)
rtx f;
int cross_jump;
int noop_moves;
int after_regscan;
int mark_labels_only;
+ int minimal;
{
register rtx insn, next;
int changed;
int first = 1;
int max_uid = 0;
rtx last_insn;
+#ifdef HAVE_trap
+ enum rtx_code reversed_code;
+#endif
cross_jump_death_matters = (cross_jump == 2);
max_uid = init_label_info (f) + 1;
if (flag_exceptions && cross_jump)
init_insn_eh_region (f, max_uid);
- delete_barrier_successors (f);
+ if (! mark_labels_only)
+ delete_barrier_successors (f);
/* Leave some extra room for labels and duplicate exit test insns
we make. */
max_jump_chain = max_uid * 14 / 10;
- jump_chain = (rtx *) alloca (max_jump_chain * sizeof (rtx));
- bzero ((char *) jump_chain, max_jump_chain * sizeof (rtx));
+ jump_chain = (rtx *) xcalloc (max_jump_chain, sizeof (rtx));
mark_all_labels (f, cross_jump);
- /* Keep track of labels used from static data;
- they cannot ever be deleted. */
+ /* Keep track of labels used from static data; we don't track them
+ closely enough to delete them here, so make sure their reference
+ count doesn't drop to zero. */
for (insn = forced_labels; insn; insn = XEXP (insn, 1))
- LABEL_NUSES (XEXP (insn, 0))++;
+ if (GET_CODE (XEXP (insn, 0)) == CODE_LABEL)
+ LABEL_NUSES (XEXP (insn, 0))++;
check_exception_handler_labels ();
regions; they cannot usually be deleted. */
for (insn = exception_handler_labels; insn; insn = XEXP (insn, 1))
- LABEL_NUSES (XEXP (insn, 0))++;
+ if (GET_CODE (XEXP (insn, 0)) == CODE_LABEL)
+ LABEL_NUSES (XEXP (insn, 0))++;
/* Quit now if we just wanted to rebuild the JUMP_LABEL and REG_LABEL
notes and recompute LABEL_NUSES. */
if (mark_labels_only)
- return;
+ goto end;
- exception_optimize ();
+ if (! minimal)
+ exception_optimize ();
last_insn = delete_unreferenced_labels (f);
- if (!optimize)
- {
- can_reach_end = calculate_can_reach_end (last_insn, 1, 0);
-
- /* Zero the "deleted" flag of all the "deleted" insns. */
- for (insn = f; insn; insn = NEXT_INSN (insn))
- INSN_DELETED_P (insn) = 0;
-
- /* Show that the jump chain is not valid. */
- jump_chain = 0;
- return;
- }
-
-#ifdef HAVE_return
- if (HAVE_return)
- {
- /* If we fall through to the epilogue, see if we can insert a RETURN insn
- in front of it. If the machine allows it at this point (we might be
- after reload for a leaf routine), it will improve optimization for it
- to be there. */
- insn = get_last_insn ();
- while (insn && GET_CODE (insn) == NOTE)
- insn = PREV_INSN (insn);
-
- if (insn && GET_CODE (insn) != BARRIER)
- {
- emit_jump_insn (gen_return ());
- emit_barrier ();
- }
- }
-#endif
-
if (noop_moves)
delete_noop_moves (f);
This helps some of the optimizations below by having less insns
being jumped around. */
- if (! reload_completed && after_regscan)
+ if (optimize && ! reload_completed && after_regscan)
for (insn = f; insn; insn = next)
{
rtx set = single_set (insn);
&& REGNO_FIRST_UID (REGNO (SET_DEST (set))) == INSN_UID (insn)
/* We use regno_last_note_uid so as not to delete the setting
of a reg that's used in notes. A subsequent optimization
- might arrange to use that reg for real. */
+ might arrange to use that reg for real. */
&& REGNO_LAST_NOTE_UID (REGNO (SET_DEST (set))) == INSN_UID (insn)
&& ! side_effects_p (SET_SRC (set))
- && ! find_reg_note (insn, REG_RETVAL, 0))
+ && ! find_reg_note (insn, REG_RETVAL, 0)
+ /* An ADDRESSOF expression can turn into a use of the internal arg
+ pointer, so do not delete the initialization of the internal
+ arg pointer yet. If it is truly dead, flow will delete the
+ initializing insn. */
+ && SET_DEST (set) != current_function_internal_arg_pointer)
delete_insn (insn);
}
for (insn = f; insn; insn = next)
{
rtx reallabelprev;
- rtx temp, temp1, temp2, temp3, temp4, temp5, temp6;
+ rtx temp, temp1, temp2 = NULL_RTX;
+ rtx temp4 ATTRIBUTE_UNUSED;
rtx nlabel;
- int this_is_simplejump, this_is_condjump, reversep = 0;
- int this_is_condjump_in_parallel;
-
-#if 0
- /* If NOT the first iteration, if this is the last jump pass
- (just before final), do the special peephole optimizations.
- Avoiding the first iteration gives ordinary jump opts
- a chance to work before peephole opts. */
-
- if (reload_completed && !first && !flag_no_peephole)
- if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
- peephole (insn);
-#endif
-
- /* That could have deleted some insns after INSN, so check now
- what the following insn is. */
+ int this_is_any_uncondjump;
+ int this_is_any_condjump;
+ int this_is_onlyjump;
next = NEXT_INSN (insn);
if (after_regscan && GET_CODE (insn) == NOTE
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
&& (temp1 = next_nonnote_insn (insn)) != 0
- && simplejump_p (temp1))
+ && any_uncondjump_p (temp1)
+ && onlyjump_p (temp1))
{
temp = PREV_INSN (insn);
if (duplicate_loop_exit_test (insn))
if (GET_CODE (insn) != JUMP_INSN)
continue;
- this_is_simplejump = simplejump_p (insn);
- this_is_condjump = condjump_p (insn);
- this_is_condjump_in_parallel = condjump_in_parallel_p (insn);
+ this_is_any_condjump = any_condjump_p (insn);
+ this_is_any_uncondjump = any_uncondjump_p (insn);
+ this_is_onlyjump = onlyjump_p (insn);
/* Tension the labels in dispatch tables. */
if (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
changed |= tension_vector_labels (PATTERN (insn), 1);
+ /* See if this jump goes to another jump and redirect if so. */
+ nlabel = follow_jumps (JUMP_LABEL (insn));
+ if (nlabel != JUMP_LABEL (insn))
+ changed |= redirect_jump (insn, nlabel, 1);
+
+ if (! optimize || minimal)
+ continue;
+
/* If a dispatch table always goes to the same place,
get rid of it and replace the insn that uses it. */
int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC;
int len = XVECLEN (pat, diff_vec_p);
rtx dispatch = prev_real_insn (insn);
+ rtx set;
for (i = 0; i < len; i++)
if (XEXP (XVECEXP (pat, diff_vec_p, i), 0)
!= XEXP (XVECEXP (pat, diff_vec_p, 0), 0))
break;
+
if (i == len
&& dispatch != 0
&& GET_CODE (dispatch) == JUMP_INSN
&& JUMP_LABEL (dispatch) != 0
- /* Don't mess with a casesi insn. */
- && !(GET_CODE (PATTERN (dispatch)) == SET
- && (GET_CODE (SET_SRC (PATTERN (dispatch)))
- == IF_THEN_ELSE))
+ /* Don't mess with a casesi insn.
+ XXX according to the comment before computed_jump_p(),
+ all casesi insns should be a parallel of the jump
+ and a USE of a LABEL_REF. */
+ && ! ((set = single_set (dispatch)) != NULL
+ && (GET_CODE (SET_SRC (set)) == IF_THEN_ELSE))
&& next_real_insn (JUMP_LABEL (dispatch)) == insn)
{
redirect_tablejump (dispatch,
reallabelprev = prev_active_insn (JUMP_LABEL (insn));
- /* If a jump references the end of the function, try to turn
- it into a RETURN insn, possibly a conditional one. */
- if (JUMP_LABEL (insn)
- && (next_active_insn (JUMP_LABEL (insn)) == 0
- || GET_CODE (PATTERN (next_active_insn (JUMP_LABEL (insn))))
- == RETURN))
- changed |= redirect_jump (insn, NULL_RTX);
-
/* Detect jump to following insn. */
- if (reallabelprev == insn && condjump_p (insn))
+ if (reallabelprev == insn
+ && (this_is_any_condjump || this_is_any_uncondjump)
+ && this_is_onlyjump)
{
next = next_real_insn (JUMP_LABEL (insn));
delete_jump (insn);
- changed = 1;
- continue;
- }
- /* If we have an unconditional jump preceded by a USE, try to put
- the USE before the target and jump there. This simplifies many
- of the optimizations below since we don't have to worry about
- dealing with these USE insns. We only do this if the label
- being branch to already has the identical USE or if code
- never falls through to that label. */
-
- if (this_is_simplejump
- && (temp = prev_nonnote_insn (insn)) != 0
- && GET_CODE (temp) == INSN && GET_CODE (PATTERN (temp)) == USE
- && (temp1 = prev_nonnote_insn (JUMP_LABEL (insn))) != 0
- && (GET_CODE (temp1) == BARRIER
- || (GET_CODE (temp1) == INSN
- && rtx_equal_p (PATTERN (temp), PATTERN (temp1))))
- /* Don't do this optimization if we have a loop containing only
- the USE instruction, and the loop start label has a usage
- count of 1. This is because we will redo this optimization
- everytime through the outer loop, and jump opt will never
- exit. */
- && ! ((temp2 = prev_nonnote_insn (temp)) != 0
- && temp2 == JUMP_LABEL (insn)
- && LABEL_NUSES (temp2) == 1))
- {
- if (GET_CODE (temp1) == BARRIER)
- {
- emit_insn_after (PATTERN (temp), temp1);
- temp1 = NEXT_INSN (temp1);
- }
+ /* Remove the "inactive" but "real" insns (i.e. uses and
+ clobbers) in between here and there. */
+ temp = insn;
+ while ((temp = next_real_insn (temp)) != next)
+ delete_insn (temp);
- delete_insn (temp);
- redirect_jump (insn, get_label_before (temp1));
- reallabelprev = prev_real_insn (temp1);
changed = 1;
+ continue;
}
- /* Simplify if (...) x = a; else x = b; by converting it
- to x = b; if (...) x = a;
- if B is sufficiently simple, the test doesn't involve X,
- and nothing in the test modifies B or X.
-
- If we have small register classes, we also can't do this if X
- is a hard register.
-
- If the "x = b;" insn has any REG_NOTES, we don't do this because
- of the possibility that we are running after CSE and there is a
- REG_EQUAL note that is only valid if the branch has already been
- taken. If we move the insn with the REG_EQUAL note, we may
- fold the comparison to always be false in a later CSE pass.
- (We could also delete the REG_NOTES when moving the insn, but it
- seems simpler to not move it.) An exception is that we can move
- the insn if the only note is a REG_EQUAL or REG_EQUIV whose
- value is the same as "b".
-
- INSN is the branch over the `else' part.
-
- We set:
-
- TEMP to the jump insn preceding "x = a;"
- TEMP1 to X
- TEMP2 to the insn that sets "x = b;"
- TEMP3 to the insn that sets "x = a;"
- TEMP4 to the set of "x = b"; */
-
- if (this_is_simplejump
- && (temp3 = prev_active_insn (insn)) != 0
- && GET_CODE (temp3) == INSN
- && (temp4 = single_set (temp3)) != 0
- && GET_CODE (temp1 = SET_DEST (temp4)) == REG
- && (! SMALL_REGISTER_CLASSES
- || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
- && (temp2 = next_active_insn (insn)) != 0
- && GET_CODE (temp2) == INSN
- && (temp4 = single_set (temp2)) != 0
- && rtx_equal_p (SET_DEST (temp4), temp1)
- && ! side_effects_p (SET_SRC (temp4))
- && ! may_trap_p (SET_SRC (temp4))
- && (REG_NOTES (temp2) == 0
- || ((REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUAL
- || REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUIV)
- && XEXP (REG_NOTES (temp2), 1) == 0
- && rtx_equal_p (XEXP (REG_NOTES (temp2), 0),
- SET_SRC (temp4))))
- && (temp = prev_active_insn (temp3)) != 0
- && condjump_p (temp) && ! simplejump_p (temp)
- /* TEMP must skip over the "x = a;" insn */
- && prev_real_insn (JUMP_LABEL (temp)) == insn
- && no_labels_between_p (insn, JUMP_LABEL (temp))
- /* There must be no other entries to the "x = b;" insn. */
- && no_labels_between_p (JUMP_LABEL (temp), temp2)
- /* INSN must either branch to the insn after TEMP2 or the insn
- after TEMP2 must branch to the same place as INSN. */
- && (reallabelprev == temp2
- || ((temp5 = next_active_insn (temp2)) != 0
- && simplejump_p (temp5)
- && JUMP_LABEL (temp5) == JUMP_LABEL (insn))))
+ /* Detect a conditional jump going to the same place
+ as an immediately following unconditional jump. */
+ else if (this_is_any_condjump && this_is_onlyjump
+ && (temp = next_active_insn (insn)) != 0
+ && simplejump_p (temp)
+ && (next_active_insn (JUMP_LABEL (insn))
+ == next_active_insn (JUMP_LABEL (temp))))
{
- /* The test expression, X, may be a complicated test with
- multiple branches. See if we can find all the uses of
- the label that TEMP branches to without hitting a CALL_INSN
- or a jump to somewhere else. */
- rtx target = JUMP_LABEL (temp);
- int nuses = LABEL_NUSES (target);
- rtx p;
-#ifdef HAVE_cc0
- rtx q;
-#endif
-
- /* Set P to the first jump insn that goes around "x = a;". */
- for (p = temp; nuses && p; p = prev_nonnote_insn (p))
- {
- if (GET_CODE (p) == JUMP_INSN)
- {
- if (condjump_p (p) && ! simplejump_p (p)
- && JUMP_LABEL (p) == target)
- {
- nuses--;
- if (nuses == 0)
- break;
- }
- else
- break;
- }
- else if (GET_CODE (p) == CALL_INSN)
+ /* Don't mess up test coverage analysis. */
+ temp2 = temp;
+ if (flag_test_coverage && !reload_completed)
+ for (temp2 = insn; temp2 != temp; temp2 = NEXT_INSN (temp2))
+ if (GET_CODE (temp2) == NOTE && NOTE_LINE_NUMBER (temp2) > 0)
break;
- }
-
-#ifdef HAVE_cc0
- /* We cannot insert anything between a set of cc and its use
- so if P uses cc0, we must back up to the previous insn. */
- q = prev_nonnote_insn (p);
- if (q && GET_RTX_CLASS (GET_CODE (q)) == 'i'
- && sets_cc0_p (PATTERN (q)))
- p = q;
-#endif
- if (p)
- p = PREV_INSN (p);
-
- /* If we found all the uses and there was no data conflict, we
- can move the assignment unless we can branch into the middle
- from somewhere. */
- if (nuses == 0 && p
- && no_labels_between_p (p, insn)
- && ! reg_referenced_between_p (temp1, p, NEXT_INSN (temp3))
- && ! reg_set_between_p (temp1, p, temp3)
- && (GET_CODE (SET_SRC (temp4)) == CONST_INT
- || ! modified_between_p (SET_SRC (temp4), p, temp2))
- /* Verify that registers used by the jump are not clobbered
- by the instruction being moved. */
- && ! regs_set_between_p (PATTERN (temp),
- PREV_INSN (temp2),
- NEXT_INSN (temp2)))
+ if (temp2 == temp)
{
- emit_insn_after_with_line_notes (PATTERN (temp2), p, temp2);
- delete_insn (temp2);
-
- /* Set NEXT to an insn that we know won't go away. */
- next = next_active_insn (insn);
-
- /* Delete the jump around the set. Note that we must do
- this before we redirect the test jumps so that it won't
- delete the code immediately following the assignment
- we moved (which might be a jump). */
-
- delete_insn (insn);
-
- /* We either have two consecutive labels or a jump to
- a jump, so adjust all the JUMP_INSNs to branch to where
- INSN branches to. */
- for (p = NEXT_INSN (p); p != next; p = NEXT_INSN (p))
- if (GET_CODE (p) == JUMP_INSN)
- redirect_jump (p, target);
+ /* Ensure that we jump to the later of the two labels.
+ Consider:
+
+ if (test) goto L2;
+ goto L1;
+ ...
+ L1:
+ (clobber return-reg)
+ L2:
+ (use return-reg)
+
+ If we leave the goto L1, we'll incorrectly leave
+ return-reg dead for TEST true. */
+
+ temp2 = next_active_insn (JUMP_LABEL (insn));
+ if (!temp2)
+ temp2 = get_last_insn ();
+ if (GET_CODE (temp2) != CODE_LABEL)
+ temp2 = prev_label (temp2);
+ if (temp2 != JUMP_LABEL (temp))
+ redirect_jump (temp, temp2, 1);
+ delete_jump (insn);
changed = 1;
continue;
}
}
- /* Simplify if (...) { x = a; goto l; } x = b; by converting it
- to x = a; if (...) goto l; x = b;
- if A is sufficiently simple, the test doesn't involve X,
- and nothing in the test modifies A or X.
-
- If we have small register classes, we also can't do this if X
- is a hard register.
-
- If the "x = a;" insn has any REG_NOTES, we don't do this because
- of the possibility that we are running after CSE and there is a
- REG_EQUAL note that is only valid if the branch has already been
- taken. If we move the insn with the REG_EQUAL note, we may
- fold the comparison to always be false in a later CSE pass.
- (We could also delete the REG_NOTES when moving the insn, but it
- seems simpler to not move it.) An exception is that we can move
- the insn if the only note is a REG_EQUAL or REG_EQUIV whose
- value is the same as "a".
-
- INSN is the goto.
-
- We set:
-
- TEMP to the jump insn preceding "x = a;"
- TEMP1 to X
- TEMP2 to the insn that sets "x = b;"
- TEMP3 to the insn that sets "x = a;"
- TEMP4 to the set of "x = a"; */
-
- if (this_is_simplejump
- && (temp2 = next_active_insn (insn)) != 0
- && GET_CODE (temp2) == INSN
- && (temp4 = single_set (temp2)) != 0
- && GET_CODE (temp1 = SET_DEST (temp4)) == REG
- && (! SMALL_REGISTER_CLASSES
- || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
- && (temp3 = prev_active_insn (insn)) != 0
- && GET_CODE (temp3) == INSN
- && (temp4 = single_set (temp3)) != 0
- && rtx_equal_p (SET_DEST (temp4), temp1)
- && ! side_effects_p (SET_SRC (temp4))
- && ! may_trap_p (SET_SRC (temp4))
- && (REG_NOTES (temp3) == 0
- || ((REG_NOTE_KIND (REG_NOTES (temp3)) == REG_EQUAL
- || REG_NOTE_KIND (REG_NOTES (temp3)) == REG_EQUIV)
- && XEXP (REG_NOTES (temp3), 1) == 0
- && rtx_equal_p (XEXP (REG_NOTES (temp3), 0),
- SET_SRC (temp4))))
- && (temp = prev_active_insn (temp3)) != 0
- && condjump_p (temp) && ! simplejump_p (temp)
- /* TEMP must skip over the "x = a;" insn */
- && prev_real_insn (JUMP_LABEL (temp)) == insn
- && no_labels_between_p (temp, insn))
- {
- rtx prev_label = JUMP_LABEL (temp);
- rtx insert_after = prev_nonnote_insn (temp);
-
-#ifdef HAVE_cc0
- /* We cannot insert anything between a set of cc and its use. */
- if (insert_after && GET_RTX_CLASS (GET_CODE (insert_after)) == 'i'
- && sets_cc0_p (PATTERN (insert_after)))
- insert_after = prev_nonnote_insn (insert_after);
-#endif
- ++LABEL_NUSES (prev_label);
-
- if (insert_after
- && no_labels_between_p (insert_after, temp)
- && ! reg_referenced_between_p (temp1, insert_after, temp3)
- && ! reg_referenced_between_p (temp1, temp3,
- NEXT_INSN (temp2))
- && ! reg_set_between_p (temp1, insert_after, temp)
- && ! modified_between_p (SET_SRC (temp4), insert_after, temp)
- /* Verify that registers used by the jump are not clobbered
- by the instruction being moved. */
- && ! regs_set_between_p (PATTERN (temp),
- PREV_INSN (temp3),
- NEXT_INSN (temp3))
- && invert_jump (temp, JUMP_LABEL (insn)))
- {
- emit_insn_after_with_line_notes (PATTERN (temp3),
- insert_after, temp3);
- delete_insn (temp3);
- delete_insn (insn);
- /* Set NEXT to an insn that we know won't go away. */
- next = temp2;
- changed = 1;
- }
- if (prev_label && --LABEL_NUSES (prev_label) == 0)
- delete_insn (prev_label);
- if (changed)
- continue;
- }
-
-#ifndef HAVE_cc0
- /* If we have if (...) x = exp; and branches are expensive,
- EXP is a single insn, does not have any side effects, cannot
- trap, and is not too costly, convert this to
- t = exp; if (...) x = t;
-
- Don't do this when we have CC0 because it is unlikely to help
- and we'd need to worry about where to place the new insn and
- the potential for conflicts. We also can't do this when we have
- notes on the insn for the same reason as above.
-
- We set:
-
- TEMP to the "x = exp;" insn.
- TEMP1 to the single set in the "x = exp;" insn.
- TEMP2 to "x". */
-
- if (! reload_completed
- && this_is_condjump && ! this_is_simplejump
- && BRANCH_COST >= 3
- && (temp = next_nonnote_insn (insn)) != 0
- && GET_CODE (temp) == INSN
- && REG_NOTES (temp) == 0
- && (reallabelprev == temp
- || ((temp2 = next_active_insn (temp)) != 0
- && simplejump_p (temp2)
- && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
- && (temp1 = single_set (temp)) != 0
- && (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
- && (! SMALL_REGISTER_CLASSES
- || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
- && GET_CODE (SET_SRC (temp1)) != REG
- && GET_CODE (SET_SRC (temp1)) != SUBREG
- && GET_CODE (SET_SRC (temp1)) != CONST_INT
- && ! side_effects_p (SET_SRC (temp1))
- && ! may_trap_p (SET_SRC (temp1))
- && rtx_cost (SET_SRC (temp1), SET) < 10)
- {
- rtx new = gen_reg_rtx (GET_MODE (temp2));
-
- if ((temp3 = find_insert_position (insn, temp))
- && validate_change (temp, &SET_DEST (temp1), new, 0))
- {
- next = emit_insn_after (gen_move_insn (temp2, new), insn);
- emit_insn_after_with_line_notes (PATTERN (temp),
- PREV_INSN (temp3), temp);
- delete_insn (temp);
- reallabelprev = prev_active_insn (JUMP_LABEL (insn));
-
- if (after_regscan)
- {
- reg_scan_update (temp3, NEXT_INSN (next), old_max_reg);
- old_max_reg = max_reg_num ();
- }
- }
- }
-
- /* Similarly, if it takes two insns to compute EXP but they
- have the same destination. Here TEMP3 will be the second
- insn and TEMP4 the SET from that insn. */
-
- if (! reload_completed
- && this_is_condjump && ! this_is_simplejump
- && BRANCH_COST >= 4
- && (temp = next_nonnote_insn (insn)) != 0
- && GET_CODE (temp) == INSN
- && REG_NOTES (temp) == 0
- && (temp3 = next_nonnote_insn (temp)) != 0
- && GET_CODE (temp3) == INSN
- && REG_NOTES (temp3) == 0
- && (reallabelprev == temp3
- || ((temp2 = next_active_insn (temp3)) != 0
- && simplejump_p (temp2)
- && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
- && (temp1 = single_set (temp)) != 0
- && (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
- && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
- && (! SMALL_REGISTER_CLASSES
- || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
- && ! side_effects_p (SET_SRC (temp1))
- && ! may_trap_p (SET_SRC (temp1))
- && rtx_cost (SET_SRC (temp1), SET) < 10
- && (temp4 = single_set (temp3)) != 0
- && rtx_equal_p (SET_DEST (temp4), temp2)
- && ! side_effects_p (SET_SRC (temp4))
- && ! may_trap_p (SET_SRC (temp4))
- && rtx_cost (SET_SRC (temp4), SET) < 10)
- {
- rtx new = gen_reg_rtx (GET_MODE (temp2));
-
- if ((temp5 = find_insert_position (insn, temp))
- && (temp6 = find_insert_position (insn, temp3))
- && validate_change (temp, &SET_DEST (temp1), new, 0))
- {
- /* Use the earliest of temp5 and temp6. */
- if (temp5 != insn)
- temp6 = temp5;
- next = emit_insn_after (gen_move_insn (temp2, new), insn);
- emit_insn_after_with_line_notes (PATTERN (temp),
- PREV_INSN (temp6), temp);
- emit_insn_after_with_line_notes
- (replace_rtx (PATTERN (temp3), temp2, new),
- PREV_INSN (temp6), temp3);
- delete_insn (temp);
- delete_insn (temp3);
- reallabelprev = prev_active_insn (JUMP_LABEL (insn));
-
- if (after_regscan)
- {
- reg_scan_update (temp6, NEXT_INSN (next), old_max_reg);
- old_max_reg = max_reg_num ();
- }
- }
- }
-
- /* Finally, handle the case where two insns are used to
- compute EXP but a temporary register is used. Here we must
- ensure that the temporary register is not used anywhere else. */
-
- if (! reload_completed
- && after_regscan
- && this_is_condjump && ! this_is_simplejump
- && BRANCH_COST >= 4
- && (temp = next_nonnote_insn (insn)) != 0
- && GET_CODE (temp) == INSN
- && REG_NOTES (temp) == 0
- && (temp3 = next_nonnote_insn (temp)) != 0
- && GET_CODE (temp3) == INSN
- && REG_NOTES (temp3) == 0
- && (reallabelprev == temp3
- || ((temp2 = next_active_insn (temp3)) != 0
- && simplejump_p (temp2)
- && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
- && (temp1 = single_set (temp)) != 0
- && (temp5 = SET_DEST (temp1),
- (GET_CODE (temp5) == REG
- || (GET_CODE (temp5) == SUBREG
- && (temp5 = SUBREG_REG (temp5),
- GET_CODE (temp5) == REG))))
- && REGNO (temp5) >= FIRST_PSEUDO_REGISTER
- && REGNO_FIRST_UID (REGNO (temp5)) == INSN_UID (temp)
- && REGNO_LAST_UID (REGNO (temp5)) == INSN_UID (temp3)
- && ! side_effects_p (SET_SRC (temp1))
- && ! may_trap_p (SET_SRC (temp1))
- && rtx_cost (SET_SRC (temp1), SET) < 10
- && (temp4 = single_set (temp3)) != 0
- && (temp2 = SET_DEST (temp4), GET_CODE (temp2) == REG)
- && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
- && (! SMALL_REGISTER_CLASSES
- || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
- && rtx_equal_p (SET_DEST (temp4), temp2)
- && ! side_effects_p (SET_SRC (temp4))
- && ! may_trap_p (SET_SRC (temp4))
- && rtx_cost (SET_SRC (temp4), SET) < 10)
- {
- rtx new = gen_reg_rtx (GET_MODE (temp2));
-
- if ((temp5 = find_insert_position (insn, temp))
- && (temp6 = find_insert_position (insn, temp3))
- && validate_change (temp3, &SET_DEST (temp4), new, 0))
- {
- /* Use the earliest of temp5 and temp6. */
- if (temp5 != insn)
- temp6 = temp5;
- next = emit_insn_after (gen_move_insn (temp2, new), insn);
- emit_insn_after_with_line_notes (PATTERN (temp),
- PREV_INSN (temp6), temp);
- emit_insn_after_with_line_notes (PATTERN (temp3),
- PREV_INSN (temp6), temp3);
- delete_insn (temp);
- delete_insn (temp3);
- reallabelprev = prev_active_insn (JUMP_LABEL (insn));
-
- if (after_regscan)
- {
- reg_scan_update (temp6, NEXT_INSN (next), old_max_reg);
- old_max_reg = max_reg_num ();
- }
- }
- }
-#endif /* HAVE_cc0 */
-
- /* Try to use a conditional move (if the target has them), or a
- store-flag insn. The general case is:
-
- 1) x = a; if (...) x = b; and
- 2) if (...) x = b;
-
- If the jump would be faster, the machine should not have defined
- the movcc or scc insns!. These cases are often made by the
- previous optimization.
-
- The second case is treated as x = x; if (...) x = b;.
-
- INSN here is the jump around the store. We set:
-
- TEMP to the "x = b;" insn.
- TEMP1 to X.
- TEMP2 to B.
- TEMP3 to A (X in the second case).
- TEMP4 to the condition being tested.
- TEMP5 to the earliest insn used to find the condition. */
-
- if (/* We can't do this after reload has completed. */
- ! reload_completed
- && this_is_condjump && ! this_is_simplejump
- /* Set TEMP to the "x = b;" insn. */
- && (temp = next_nonnote_insn (insn)) != 0
- && GET_CODE (temp) == INSN
- && GET_CODE (PATTERN (temp)) == SET
- && GET_CODE (temp1 = SET_DEST (PATTERN (temp))) == REG
- && (! SMALL_REGISTER_CLASSES
- || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
- && ! side_effects_p (temp2 = SET_SRC (PATTERN (temp)))
- && ! may_trap_p (temp2)
- /* Allow either form, but prefer the former if both apply.
- There is no point in using the old value of TEMP1 if
- it is a register, since cse will alias them. It can
- lose if the old value were a hard register since CSE
- won't replace hard registers. Avoid using TEMP3 if
- small register classes and it is a hard register. */
- && (((temp3 = reg_set_last (temp1, insn)) != 0
- && ! (SMALL_REGISTER_CLASSES && GET_CODE (temp3) == REG
- && REGNO (temp3) < FIRST_PSEUDO_REGISTER))
- /* Make the latter case look like x = x; if (...) x = b; */
- || (temp3 = temp1, 1))
- /* INSN must either branch to the insn after TEMP or the insn
- after TEMP must branch to the same place as INSN. */
- && (reallabelprev == temp
- || ((temp4 = next_active_insn (temp)) != 0
- && simplejump_p (temp4)
- && JUMP_LABEL (temp4) == JUMP_LABEL (insn)))
- && (temp4 = get_condition (insn, &temp5)) != 0
- /* We must be comparing objects whose modes imply the size.
- We could handle BLKmode if (1) emit_store_flag could
- and (2) we could find the size reliably. */
- && GET_MODE (XEXP (temp4, 0)) != BLKmode
- /* Even if branches are cheap, the store_flag optimization
- can win when the operation to be performed can be
- expressed directly. */
-#ifdef HAVE_cc0
- /* If the previous insn sets CC0 and something else, we can't
- do this since we are going to delete that insn. */
-
- && ! ((temp6 = prev_nonnote_insn (insn)) != 0
- && GET_CODE (temp6) == INSN
- && (sets_cc0_p (PATTERN (temp6)) == -1
- || (sets_cc0_p (PATTERN (temp6)) == 1
- && FIND_REG_INC_NOTE (temp6, NULL_RTX))))
-#endif
- )
- {
-#ifdef HAVE_conditional_move
- /* First try a conditional move. */
- {
- enum rtx_code code = GET_CODE (temp4);
- rtx var = temp1;
- rtx cond0, cond1, aval, bval;
- rtx target;
-
- /* Copy the compared variables into cond0 and cond1, so that
- any side effects performed in or after the old comparison,
- will not affect our compare which will come later. */
- /* ??? Is it possible to just use the comparison in the jump
- insn? After all, we're going to delete it. We'd have
- to modify emit_conditional_move to take a comparison rtx
- instead or write a new function. */
- cond0 = gen_reg_rtx (GET_MODE (XEXP (temp4, 0)));
- /* We want the target to be able to simplify comparisons with
- zero (and maybe other constants as well), so don't create
- pseudos for them. There's no need to either. */
- if (GET_CODE (XEXP (temp4, 1)) == CONST_INT
- || GET_CODE (XEXP (temp4, 1)) == CONST_DOUBLE)
- cond1 = XEXP (temp4, 1);
- else
- cond1 = gen_reg_rtx (GET_MODE (XEXP (temp4, 1)));
-
- aval = temp3;
- bval = temp2;
-
- start_sequence ();
- target = emit_conditional_move (var, code,
- cond0, cond1, VOIDmode,
- aval, bval, GET_MODE (var),
- (code == LTU || code == GEU
- || code == LEU || code == GTU));
-
- if (target)
- {
- rtx seq1,seq2,last;
-
- /* Save the conditional move sequence but don't emit it
- yet. On some machines, like the alpha, it is possible
- that temp5 == insn, so next generate the sequence that
- saves the compared values and then emit both
- sequences ensuring seq1 occurs before seq2. */
- seq2 = get_insns ();
- end_sequence ();
-
- /* Now that we can't fail, generate the copy insns that
- preserve the compared values. */
- start_sequence ();
- emit_move_insn (cond0, XEXP (temp4, 0));
- if (cond1 != XEXP (temp4, 1))
- emit_move_insn (cond1, XEXP (temp4, 1));
- seq1 = get_insns ();
- end_sequence ();
-
- emit_insns_before (seq1, temp5);
- /* Insert conditional move after insn, to be sure that
- the jump and a possible compare won't be separated */
- last = emit_insns_after (seq2, insn);
-
- /* ??? We can also delete the insn that sets X to A.
- Flow will do it too though. */
- delete_insn (temp);
- next = NEXT_INSN (insn);
- delete_jump (insn);
-
- if (after_regscan)
- {
- reg_scan_update (seq1, NEXT_INSN (last), old_max_reg);
- old_max_reg = max_reg_num ();
- }
-
- changed = 1;
- continue;
- }
- else
- end_sequence ();
- }
-#endif
-
- /* That didn't work, try a store-flag insn.
-
- We further divide the cases into:
-
- 1) x = a; if (...) x = b; and either A or B is zero,
- 2) if (...) x = 0; and jumps are expensive,
- 3) x = a; if (...) x = b; and A and B are constants where all
- the set bits in A are also set in B and jumps are expensive,
- 4) x = a; if (...) x = b; and A and B non-zero, and jumps are
- more expensive, and
- 5) if (...) x = b; if jumps are even more expensive. */
-
- if (GET_MODE_CLASS (GET_MODE (temp1)) == MODE_INT
- && ((GET_CODE (temp3) == CONST_INT)
- /* Make the latter case look like
- x = x; if (...) x = 0; */
- || (temp3 = temp1,
- ((BRANCH_COST >= 2
- && temp2 == const0_rtx)
- || BRANCH_COST >= 3)))
- /* If B is zero, OK; if A is zero, can only do (1) if we
- can reverse the condition. See if (3) applies possibly
- by reversing the condition. Prefer reversing to (4) when
- branches are very expensive. */
- && (((BRANCH_COST >= 2
- || STORE_FLAG_VALUE == -1
- || (STORE_FLAG_VALUE == 1
- /* Check that the mask is a power of two,
- so that it can probably be generated
- with a shift. */
- && GET_CODE (temp3) == CONST_INT
- && exact_log2 (INTVAL (temp3)) >= 0))
- && (reversep = 0, temp2 == const0_rtx))
- || ((BRANCH_COST >= 2
- || STORE_FLAG_VALUE == -1
- || (STORE_FLAG_VALUE == 1
- && GET_CODE (temp2) == CONST_INT
- && exact_log2 (INTVAL (temp2)) >= 0))
- && temp3 == const0_rtx
- && (reversep = can_reverse_comparison_p (temp4, insn)))
- || (BRANCH_COST >= 2
- && GET_CODE (temp2) == CONST_INT
- && GET_CODE (temp3) == CONST_INT
- && ((INTVAL (temp2) & INTVAL (temp3)) == INTVAL (temp2)
- || ((INTVAL (temp2) & INTVAL (temp3)) == INTVAL (temp3)
- && (reversep = can_reverse_comparison_p (temp4,
- insn)))))
- || BRANCH_COST >= 3)
- )
- {
- enum rtx_code code = GET_CODE (temp4);
- rtx uval, cval, var = temp1;
- int normalizep;
- rtx target;
-
- /* If necessary, reverse the condition. */
- if (reversep)
- code = reverse_condition (code), uval = temp2, cval = temp3;
- else
- uval = temp3, cval = temp2;
-
- /* If CVAL is non-zero, normalize to -1. Otherwise, if UVAL
- is the constant 1, it is best to just compute the result
- directly. If UVAL is constant and STORE_FLAG_VALUE
- includes all of its bits, it is best to compute the flag
- value unnormalized and `and' it with UVAL. Otherwise,
- normalize to -1 and `and' with UVAL. */
- normalizep = (cval != const0_rtx ? -1
- : (uval == const1_rtx ? 1
- : (GET_CODE (uval) == CONST_INT
- && (INTVAL (uval) & ~STORE_FLAG_VALUE) == 0)
- ? 0 : -1));
-
- /* We will be putting the store-flag insn immediately in
- front of the comparison that was originally being done,
- so we know all the variables in TEMP4 will be valid.
- However, this might be in front of the assignment of
- A to VAR. If it is, it would clobber the store-flag
- we will be emitting.
-
- Therefore, emit into a temporary which will be copied to
- VAR immediately after TEMP. */
-
- start_sequence ();
- target = emit_store_flag (gen_reg_rtx (GET_MODE (var)), code,
- XEXP (temp4, 0), XEXP (temp4, 1),
- VOIDmode,
- (code == LTU || code == LEU
- || code == GEU || code == GTU),
- normalizep);
- if (target)
- {
- rtx seq;
- rtx before = insn;
-
- seq = get_insns ();
- end_sequence ();
-
- /* Put the store-flag insns in front of the first insn
- used to compute the condition to ensure that we
- use the same values of them as the current
- comparison. However, the remainder of the insns we
- generate will be placed directly in front of the
- jump insn, in case any of the pseudos we use
- are modified earlier. */
-
- emit_insns_before (seq, temp5);
-
- start_sequence ();
-
- /* Both CVAL and UVAL are non-zero. */
- if (cval != const0_rtx && uval != const0_rtx)
- {
- rtx tem1, tem2;
-
- tem1 = expand_and (uval, target, NULL_RTX);
- if (GET_CODE (cval) == CONST_INT
- && GET_CODE (uval) == CONST_INT
- && (INTVAL (cval) & INTVAL (uval)) == INTVAL (cval))
- tem2 = cval;
- else
- {
- tem2 = expand_unop (GET_MODE (var), one_cmpl_optab,
- target, NULL_RTX, 0);
- tem2 = expand_and (cval, tem2,
- (GET_CODE (tem2) == REG
- ? tem2 : 0));
- }
-
- /* If we usually make new pseudos, do so here. This
- turns out to help machines that have conditional
- move insns. */
- /* ??? Conditional moves have already been handled.
- This may be obsolete. */
-
- if (flag_expensive_optimizations)
- target = 0;
-
- target = expand_binop (GET_MODE (var), ior_optab,
- tem1, tem2, target,
- 1, OPTAB_WIDEN);
- }
- else if (normalizep != 1)
- {
- /* We know that either CVAL or UVAL is zero. If
- UVAL is zero, negate TARGET and `and' with CVAL.
- Otherwise, `and' with UVAL. */
- if (uval == const0_rtx)
- {
- target = expand_unop (GET_MODE (var), one_cmpl_optab,
- target, NULL_RTX, 0);
- uval = cval;
- }
-
- target = expand_and (uval, target,
- (GET_CODE (target) == REG
- && ! preserve_subexpressions_p ()
- ? target : NULL_RTX));
- }
-
- emit_move_insn (var, target);
- seq = get_insns ();
- end_sequence ();
-#ifdef HAVE_cc0
- /* If INSN uses CC0, we must not separate it from the
- insn that sets cc0. */
- if (reg_mentioned_p (cc0_rtx, PATTERN (before)))
- before = prev_nonnote_insn (before);
-#endif
- emit_insns_before (seq, before);
-
- delete_insn (temp);
- next = NEXT_INSN (insn);
- delete_jump (insn);
-
- if (after_regscan)
- {
- reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
- old_max_reg = max_reg_num ();
- }
-
- changed = 1;
- continue;
- }
- else
- end_sequence ();
- }
- }
-
- /* If branches are expensive, convert
- if (foo) bar++; to bar += (foo != 0);
- and similarly for "bar--;"
-
- INSN is the conditional branch around the arithmetic. We set:
-
- TEMP is the arithmetic insn.
- TEMP1 is the SET doing the arithmetic.
- TEMP2 is the operand being incremented or decremented.
- TEMP3 to the condition being tested.
- TEMP4 to the earliest insn used to find the condition. */
+ /* Detect a conditional jump jumping over an unconditional jump. */
- if ((BRANCH_COST >= 2
-#ifdef HAVE_incscc
- || HAVE_incscc
-#endif
-#ifdef HAVE_decscc
- || HAVE_decscc
-#endif
- )
- && ! reload_completed
- && this_is_condjump && ! this_is_simplejump
- && (temp = next_nonnote_insn (insn)) != 0
- && (temp1 = single_set (temp)) != 0
- && (temp2 = SET_DEST (temp1),
- GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT)
- && GET_CODE (SET_SRC (temp1)) == PLUS
- && (XEXP (SET_SRC (temp1), 1) == const1_rtx
- || XEXP (SET_SRC (temp1), 1) == constm1_rtx)
- && rtx_equal_p (temp2, XEXP (SET_SRC (temp1), 0))
- && ! side_effects_p (temp2)
- && ! may_trap_p (temp2)
- /* INSN must either branch to the insn after TEMP or the insn
- after TEMP must branch to the same place as INSN. */
- && (reallabelprev == temp
- || ((temp3 = next_active_insn (temp)) != 0
- && simplejump_p (temp3)
- && JUMP_LABEL (temp3) == JUMP_LABEL (insn)))
- && (temp3 = get_condition (insn, &temp4)) != 0
- /* We must be comparing objects whose modes imply the size.
- We could handle BLKmode if (1) emit_store_flag could
- and (2) we could find the size reliably. */
- && GET_MODE (XEXP (temp3, 0)) != BLKmode
- && can_reverse_comparison_p (temp3, insn))
+ else if (this_is_any_condjump
+ && reallabelprev != 0
+ && GET_CODE (reallabelprev) == JUMP_INSN
+ && prev_active_insn (reallabelprev) == insn
+ && no_labels_between_p (insn, reallabelprev)
+ && any_uncondjump_p (reallabelprev)
+ && onlyjump_p (reallabelprev))
{
- rtx temp6, target = 0, seq, init_insn = 0, init = temp2;
- enum rtx_code code = reverse_condition (GET_CODE (temp3));
-
- start_sequence ();
-
- /* It must be the case that TEMP2 is not modified in the range
- [TEMP4, INSN). The one exception we make is if the insn
- before INSN sets TEMP2 to something which is also unchanged
- in that range. In that case, we can move the initialization
- into our sequence. */
-
- if ((temp5 = prev_active_insn (insn)) != 0
- && no_labels_between_p (temp5, insn)
- && GET_CODE (temp5) == INSN
- && (temp6 = single_set (temp5)) != 0
- && rtx_equal_p (temp2, SET_DEST (temp6))
- && (CONSTANT_P (SET_SRC (temp6))
- || GET_CODE (SET_SRC (temp6)) == REG
- || GET_CODE (SET_SRC (temp6)) == SUBREG))
- {
- emit_insn (PATTERN (temp5));
- init_insn = temp5;
- init = SET_SRC (temp6);
- }
-
- if (CONSTANT_P (init)
- || ! reg_set_between_p (init, PREV_INSN (temp4), insn))
- target = emit_store_flag (gen_reg_rtx (GET_MODE (temp2)), code,
- XEXP (temp3, 0), XEXP (temp3, 1),
- VOIDmode,
- (code == LTU || code == LEU
- || code == GTU || code == GEU), 1);
-
- /* If we can do the store-flag, do the addition or
- subtraction. */
+ /* When we invert the unconditional jump, we will be
+ decrementing the usage count of its old label.
+ Make sure that we don't delete it now because that
+ might cause the following code to be deleted. */
+ rtx prev_uses = prev_nonnote_insn (reallabelprev);
+ rtx prev_label = JUMP_LABEL (insn);
- if (target)
- target = expand_binop (GET_MODE (temp2),
- (XEXP (SET_SRC (temp1), 1) == const1_rtx
- ? add_optab : sub_optab),
- temp2, target, temp2, 0, OPTAB_WIDEN);
+ if (prev_label)
+ ++LABEL_NUSES (prev_label);
- if (target != 0)
+ if (invert_jump (insn, JUMP_LABEL (reallabelprev), 1))
{
- /* Put the result back in temp2 in case it isn't already.
- Then replace the jump, possible a CC0-setting insn in
- front of the jump, and TEMP, with the sequence we have
- made. */
-
- if (target != temp2)
- emit_move_insn (temp2, target);
-
- seq = get_insns ();
- end_sequence ();
-
- emit_insns_before (seq, temp4);
- delete_insn (temp);
-
- if (init_insn)
- delete_insn (init_insn);
+ /* It is very likely that if there are USE insns before
+ this jump, they hold REG_DEAD notes. These REG_DEAD
+ notes are no longer valid due to this optimization,
+ and will cause the life-analysis of following passes
+ (notably delayed-branch scheduling) to think that
+ these registers are dead when they are not.
- next = NEXT_INSN (insn);
-#ifdef HAVE_cc0
- delete_insn (prev_nonnote_insn (insn));
-#endif
- delete_insn (insn);
+ To prevent this trouble, we just remove the USE insns
+ from the insn chain. */
- if (after_regscan)
+ while (prev_uses && GET_CODE (prev_uses) == INSN
+ && GET_CODE (PATTERN (prev_uses)) == USE)
{
- reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
- old_max_reg = max_reg_num ();
+ rtx useless = prev_uses;
+ prev_uses = prev_nonnote_insn (prev_uses);
+ delete_insn (useless);
}
+ delete_insn (reallabelprev);
changed = 1;
- continue;
}
- else
- end_sequence ();
- }
-
- /* Simplify if (...) x = 1; else {...} if (x) ...
- We recognize this case scanning backwards as well.
-
- TEMP is the assignment to x;
- TEMP1 is the label at the head of the second if. */
- /* ?? This should call get_condition to find the values being
- compared, instead of looking for a COMPARE insn when HAVE_cc0
- is not defined. This would allow it to work on the m88k. */
- /* ?? This optimization is only safe before cse is run if HAVE_cc0
- is not defined and the condition is tested by a separate compare
- insn. This is because the code below assumes that the result
- of the compare dies in the following branch.
-
- Not only that, but there might be other insns between the
- compare and branch whose results are live. Those insns need
- to be executed.
-
- A way to fix this is to move the insns at JUMP_LABEL (insn)
- to before INSN. If we are running before flow, they will
- be deleted if they aren't needed. But this doesn't work
- well after flow.
- This is really a special-case of jump threading, anyway. The
- right thing to do is to replace this and jump threading with
- much simpler code in cse.
-
- This code has been turned off in the non-cc0 case in the
- meantime. */
-
-#ifdef HAVE_cc0
- else if (this_is_simplejump
- /* Safe to skip USE and CLOBBER insns here
- since they will not be deleted. */
- && (temp = prev_active_insn (insn))
- && no_labels_between_p (temp, insn)
- && GET_CODE (temp) == INSN
- && GET_CODE (PATTERN (temp)) == SET
- && GET_CODE (SET_DEST (PATTERN (temp))) == REG
- && CONSTANT_P (SET_SRC (PATTERN (temp)))
- && (temp1 = next_active_insn (JUMP_LABEL (insn)))
- /* If we find that the next value tested is `x'
- (TEMP1 is the insn where this happens), win. */
- && GET_CODE (temp1) == INSN
- && GET_CODE (PATTERN (temp1)) == SET
-#ifdef HAVE_cc0
- /* Does temp1 `tst' the value of x? */
- && SET_SRC (PATTERN (temp1)) == SET_DEST (PATTERN (temp))
- && SET_DEST (PATTERN (temp1)) == cc0_rtx
- && (temp1 = next_nonnote_insn (temp1))
-#else
- /* Does temp1 compare the value of x against zero? */
- && GET_CODE (SET_SRC (PATTERN (temp1))) == COMPARE
- && XEXP (SET_SRC (PATTERN (temp1)), 1) == const0_rtx
- && (XEXP (SET_SRC (PATTERN (temp1)), 0)
- == SET_DEST (PATTERN (temp)))
- && GET_CODE (SET_DEST (PATTERN (temp1))) == REG
- && (temp1 = find_next_ref (SET_DEST (PATTERN (temp1)), temp1))
-#endif
- && condjump_p (temp1))
- {
- /* Get the if_then_else from the condjump. */
- rtx choice = SET_SRC (PATTERN (temp1));
- if (GET_CODE (choice) == IF_THEN_ELSE)
- {
- enum rtx_code code = GET_CODE (XEXP (choice, 0));
- rtx val = SET_SRC (PATTERN (temp));
- rtx cond
- = simplify_relational_operation (code, GET_MODE (SET_DEST (PATTERN (temp))),
- val, const0_rtx);
- rtx ultimate;
-
- if (cond == const_true_rtx)
- ultimate = XEXP (choice, 1);
- else if (cond == const0_rtx)
- ultimate = XEXP (choice, 2);
- else
- ultimate = 0;
-
- if (ultimate == pc_rtx)
- ultimate = get_label_after (temp1);
- else if (ultimate && GET_CODE (ultimate) != RETURN)
- ultimate = XEXP (ultimate, 0);
+ /* We can now safely delete the label if it is unreferenced
+ since the delete_insn above has deleted the BARRIER. */
+ if (prev_label && --LABEL_NUSES (prev_label) == 0)
+ delete_insn (prev_label);
- if (ultimate && JUMP_LABEL(insn) != ultimate)
- changed |= redirect_jump (insn, ultimate);
- }
+ next = NEXT_INSN (insn);
}
-#endif
-#if 0
- /* @@ This needs a bit of work before it will be right.
-
- Any type of comparison can be accepted for the first and
- second compare. When rewriting the first jump, we must
- compute the what conditions can reach label3, and use the
- appropriate code. We can not simply reverse/swap the code
- of the first jump. In some cases, the second jump must be
- rewritten also.
-
- For example,
- < == converts to > ==
- < != converts to == >
- etc.
-
- If the code is written to only accept an '==' test for the second
- compare, then all that needs to be done is to swap the condition
- of the first branch.
-
- It is questionable whether we want this optimization anyways,
- since if the user wrote code like this because he/she knew that
- the jump to label1 is taken most of the time, then rewriting
- this gives slower code. */
- /* @@ This should call get_condition to find the values being
- compared, instead of looking for a COMPARE insn when HAVE_cc0
- is not defined. This would allow it to work on the m88k. */
- /* @@ This optimization is only safe before cse is run if HAVE_cc0
- is not defined and the condition is tested by a separate compare
- insn. This is because the code below assumes that the result
- of the compare dies in the following branch. */
-
- /* Simplify test a ~= b
- condjump label1;
- test a == b
- condjump label2;
- jump label3;
- label1:
-
- rewriting as
- test a ~~= b
- condjump label3
- test a == b
- condjump label2
- label1:
-
- where ~= is an inequality, e.g. >, and ~~= is the swapped
- inequality, e.g. <.
-
- We recognize this case scanning backwards.
-
- TEMP is the conditional jump to `label2';
- TEMP1 is the test for `a == b';
- TEMP2 is the conditional jump to `label1';
- TEMP3 is the test for `a ~= b'. */
- else if (this_is_simplejump
- && (temp = prev_active_insn (insn))
- && no_labels_between_p (temp, insn)
- && condjump_p (temp)
- && (temp1 = prev_active_insn (temp))
- && no_labels_between_p (temp1, temp)
- && GET_CODE (temp1) == INSN
- && GET_CODE (PATTERN (temp1)) == SET
-#ifdef HAVE_cc0
- && sets_cc0_p (PATTERN (temp1)) == 1
-#else
- && GET_CODE (SET_SRC (PATTERN (temp1))) == COMPARE
- && GET_CODE (SET_DEST (PATTERN (temp1))) == REG
- && (temp == find_next_ref (SET_DEST (PATTERN (temp1)), temp1))
-#endif
- && (temp2 = prev_active_insn (temp1))
- && no_labels_between_p (temp2, temp1)
- && condjump_p (temp2)
- && JUMP_LABEL (temp2) == next_nonnote_insn (NEXT_INSN (insn))
- && (temp3 = prev_active_insn (temp2))
- && no_labels_between_p (temp3, temp2)
- && GET_CODE (PATTERN (temp3)) == SET
- && rtx_equal_p (SET_DEST (PATTERN (temp3)),
- SET_DEST (PATTERN (temp1)))
- && rtx_equal_p (SET_SRC (PATTERN (temp1)),
- SET_SRC (PATTERN (temp3)))
- && ! inequality_comparisons_p (PATTERN (temp))
- && inequality_comparisons_p (PATTERN (temp2)))
- {
- rtx fallthrough_label = JUMP_LABEL (temp2);
-
- ++LABEL_NUSES (fallthrough_label);
- if (swap_jump (temp2, JUMP_LABEL (insn)))
- {
- delete_insn (insn);
- changed = 1;
- }
+ /* If we have an unconditional jump preceded by a USE, try to put
+ the USE before the target and jump there. This simplifies many
+ of the optimizations below since we don't have to worry about
+ dealing with these USE insns. We only do this if the label
+ being branched to already has the identical USE or if code
+ never falls through to that label. */
- if (--LABEL_NUSES (fallthrough_label) == 0)
- delete_insn (fallthrough_label);
- }
-#endif
- /* Simplify if (...) {... x = 1;} if (x) ...
-
- We recognize this case backwards.
-
- TEMP is the test of `x';
- TEMP1 is the assignment to `x' at the end of the
- previous statement. */
- /* @@ This should call get_condition to find the values being
- compared, instead of looking for a COMPARE insn when HAVE_cc0
- is not defined. This would allow it to work on the m88k. */
- /* @@ This optimization is only safe before cse is run if HAVE_cc0
- is not defined and the condition is tested by a separate compare
- insn. This is because the code below assumes that the result
- of the compare dies in the following branch. */
-
- /* ??? This has to be turned off. The problem is that the
- unconditional jump might indirectly end up branching to the
- label between TEMP1 and TEMP. We can't detect this, in general,
- since it may become a jump to there after further optimizations.
- If that jump is done, it will be deleted, so we will retry
- this optimization in the next pass, thus an infinite loop.
-
- The present code prevents this by putting the jump after the
- label, but this is not logically correct. */
-#if 0
- else if (this_is_condjump
- /* Safe to skip USE and CLOBBER insns here
- since they will not be deleted. */
- && (temp = prev_active_insn (insn))
- && no_labels_between_p (temp, insn)
+ else if (this_is_any_uncondjump
+ && (temp = prev_nonnote_insn (insn)) != 0
&& GET_CODE (temp) == INSN
- && GET_CODE (PATTERN (temp)) == SET
-#ifdef HAVE_cc0
- && sets_cc0_p (PATTERN (temp)) == 1
- && GET_CODE (SET_SRC (PATTERN (temp))) == REG
-#else
- /* Temp must be a compare insn, we can not accept a register
- to register move here, since it may not be simply a
- tst insn. */
- && GET_CODE (SET_SRC (PATTERN (temp))) == COMPARE
- && XEXP (SET_SRC (PATTERN (temp)), 1) == const0_rtx
- && GET_CODE (XEXP (SET_SRC (PATTERN (temp)), 0)) == REG
- && GET_CODE (SET_DEST (PATTERN (temp))) == REG
- && insn == find_next_ref (SET_DEST (PATTERN (temp)), temp)
-#endif
- /* May skip USE or CLOBBER insns here
- for checking for opportunity, since we
- take care of them later. */
- && (temp1 = prev_active_insn (temp))
- && GET_CODE (temp1) == INSN
- && GET_CODE (PATTERN (temp1)) == SET
-#ifdef HAVE_cc0
- && SET_SRC (PATTERN (temp)) == SET_DEST (PATTERN (temp1))
-#else
- && (XEXP (SET_SRC (PATTERN (temp)), 0)
- == SET_DEST (PATTERN (temp1)))
-#endif
- && CONSTANT_P (SET_SRC (PATTERN (temp1)))
- /* If this isn't true, cse will do the job. */
- && ! no_labels_between_p (temp1, temp))
+ && GET_CODE (PATTERN (temp)) == USE
+ && (temp1 = prev_nonnote_insn (JUMP_LABEL (insn))) != 0
+ && (GET_CODE (temp1) == BARRIER
+ || (GET_CODE (temp1) == INSN
+ && rtx_equal_p (PATTERN (temp), PATTERN (temp1))))
+ /* Don't do this optimization if we have a loop containing
+ only the USE instruction, and the loop start label has
+ a usage count of 1. This is because we will redo this
+ optimization every time through the outer loop, and jump
+ opt will never exit. */
+ && ! ((temp2 = prev_nonnote_insn (temp)) != 0
+ && temp2 == JUMP_LABEL (insn)
+ && LABEL_NUSES (temp2) == 1))
{
- /* Get the if_then_else from the condjump. */
- rtx choice = SET_SRC (PATTERN (insn));
- if (GET_CODE (choice) == IF_THEN_ELSE
- && (GET_CODE (XEXP (choice, 0)) == EQ
- || GET_CODE (XEXP (choice, 0)) == NE))
+ if (GET_CODE (temp1) == BARRIER)
{
- int want_nonzero = (GET_CODE (XEXP (choice, 0)) == NE);
- rtx last_insn;
- rtx ultimate;
- rtx p;
-
- /* Get the place that condjump will jump to
- if it is reached from here. */
- if ((SET_SRC (PATTERN (temp1)) != const0_rtx)
- == want_nonzero)
- ultimate = XEXP (choice, 1);
- else
- ultimate = XEXP (choice, 2);
- /* Get it as a CODE_LABEL. */
- if (ultimate == pc_rtx)
- ultimate = get_label_after (insn);
- else
- /* Get the label out of the LABEL_REF. */
- ultimate = XEXP (ultimate, 0);
-
- /* Insert the jump immediately before TEMP, specifically
- after the label that is between TEMP1 and TEMP. */
- last_insn = PREV_INSN (temp);
-
- /* If we would be branching to the next insn, the jump
- would immediately be deleted and the re-inserted in
- a subsequent pass over the code. So don't do anything
- in that case. */
- if (next_active_insn (last_insn)
- != next_active_insn (ultimate))
- {
- emit_barrier_after (last_insn);
- p = emit_jump_insn_after (gen_jump (ultimate),
- last_insn);
- JUMP_LABEL (p) = ultimate;
- ++LABEL_NUSES (ultimate);
- if (INSN_UID (ultimate) < max_jump_chain
- && INSN_CODE (p) < max_jump_chain)
- {
- jump_chain[INSN_UID (p)]
- = jump_chain[INSN_UID (ultimate)];
- jump_chain[INSN_UID (ultimate)] = p;
- }
- changed = 1;
- continue;
- }
+ emit_insn_after (PATTERN (temp), temp1);
+ temp1 = NEXT_INSN (temp1);
}
- }
-#endif
- /* Detect a conditional jump going to the same place
- as an immediately following unconditional jump. */
- else if (this_is_condjump
- && (temp = next_active_insn (insn)) != 0
- && simplejump_p (temp)
- && (next_active_insn (JUMP_LABEL (insn))
- == next_active_insn (JUMP_LABEL (temp))))
- {
- rtx tem = temp;
- /* ??? Optional. Disables some optimizations, but makes
- gcov output more accurate with -O. */
- if (flag_test_coverage && !reload_completed)
- for (tem = insn; tem != temp; tem = NEXT_INSN (tem))
- if (GET_CODE (tem) == NOTE && NOTE_LINE_NUMBER (tem) > 0)
- break;
-
- if (tem == temp)
- {
- delete_jump (insn);
- changed = 1;
- continue;
- }
+ delete_insn (temp);
+ redirect_jump (insn, get_label_before (temp1), 1);
+ reallabelprev = prev_real_insn (temp1);
+ changed = 1;
+ next = NEXT_INSN (insn);
}
-#ifdef HAVE_trap
- /* Detect a conditional jump jumping over an unconditional trap. */
- else if (HAVE_trap
- && this_is_condjump && ! this_is_simplejump
- && reallabelprev != 0
- && GET_CODE (reallabelprev) == INSN
- && GET_CODE (PATTERN (reallabelprev)) == TRAP_IF
- && TRAP_CONDITION (PATTERN (reallabelprev)) == const_true_rtx
- && prev_active_insn (reallabelprev) == insn
- && no_labels_between_p (insn, reallabelprev)
- && (temp2 = get_condition (insn, &temp4))
- && can_reverse_comparison_p (temp2, insn))
+
+#ifdef HAVE_trap
+ /* Detect a conditional jump jumping over an unconditional trap. */
+ if (HAVE_trap
+ && this_is_any_condjump && this_is_onlyjump
+ && reallabelprev != 0
+ && GET_CODE (reallabelprev) == INSN
+ && GET_CODE (PATTERN (reallabelprev)) == TRAP_IF
+ && TRAP_CONDITION (PATTERN (reallabelprev)) == const_true_rtx
+ && prev_active_insn (reallabelprev) == insn
+ && no_labels_between_p (insn, reallabelprev)
+ && (temp2 = get_condition (insn, &temp4))
+ && ((reversed_code = reversed_comparison_code (temp2, insn))
+ != UNKNOWN))
{
- rtx new = gen_cond_trap (reverse_condition (GET_CODE (temp2)),
+ rtx new = gen_cond_trap (reversed_code,
XEXP (temp2, 0), XEXP (temp2, 1),
TRAP_CODE (PATTERN (reallabelprev)));
}
}
/* Detect a jump jumping to an unconditional trap. */
- else if (HAVE_trap && this_is_condjump
+ else if (HAVE_trap && this_is_onlyjump
&& (temp = next_active_insn (JUMP_LABEL (insn)))
&& GET_CODE (temp) == INSN
&& GET_CODE (PATTERN (temp)) == TRAP_IF
- && (this_is_simplejump
- || (temp2 = get_condition (insn, &temp4))))
+ && (this_is_any_uncondjump
+ || (this_is_any_condjump
+ && (temp2 = get_condition (insn, &temp4)))))
{
rtx tc = TRAP_CONDITION (PATTERN (temp));
if (tc == const_true_rtx
- || (! this_is_simplejump && rtx_equal_p (temp2, tc)))
+ || (! this_is_any_uncondjump && rtx_equal_p (temp2, tc)))
{
rtx new;
/* Replace an unconditional jump to a trap with a trap. */
- if (this_is_simplejump)
+ if (this_is_any_uncondjump)
{
emit_barrier_after (emit_insn_before (gen_trap (), insn));
delete_jump (insn);
/* If the trap condition and jump condition are mutually
exclusive, redirect the jump to the following insn. */
else if (GET_RTX_CLASS (GET_CODE (tc)) == '<'
- && ! this_is_simplejump
+ && this_is_any_condjump
&& swap_condition (GET_CODE (temp2)) == GET_CODE (tc)
&& rtx_equal_p (XEXP (tc, 0), XEXP (temp2, 0))
&& rtx_equal_p (XEXP (tc, 1), XEXP (temp2, 1))
- && redirect_jump (insn, get_label_after (temp)))
+ && redirect_jump (insn, get_label_after (temp), 1))
{
changed = 1;
continue;
}
}
#endif
-
- /* Detect a conditional jump jumping over an unconditional jump. */
-
- else if ((this_is_condjump || this_is_condjump_in_parallel)
- && ! this_is_simplejump
- && reallabelprev != 0
- && GET_CODE (reallabelprev) == JUMP_INSN
- && prev_active_insn (reallabelprev) == insn
- && no_labels_between_p (insn, reallabelprev)
- && simplejump_p (reallabelprev))
- {
- /* When we invert the unconditional jump, we will be
- decrementing the usage count of its old label.
- Make sure that we don't delete it now because that
- might cause the following code to be deleted. */
- rtx prev_uses = prev_nonnote_insn (reallabelprev);
- rtx prev_label = JUMP_LABEL (insn);
-
- if (prev_label)
- ++LABEL_NUSES (prev_label);
-
- if (invert_jump (insn, JUMP_LABEL (reallabelprev)))
- {
- /* It is very likely that if there are USE insns before
- this jump, they hold REG_DEAD notes. These REG_DEAD
- notes are no longer valid due to this optimization,
- and will cause the life-analysis that following passes
- (notably delayed-branch scheduling) to think that
- these registers are dead when they are not.
-
- To prevent this trouble, we just remove the USE insns
- from the insn chain. */
-
- while (prev_uses && GET_CODE (prev_uses) == INSN
- && GET_CODE (PATTERN (prev_uses)) == USE)
- {
- rtx useless = prev_uses;
- prev_uses = prev_nonnote_insn (prev_uses);
- delete_insn (useless);
- }
-
- delete_insn (reallabelprev);
- next = insn;
- changed = 1;
- }
-
- /* We can now safely delete the label if it is unreferenced
- since the delete_insn above has deleted the BARRIER. */
- if (prev_label && --LABEL_NUSES (prev_label) == 0)
- delete_insn (prev_label);
- continue;
- }
else
{
- /* Detect a jump to a jump. */
-
- nlabel = follow_jumps (JUMP_LABEL (insn));
- if (nlabel != JUMP_LABEL (insn)
- && redirect_jump (insn, nlabel))
- {
- changed = 1;
- next = insn;
- }
-
- /* Look for if (foo) bar; else break; */
- /* The insns look like this:
- insn = condjump label1;
- ...range1 (some insns)...
- jump label2;
- label1:
- ...range2 (some insns)...
- jump somewhere unconditionally
- label2: */
- {
- rtx label1 = next_label (insn);
- rtx range1end = label1 ? prev_active_insn (label1) : 0;
- /* Don't do this optimization on the first round, so that
- jump-around-a-jump gets simplified before we ask here
- whether a jump is unconditional.
-
- Also don't do it when we are called after reload since
- it will confuse reorg. */
- if (! first
- && (reload_completed ? ! flag_delayed_branch : 1)
- /* Make sure INSN is something we can invert. */
- && condjump_p (insn)
- && label1 != 0
- && JUMP_LABEL (insn) == label1
- && LABEL_NUSES (label1) == 1
- && GET_CODE (range1end) == JUMP_INSN
- && simplejump_p (range1end))
- {
- rtx label2 = next_label (label1);
- rtx range2end = label2 ? prev_active_insn (label2) : 0;
- if (range1end != range2end
- && JUMP_LABEL (range1end) == label2
- && GET_CODE (range2end) == JUMP_INSN
- && GET_CODE (NEXT_INSN (range2end)) == BARRIER
- /* Invert the jump condition, so we
- still execute the same insns in each case. */
- && invert_jump (insn, label1))
- {
- rtx range1beg = next_active_insn (insn);
- rtx range2beg = next_active_insn (label1);
- rtx range1after, range2after;
- rtx range1before, range2before;
- rtx rangenext;
-
- /* Include in each range any notes before it, to be
- sure that we get the line number note if any, even
- if there are other notes here. */
- while (PREV_INSN (range1beg)
- && GET_CODE (PREV_INSN (range1beg)) == NOTE)
- range1beg = PREV_INSN (range1beg);
-
- while (PREV_INSN (range2beg)
- && GET_CODE (PREV_INSN (range2beg)) == NOTE)
- range2beg = PREV_INSN (range2beg);
-
- /* Don't move NOTEs for blocks or loops; shift them
- outside the ranges, where they'll stay put. */
- range1beg = squeeze_notes (range1beg, range1end);
- range2beg = squeeze_notes (range2beg, range2end);
-
- /* Get current surrounds of the 2 ranges. */
- range1before = PREV_INSN (range1beg);
- range2before = PREV_INSN (range2beg);
- range1after = NEXT_INSN (range1end);
- range2after = NEXT_INSN (range2end);
-
- /* Splice range2 where range1 was. */
- NEXT_INSN (range1before) = range2beg;
- PREV_INSN (range2beg) = range1before;
- NEXT_INSN (range2end) = range1after;
- PREV_INSN (range1after) = range2end;
- /* Splice range1 where range2 was. */
- NEXT_INSN (range2before) = range1beg;
- PREV_INSN (range1beg) = range2before;
- NEXT_INSN (range1end) = range2after;
- PREV_INSN (range2after) = range1end;
-
- /* Check for a loop end note between the end of
- range2, and the next code label. If there is one,
- then what we have really seen is
- if (foo) break; end_of_loop;
- and moved the break sequence outside the loop.
- We must move the LOOP_END note to where the
- loop really ends now, or we will confuse loop
- optimization. Stop if we find a LOOP_BEG note
- first, since we don't want to move the LOOP_END
- note in that case. */
- for (;range2after != label2; range2after = rangenext)
- {
- rangenext = NEXT_INSN (range2after);
- if (GET_CODE (range2after) == NOTE)
- {
- if (NOTE_LINE_NUMBER (range2after)
- == NOTE_INSN_LOOP_END)
- {
- NEXT_INSN (PREV_INSN (range2after))
- = rangenext;
- PREV_INSN (rangenext)
- = PREV_INSN (range2after);
- PREV_INSN (range2after)
- = PREV_INSN (range1beg);
- NEXT_INSN (range2after) = range1beg;
- NEXT_INSN (PREV_INSN (range1beg))
- = range2after;
- PREV_INSN (range1beg) = range2after;
- }
- else if (NOTE_LINE_NUMBER (range2after)
- == NOTE_INSN_LOOP_BEG)
- break;
- }
- }
- changed = 1;
- continue;
- }
- }
- }
-
/* Now that the jump has been tensioned,
try cross jumping: check for identical code
before the jump and before its target label. */
rtx last_note = 0;
for (insn = f; insn; insn = NEXT_INSN (insn))
- if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0)
+ if (GET_CODE (insn) == NOTE)
{
- /* Delete this note if it is identical to previous note. */
- if (last_note
- && NOTE_SOURCE_FILE (insn) == NOTE_SOURCE_FILE (last_note)
- && NOTE_LINE_NUMBER (insn) == NOTE_LINE_NUMBER (last_note))
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG)
+ /* Any previous line note was for the prologue; gdb wants a new
+ note after the prologue even if it is for the same line. */
+ last_note = NULL_RTX;
+ else if (NOTE_LINE_NUMBER (insn) >= 0)
{
- delete_insn (insn);
- continue;
- }
+ /* Delete this note if it is identical to previous note. */
+ if (last_note
+ && NOTE_SOURCE_FILE (insn) == NOTE_SOURCE_FILE (last_note)
+ && NOTE_LINE_NUMBER (insn) == NOTE_LINE_NUMBER (last_note))
+ {
+ delete_insn (insn);
+ continue;
+ }
- last_note = insn;
+ last_note = insn;
+ }
}
}
-#ifdef HAVE_return
- if (HAVE_return)
- {
- /* If we fall through to the epilogue, see if we can insert a RETURN insn
- in front of it. If the machine allows it at this point (we might be
- after reload for a leaf routine), it will improve optimization for it
- to be there. We do this both here and at the start of this pass since
- the RETURN might have been deleted by some of our optimizations. */
- insn = get_last_insn ();
- while (insn && GET_CODE (insn) == NOTE)
- insn = PREV_INSN (insn);
-
- if (insn && GET_CODE (insn) != BARRIER)
- {
- emit_jump_insn (gen_return ());
- emit_barrier ();
- }
- }
-#endif
-
- can_reach_end = calculate_can_reach_end (last_insn, 0, 1);
-
- /* Show JUMP_CHAIN no longer valid. */
+end:
+ /* Clean up. */
+ free (jump_chain);
jump_chain = 0;
}
\f
return largest_uid;
}
-/* Delete insns following barriers, up to next label.
+/* Delete insns following barriers, up to next label.
Also delete no-op jumps created by gcse. */
+
static void
delete_barrier_successors (f)
rtx f;
{
rtx insn;
+ rtx set;
for (insn = f; insn;)
{
if (GET_CODE (insn) == BARRIER)
{
insn = NEXT_INSN (insn);
+
+ never_reached_warning (insn);
+
while (insn != 0 && GET_CODE (insn) != CODE_LABEL)
{
if (GET_CODE (insn) == NOTE
}
/* INSN is now the code_label. */
}
+
/* Also remove (set (pc) (pc)) insns which can be created by
gcse. We eliminate such insns now to avoid having them
cause problems later. */
else if (GET_CODE (insn) == JUMP_INSN
- && SET_SRC (PATTERN (insn)) == pc_rtx
- && SET_DEST (PATTERN (insn)) == pc_rtx)
+ && (set = pc_set (insn)) != NULL
+ && SET_SRC (set) == pc_rtx
+ && SET_DEST (set) == pc_rtx
+ && onlyjump_p (insn))
insn = delete_insn (insn);
else
rtx insn;
for (insn = f; insn; insn = NEXT_INSN (insn))
- if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ if (INSN_P (insn))
{
- mark_jump_label (PATTERN (insn), insn, cross_jump);
+ if (GET_CODE (insn) == CALL_INSN
+ && GET_CODE (PATTERN (insn)) == CALL_PLACEHOLDER)
+ {
+ mark_all_labels (XEXP (PATTERN (insn), 0), cross_jump);
+ mark_all_labels (XEXP (PATTERN (insn), 1), cross_jump);
+ mark_all_labels (XEXP (PATTERN (insn), 2), cross_jump);
+ continue;
+ }
+
+ mark_jump_label (PATTERN (insn), insn, cross_jump, 0);
if (! INSN_DELETED_P (insn) && GET_CODE (insn) == JUMP_INSN)
{
+ /* When we know the LABEL_REF contained in a REG used in
+ an indirect jump, we'll have a REG_LABEL note so that
+ flow can tell where it's going. */
+ if (JUMP_LABEL (insn) == 0)
+ {
+ rtx label_note = find_reg_note (insn, REG_LABEL, NULL_RTX);
+ if (label_note)
+ {
+ /* Put a LABEL_REF around the REG_LABEL note, so
+ that we can canonicalize it. */
+ rtx label_ref = gen_rtx_LABEL_REF (VOIDmode,
+ XEXP (label_note, 0));
+
+ mark_jump_label (label_ref, insn, cross_jump, 0);
+ XEXP (label_note, 0) = XEXP (label_ref, 0);
+ JUMP_LABEL (insn) = XEXP (label_note, 0);
+ }
+ }
if (JUMP_LABEL (insn) != 0 && simplejump_p (insn))
{
jump_chain[INSN_UID (insn)]
rtx final = NULL_RTX;
rtx insn;
- for (insn = f; insn; )
+ for (insn = f; insn;)
{
- if (GET_CODE (insn) == CODE_LABEL && LABEL_NUSES (insn) == 0)
+ if (GET_CODE (insn) == CODE_LABEL
+ && LABEL_NUSES (insn) == 0
+ && LABEL_ALTERNATE_NAME (insn) == NULL)
insn = delete_insn (insn);
else
{
{
rtx insn, next;
- for (insn = f; insn; )
+ for (insn = f; insn;)
{
next = NEXT_INSN (insn);
{
register rtx body = PATTERN (insn);
-/* Combine stack_adjusts with following push_insns. */
-#ifdef PUSH_ROUNDING
- if (GET_CODE (body) == SET
- && SET_DEST (body) == stack_pointer_rtx
- && GET_CODE (SET_SRC (body)) == PLUS
- && XEXP (SET_SRC (body), 0) == stack_pointer_rtx
- && GET_CODE (XEXP (SET_SRC (body), 1)) == CONST_INT
- && INTVAL (XEXP (SET_SRC (body), 1)) > 0)
- {
- rtx p;
- rtx stack_adjust_insn = insn;
- int stack_adjust_amount = INTVAL (XEXP (SET_SRC (body), 1));
- int total_pushed = 0;
- int pushes = 0;
-
- /* Find all successive push insns. */
- p = insn;
- /* Don't convert more than three pushes;
- that starts adding too many displaced addresses
- and the whole thing starts becoming a losing
- proposition. */
- while (pushes < 3)
- {
- rtx pbody, dest;
- p = next_nonnote_insn (p);
- if (p == 0 || GET_CODE (p) != INSN)
- break;
- pbody = PATTERN (p);
- if (GET_CODE (pbody) != SET)
- break;
- dest = SET_DEST (pbody);
- /* Allow a no-op move between the adjust and the push. */
- if (GET_CODE (dest) == REG
- && GET_CODE (SET_SRC (pbody)) == REG
- && REGNO (dest) == REGNO (SET_SRC (pbody)))
- continue;
- if (! (GET_CODE (dest) == MEM
- && GET_CODE (XEXP (dest, 0)) == POST_INC
- && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
- break;
- pushes++;
- if (total_pushed + GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)))
- > stack_adjust_amount)
- break;
- total_pushed += GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
- }
-
- /* Discard the amount pushed from the stack adjust;
- maybe eliminate it entirely. */
- if (total_pushed >= stack_adjust_amount)
- {
- delete_computation (stack_adjust_insn);
- total_pushed = stack_adjust_amount;
- }
- else
- XEXP (SET_SRC (PATTERN (stack_adjust_insn)), 1)
- = GEN_INT (stack_adjust_amount - total_pushed);
-
- /* Change the appropriate push insns to ordinary stores. */
- p = insn;
- while (total_pushed > 0)
- {
- rtx pbody, dest;
- p = next_nonnote_insn (p);
- if (GET_CODE (p) != INSN)
- break;
- pbody = PATTERN (p);
- if (GET_CODE (pbody) != SET)
- break;
- dest = SET_DEST (pbody);
- /* Allow a no-op move between the adjust and the push. */
- if (GET_CODE (dest) == REG
- && GET_CODE (SET_SRC (pbody)) == REG
- && REGNO (dest) == REGNO (SET_SRC (pbody)))
- continue;
- if (! (GET_CODE (dest) == MEM
- && GET_CODE (XEXP (dest, 0)) == POST_INC
- && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
- break;
- total_pushed -= GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
- /* If this push doesn't fully fit in the space
- of the stack adjust that we deleted,
- make another stack adjust here for what we
- didn't use up. There should be peepholes
- to recognize the resulting sequence of insns. */
- if (total_pushed < 0)
- {
- emit_insn_before (gen_add2_insn (stack_pointer_rtx,
- GEN_INT (- total_pushed)),
- p);
- break;
- }
- XEXP (dest, 0)
- = plus_constant (stack_pointer_rtx, total_pushed);
- }
- }
-#endif
-
/* Detect and delete no-op move instructions
resulting from not allocating a parameter in a register. */
|| dreg != sreg)
break;
}
-
+
if (i < 0)
delete_insn (insn);
}
}
}
-/* See if there is still a NOTE_INSN_FUNCTION_END in this function.
- If so indicate that this function can drop off the end by returning
- 1, else return 0.
-
- CHECK_DELETED indicates whether we must check if the note being
- searched for has the deleted flag set.
-
- DELETE_FINAL_NOTE indicates whether we should delete the note
- if we find it. */
-
-static int
-calculate_can_reach_end (last, check_deleted, delete_final_note)
- rtx last;
- int check_deleted;
- int delete_final_note;
-{
- rtx insn = last;
- int n_labels = 1;
-
- while (insn != NULL_RTX)
- {
- int ok = 0;
-
- /* One label can follow the end-note: the return label. */
- if (GET_CODE (insn) == CODE_LABEL && n_labels-- > 0)
- ok = 1;
- /* Ordinary insns can follow it if returning a structure. */
- else if (GET_CODE (insn) == INSN)
- ok = 1;
- /* If machine uses explicit RETURN insns, no epilogue,
- then one of them follows the note. */
- else if (GET_CODE (insn) == JUMP_INSN
- && GET_CODE (PATTERN (insn)) == RETURN)
- ok = 1;
- /* A barrier can follow the return insn. */
- else if (GET_CODE (insn) == BARRIER)
- ok = 1;
- /* Other kinds of notes can follow also. */
- else if (GET_CODE (insn) == NOTE
- && NOTE_LINE_NUMBER (insn) != NOTE_INSN_FUNCTION_END)
- ok = 1;
-
- if (ok != 1)
- break;
-
- insn = PREV_INSN (insn);
- }
-
- /* See if we backed up to the appropriate type of note. */
- if (insn != NULL_RTX
- && GET_CODE (insn) == NOTE
- && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_END
- && (check_deleted == 0
- || ! INSN_DELETED_P (insn)))
- {
- if (delete_final_note)
- delete_insn (insn);
- return 1;
- }
-
- return 0;
-}
-
/* LOOP_START is a NOTE_INSN_LOOP_BEG note that is followed by an unconditional
jump. Assume that this unconditional jump is to the exit test code. If
the code is sufficiently simple, make a copy of it before INSN,
rtx loop_start;
{
rtx insn, set, reg, p, link;
- rtx copy = 0;
+ rtx copy = 0, first_copy = 0;
int num_insns = 0;
rtx exitcode = NEXT_INSN (JUMP_LABEL (next_nonnote_insn (loop_start)));
rtx lastexit;
remove_note (insn, p);
if (++num_insns > 20
|| find_reg_note (insn, REG_RETVAL, NULL_RTX)
- || find_reg_note (insn, REG_LIBCALL, NULL_RTX)
- || asm_noperands (PATTERN (insn)) > 0)
+ || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
return 0;
break;
default:
/* We can do the replacement. Allocate reg_map if this is the
first replacement we found. */
if (reg_map == 0)
- {
- reg_map = (rtx *) alloca (max_reg * sizeof (rtx));
- bzero ((char *) reg_map, max_reg * sizeof (rtx));
- }
+ reg_map = (rtx *) xcalloc (max_reg, sizeof (rtx));
REG_LOOP_TEST_P (reg) = 1;
/* Now copy each insn. */
for (insn = exitcode; insn != lastexit; insn = NEXT_INSN (insn))
- switch (GET_CODE (insn))
- {
- case BARRIER:
- copy = emit_barrier_before (loop_start);
- break;
- case NOTE:
- /* Only copy line-number notes. */
- if (NOTE_LINE_NUMBER (insn) >= 0)
- {
- copy = emit_note_before (NOTE_LINE_NUMBER (insn), loop_start);
- NOTE_SOURCE_FILE (copy) = NOTE_SOURCE_FILE (insn);
- }
- break;
+ {
+ switch (GET_CODE (insn))
+ {
+ case BARRIER:
+ copy = emit_barrier_before (loop_start);
+ break;
+ case NOTE:
+ /* Only copy line-number notes. */
+ if (NOTE_LINE_NUMBER (insn) >= 0)
+ {
+ copy = emit_note_before (NOTE_LINE_NUMBER (insn), loop_start);
+ NOTE_SOURCE_FILE (copy) = NOTE_SOURCE_FILE (insn);
+ }
+ break;
- case INSN:
- copy = emit_insn_before (copy_rtx (PATTERN (insn)), loop_start);
- if (reg_map)
- replace_regs (PATTERN (copy), reg_map, max_reg, 1);
-
- mark_jump_label (PATTERN (copy), copy, 0);
-
- /* Copy all REG_NOTES except REG_LABEL since mark_jump_label will
- make them. */
- for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- if (REG_NOTE_KIND (link) != REG_LABEL)
- REG_NOTES (copy)
- = copy_rtx (gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
- XEXP (link, 0),
- REG_NOTES (copy)));
- if (reg_map && REG_NOTES (copy))
- replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
- break;
+ case INSN:
+ copy = emit_insn_before (copy_insn (PATTERN (insn)), loop_start);
+ if (reg_map)
+ replace_regs (PATTERN (copy), reg_map, max_reg, 1);
- case JUMP_INSN:
- copy = emit_jump_insn_before (copy_rtx (PATTERN (insn)), loop_start);
- if (reg_map)
- replace_regs (PATTERN (copy), reg_map, max_reg, 1);
- mark_jump_label (PATTERN (copy), copy, 0);
- if (REG_NOTES (insn))
- {
- REG_NOTES (copy) = copy_rtx (REG_NOTES (insn));
- if (reg_map)
- replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
- }
-
- /* If this is a simple jump, add it to the jump chain. */
+ mark_jump_label (PATTERN (copy), copy, 0, 0);
- if (INSN_UID (copy) < max_jump_chain && JUMP_LABEL (copy)
- && simplejump_p (copy))
- {
- jump_chain[INSN_UID (copy)]
- = jump_chain[INSN_UID (JUMP_LABEL (copy))];
- jump_chain[INSN_UID (JUMP_LABEL (copy))] = copy;
- }
- break;
+ /* Copy all REG_NOTES except REG_LABEL since mark_jump_label will
+ make them. */
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) != REG_LABEL)
+ {
+ if (GET_CODE (link) == EXPR_LIST)
+ REG_NOTES (copy)
+ = copy_insn_1 (gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
+ XEXP (link, 0),
+ REG_NOTES (copy)));
+ else
+ REG_NOTES (copy)
+ = copy_insn_1 (gen_rtx_INSN_LIST (REG_NOTE_KIND (link),
+ XEXP (link, 0),
+ REG_NOTES (copy)));
+ }
- default:
- abort ();
- }
+ if (reg_map && REG_NOTES (copy))
+ replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
+ break;
+
+ case JUMP_INSN:
+ copy = emit_jump_insn_before (copy_insn (PATTERN (insn)),
+ loop_start);
+ if (reg_map)
+ replace_regs (PATTERN (copy), reg_map, max_reg, 1);
+ mark_jump_label (PATTERN (copy), copy, 0, 0);
+ if (REG_NOTES (insn))
+ {
+ REG_NOTES (copy) = copy_insn_1 (REG_NOTES (insn));
+ if (reg_map)
+ replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
+ }
+
+ /* If this is a simple jump, add it to the jump chain. */
+
+ if (INSN_UID (copy) < max_jump_chain && JUMP_LABEL (copy)
+ && simplejump_p (copy))
+ {
+ jump_chain[INSN_UID (copy)]
+ = jump_chain[INSN_UID (JUMP_LABEL (copy))];
+ jump_chain[INSN_UID (JUMP_LABEL (copy))] = copy;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Record the first insn we copied. We need it so that we can
+ scan the copied insns for new pseudo registers. */
+ if (! first_copy)
+ first_copy = copy;
+ }
/* Now clean up by emitting a jump to the end label and deleting the jump
at the start of the loop. */
{
copy = emit_jump_insn_before (gen_jump (get_label_after (insn)),
loop_start);
- mark_jump_label (PATTERN (copy), copy, 0);
+
+ /* Record the first insn we copied. We need it so that we can
+ scan the copied insns for new pseudo registers. This may not
+ be strictly necessary since we should have copied at least one
+ insn above. But I am going to be safe. */
+ if (! first_copy)
+ first_copy = copy;
+
+ mark_jump_label (PATTERN (copy), copy, 0, 0);
if (INSN_UID (copy) < max_jump_chain
&& INSN_UID (JUMP_LABEL (copy)) < max_jump_chain)
{
emit_barrier_before (loop_start);
}
+ /* Now scan from the first insn we copied to the last insn we copied
+ (copy) for new pseudo registers. Do this after the code to jump to
+ the end label since that might create a new pseudo too. */
+ reg_scan_update (first_copy, copy, max_reg);
+
/* Mark the exit code as the virtual top of the converted loop. */
emit_note_before (NOTE_INSN_LOOP_VTOP, exitcode);
delete_insn (next_nonnote_insn (loop_start));
+ /* Clean up. */
+ if (reg_map)
+ free (reg_map);
+
return 1;
}
\f
-/* Move all block-beg, block-end, loop-beg, loop-cont, loop-vtop, and
- loop-end notes between START and END out before START. Assume that
- END is not such a note. START may be such a note. Returns the value
- of the new starting insn, which may be different if the original start
- was such a note. */
+/* Move all block-beg, block-end, loop-beg, loop-cont, loop-vtop, loop-end,
+ notes between START and END out before START. Assume that END is not
+ such a note. START may be such a note. Returns the value of the new
+ starting insn, which may be different if the original start was such a
+ note. */
rtx
squeeze_notes (start, end)
p1 = PATTERN (i1);
p2 = PATTERN (i2);
-
+
/* If this is a CALL_INSN, compare register usage information.
If we don't check this on stack register machines, the two
CALL_INSNs might be merged leaving reg-stack.c with mismatching
}
}
else
- redirect_jump (insn, label);
+ redirect_jump (insn, label, 1);
/* Delete the matching insns before the jump. Also, remove any REG_EQUAL
or REG_EQUIV note in the NEWLPOS stream that isn't also present in
{
rtx cinsn, ctarget;
enum rtx_code codei, codet;
+ rtx set, tset;
- if (simplejump_p (insn) || ! condjump_p (insn)
- || simplejump_p (target)
+ if (! any_condjump_p (insn)
+ || any_uncondjump_p (target)
|| target != prev_real_insn (JUMP_LABEL (insn)))
return 0;
+ set = pc_set (insn);
+ tset = pc_set (target);
- cinsn = XEXP (SET_SRC (PATTERN (insn)), 0);
- ctarget = XEXP (SET_SRC (PATTERN (target)), 0);
+ cinsn = XEXP (SET_SRC (set), 0);
+ ctarget = XEXP (SET_SRC (tset), 0);
codei = GET_CODE (cinsn);
codet = GET_CODE (ctarget);
- if (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx)
+ if (XEXP (SET_SRC (set), 1) == pc_rtx)
{
- if (! can_reverse_comparison_p (cinsn, insn))
+ codei = reversed_comparison_code (cinsn, insn);
+ if (codei == UNKNOWN)
return 0;
- codei = reverse_condition (codei);
}
- if (XEXP (SET_SRC (PATTERN (target)), 2) == pc_rtx)
+ if (XEXP (SET_SRC (tset), 2) == pc_rtx)
{
- if (! can_reverse_comparison_p (ctarget, target))
+ codet = reversed_comparison_code (ctarget, target);
+ if (codet == UNKNOWN)
return 0;
- codet = reverse_condition (codet);
}
return (codei == codet
&& rtx_renumbered_equal_p (XEXP (cinsn, 1), XEXP (ctarget, 1)));
}
\f
-/* Given a comparison, COMPARISON, inside a conditional jump insn, INSN,
- return non-zero if it is safe to reverse this comparison. It is if our
- floating-point is not IEEE, if this is an NE or EQ comparison, or if
- this is known to be an integer comparison. */
-
-int
-can_reverse_comparison_p (comparison, insn)
- rtx comparison;
- rtx insn;
+/* Given a comparison (CODE ARG0 ARG1), inside an insn, INSN, return the code
+ of reversed comparison if it is possible to do so. Otherwise return UNKNOWN.
+ UNKNOWN may be returned in case we are having CC_MODE compare and we don't
+ know whether its source is a floating point or an integer comparison. Machine
+ description should define REVERSIBLE_CC_MODE and REVERSE_CONDITION macros
+ to help this function avoid overhead in these cases. */
+enum rtx_code
+reversed_comparison_code_parts (code, arg0, arg1, insn)
+ rtx insn, arg0, arg1;
+ enum rtx_code code;
{
- rtx arg0;
+ enum machine_mode mode;
/* If this is not actually a comparison, we can't reverse it. */
- if (GET_RTX_CLASS (GET_CODE (comparison)) != '<')
- return 0;
-
- if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
- /* If this is an NE comparison, it is safe to reverse it to an EQ
- comparison and vice versa, even for floating point. If no operands
- are NaNs, the reversal is valid. If some operand is a NaN, EQ is
- always false and NE is always true, so the reversal is also valid. */
- || flag_fast_math
- || GET_CODE (comparison) == NE
- || GET_CODE (comparison) == EQ)
- return 1;
+ if (GET_RTX_CLASS (code) != '<')
+ return UNKNOWN;
+
+ mode = GET_MODE (arg0);
+ if (mode == VOIDmode)
+ mode = GET_MODE (arg1);
+
+ /* First see if the machine description supplies us a way to reverse the comparison.
+ Give it priority over everything else to allow machine description to do
+ tricks. */
+#ifdef REVERSIBLE_CC_MODE
+ if (GET_MODE_CLASS (mode) == MODE_CC
+ && REVERSIBLE_CC_MODE (mode))
+ {
+#ifdef REVERSE_CONDITION
+ return REVERSE_CONDITION (code, mode);
+#endif
+ return reverse_condition (code);
+ }
+#endif
- arg0 = XEXP (comparison, 0);
+ /* Try few special cases based on the comparison code. */
+ switch (code)
+ {
+ case GEU:
+ case GTU:
+ case LEU:
+ case LTU:
+ case NE:
+ case EQ:
+ /* It is always safe to reverse EQ and NE, even for the floating
+ point. Similarly the unsigned comparisons are never used for
+ floating point so we can reverse them in the default way. */
+ return reverse_condition (code);
+ case ORDERED:
+ case UNORDERED:
+ case LTGT:
+ case UNEQ:
+ /* In case we already see unordered comparison, we can be sure to
+ be dealing with floating point so we don't need any more tests. */
+ return reverse_condition_maybe_unordered (code);
+ case UNLT:
+ case UNLE:
+ case UNGT:
+ case UNGE:
+ /* We don't have safe way to reverse these yet. */
+ return UNKNOWN;
+ default:
+ break;
+ }
- /* Make sure ARG0 is one of the actual objects being compared. If we
- can't do this, we can't be sure the comparison can be reversed.
+ /* In case we give up IEEE compatibility, all comparisons are reversible. */
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || flag_unsafe_math_optimizations)
+ return reverse_condition (code);
- Handle cc0 and a MODE_CC register. */
- if ((GET_CODE (arg0) == REG && GET_MODE_CLASS (GET_MODE (arg0)) == MODE_CC)
+ if (GET_MODE_CLASS (mode) == MODE_CC
#ifdef HAVE_cc0
|| arg0 == cc0_rtx
#endif
)
{
- rtx prev = prev_nonnote_insn (insn);
- rtx set;
-
- /* If the comparison itself was a loop invariant, it could have been
- hoisted out of the loop. If we proceed to unroll such a loop, then
- we may not be able to find the comparison when copying the loop.
-
- Returning zero in that case is the safe thing to do. */
- if (prev == 0)
- return 0;
+ rtx prev;
+ /* Try to search for the comparison to determine the real mode.
+ This code is expensive, but with sane machine description it
+ will never be used, since REVERSIBLE_CC_MODE will return true
+ in all cases. */
+ if (! insn)
+ return UNKNOWN;
+
+ for (prev = prev_nonnote_insn (insn);
+ prev != 0 && GET_CODE (prev) != CODE_LABEL;
+ prev = prev_nonnote_insn (prev))
+ {
+ rtx set = set_of (arg0, prev);
+ if (set && GET_CODE (set) == SET
+ && rtx_equal_p (SET_DEST (set), arg0))
+ {
+ rtx src = SET_SRC (set);
- set = single_set (prev);
- if (set == 0 || SET_DEST (set) != arg0)
- return 0;
+ if (GET_CODE (src) == COMPARE)
+ {
+ rtx comparison = src;
+ arg0 = XEXP (src, 0);
+ mode = GET_MODE (arg0);
+ if (mode == VOIDmode)
+ mode = GET_MODE (XEXP (comparison, 1));
+ break;
+ }
+ /* We can get past reg-reg moves. This may be useful for models
+ of i387 comparisons that first move flag registers around. */
+ if (REG_P (src))
+ {
+ arg0 = src;
+ continue;
+ }
+ }
+ /* If the register is clobbered in some way we cannot understand,
+ give up. */
+ if (set)
+ return UNKNOWN;
+ }
+ }
- arg0 = SET_SRC (set);
+ /* An integer condition. */
+ if (GET_CODE (arg0) == CONST_INT
+ || (GET_MODE (arg0) != VOIDmode
+ && GET_MODE_CLASS (mode) != MODE_CC
+ && ! FLOAT_MODE_P (mode)))
+ return reverse_condition (code);
- if (GET_CODE (arg0) == COMPARE)
- arg0 = XEXP (arg0, 0);
- }
+ return UNKNOWN;
+}
- /* We can reverse this if ARG0 is a CONST_INT or if its mode is
- not VOIDmode and neither a MODE_CC nor MODE_FLOAT type. */
- return (GET_CODE (arg0) == CONST_INT
- || (GET_MODE (arg0) != VOIDmode
- && GET_MODE_CLASS (GET_MODE (arg0)) != MODE_CC
- && GET_MODE_CLASS (GET_MODE (arg0)) != MODE_FLOAT));
+/* A wrapper around the previous function to take COMPARISON as an rtx
+ expression. This simplifies many callers. */
+enum rtx_code
+reversed_comparison_code (comparison, insn)
+ rtx comparison, insn;
+{
+ if (GET_RTX_CLASS (GET_CODE (comparison)) != '<')
+ return UNKNOWN;
+ return reversed_comparison_code_parts (GET_CODE (comparison),
+ XEXP (comparison, 0),
+ XEXP (comparison, 1), insn);
}
+\f
+/* Given an rtx-code for a comparison, return the code for the negated
+ comparison. If no such code exists, return UNKNOWN.
-/* Given an rtx-code for a comparison, return the code
- for the negated comparison.
- WATCH OUT! reverse_condition is not safe to use on a jump
- that might be acting on the results of an IEEE floating point comparison,
- because of the special treatment of non-signaling nans in comparisons.
- Use can_reverse_comparison_p to be sure. */
+ WATCH OUT! reverse_condition is not safe to use on a jump that might
+ be acting on the results of an IEEE floating point comparison, because
+ of the special treatment of non-signaling nans in comparisons.
+ Use reversed_comparison_code instead. */
enum rtx_code
reverse_condition (code)
{
case EQ:
return NE;
-
case NE:
return EQ;
-
case GT:
return LE;
-
case GE:
return LT;
+ case LT:
+ return GE;
+ case LE:
+ return GT;
+ case GTU:
+ return LEU;
+ case GEU:
+ return LTU;
+ case LTU:
+ return GEU;
+ case LEU:
+ return GTU;
+ case UNORDERED:
+ return ORDERED;
+ case ORDERED:
+ return UNORDERED;
+
+ case UNLT:
+ case UNLE:
+ case UNGT:
+ case UNGE:
+ case UNEQ:
+ case LTGT:
+ return UNKNOWN;
+
+ default:
+ abort ();
+ }
+}
+
+/* Similar, but we're allowed to generate unordered comparisons, which
+ makes it safe for IEEE floating-point. Of course, we have to recognize
+ that the target will support them too... */
+
+enum rtx_code
+reverse_condition_maybe_unordered (code)
+ enum rtx_code code;
+{
+ /* Non-IEEE formats don't have unordered conditions. */
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
+ return reverse_condition (code);
+ switch (code)
+ {
+ case EQ:
+ return NE;
+ case NE:
+ return EQ;
+ case GT:
+ return UNLE;
+ case GE:
+ return UNLT;
case LT:
- return GE;
-
+ return UNGE;
case LE:
+ return UNGT;
+ case LTGT:
+ return UNEQ;
+ case UNORDERED:
+ return ORDERED;
+ case ORDERED:
+ return UNORDERED;
+ case UNLT:
+ return GE;
+ case UNLE:
return GT;
-
- case GTU:
- return LEU;
-
- case GEU:
- return LTU;
-
- case LTU:
- return GEU;
-
- case LEU:
- return GTU;
+ case UNGT:
+ return LE;
+ case UNGE:
+ return LT;
+ case UNEQ:
+ return LTGT;
default:
abort ();
- return UNKNOWN;
}
}
{
case EQ:
case NE:
+ case UNORDERED:
+ case ORDERED:
+ case UNEQ:
+ case LTGT:
return code;
case GT:
return LT;
-
case GE:
return LE;
-
case LT:
return GT;
-
case LE:
return GE;
-
case GTU:
return LTU;
-
case GEU:
return LEU;
-
case LTU:
return GTU;
-
case LEU:
return GEU;
+ case UNLT:
+ return UNGT;
+ case UNLE:
+ return UNGE;
+ case UNGT:
+ return UNLT;
+ case UNGE:
+ return UNLE;
default:
abort ();
- return UNKNOWN;
}
}
case GT:
return GTU;
-
case GE:
return GEU;
-
case LT:
return LTU;
-
case LE:
return LEU;
case GTU:
return GT;
-
case GEU:
return GE;
-
case LTU:
return LT;
-
case LEU:
return LE;
comparison_dominates_p (code1, code2)
enum rtx_code code1, code2;
{
+ /* UNKNOWN comparison codes can happen as a result of trying to revert
+ comparison codes.
+ They can't match anything, so we have to reject them here. */
+ if (code1 == UNKNOWN || code2 == UNKNOWN)
+ return 0;
+
if (code1 == code2)
return 1;
switch (code1)
{
+ case UNEQ:
+ if (code2 == UNLE || code2 == UNGE)
+ return 1;
+ break;
+
case EQ:
- if (code2 == LE || code2 == LEU || code2 == GE || code2 == GEU)
+ if (code2 == LE || code2 == LEU || code2 == GE || code2 == GEU
+ || code2 == ORDERED)
+ return 1;
+ break;
+
+ case UNLT:
+ if (code2 == UNLE || code2 == NE)
return 1;
break;
case LT:
- if (code2 == LE || code2 == NE)
+ if (code2 == LE || code2 == NE || code2 == ORDERED || code2 == LTGT)
+ return 1;
+ break;
+
+ case UNGT:
+ if (code2 == UNGE || code2 == NE)
return 1;
break;
case GT:
- if (code2 == GE || code2 == NE)
+ if (code2 == GE || code2 == NE || code2 == ORDERED || code2 == LTGT)
+ return 1;
+ break;
+
+ case GE:
+ case LE:
+ if (code2 == ORDERED)
+ return 1;
+ break;
+
+ case LTGT:
+ if (code2 == NE || code2 == ORDERED)
return 1;
break;
if (code2 == GEU || code2 == NE)
return 1;
break;
-
+
+ case UNORDERED:
+ if (code2 == NE || code2 == UNEQ || code2 == UNLE || code2 == UNLT
+ || code2 == UNGE || code2 == UNGT)
+ return 1;
+ break;
+
default:
break;
}
}
/* Return nonzero if INSN is a (possibly) conditional jump
- and nothing more. */
+ and nothing more.
+
+ Use of this function is deprecated, since we need to support combined
+ branch and compare insns. Use any_condjump_p instead whenever possible. */
int
condjump_p (insn)
rtx insn;
{
register rtx x = PATTERN (insn);
- if (GET_CODE (x) != SET)
- return 0;
- if (GET_CODE (SET_DEST (x)) != PC)
- return 0;
- if (GET_CODE (SET_SRC (x)) == LABEL_REF)
- return 1;
- if (GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
+
+ if (GET_CODE (x) != SET
+ || GET_CODE (SET_DEST (x)) != PC)
return 0;
- if (XEXP (SET_SRC (x), 2) == pc_rtx
- && (GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF
- || GET_CODE (XEXP (SET_SRC (x), 1)) == RETURN))
- return 1;
- if (XEXP (SET_SRC (x), 1) == pc_rtx
- && (GET_CODE (XEXP (SET_SRC (x), 2)) == LABEL_REF
- || GET_CODE (XEXP (SET_SRC (x), 2)) == RETURN))
+
+ x = SET_SRC (x);
+ if (GET_CODE (x) == LABEL_REF)
return 1;
+ else
+ return (GET_CODE (x) == IF_THEN_ELSE
+ && ((GET_CODE (XEXP (x, 2)) == PC
+ && (GET_CODE (XEXP (x, 1)) == LABEL_REF
+ || GET_CODE (XEXP (x, 1)) == RETURN))
+ || (GET_CODE (XEXP (x, 1)) == PC
+ && (GET_CODE (XEXP (x, 2)) == LABEL_REF
+ || GET_CODE (XEXP (x, 2)) == RETURN))));
+
return 0;
}
-/* Return nonzero if INSN is a (possibly) conditional jump
- and nothing more. */
+/* Return nonzero if INSN is a (possibly) conditional jump inside a
+ PARALLEL.
+
+ Use of this function is deprecated, since we need to support combined
+ branch and compare insns. Use any_condjump_p instead whenever possible. */
int
condjump_in_parallel_p (insn)
return 0;
}
+/* Return set of PC, otherwise NULL. */
+
+rtx
+pc_set (insn)
+ rtx insn;
+{
+ rtx pat;
+ if (GET_CODE (insn) != JUMP_INSN)
+ return NULL_RTX;
+ pat = PATTERN (insn);
+
+ /* The set is allowed to appear either as the insn pattern or
+ the first set in a PARALLEL. */
+ if (GET_CODE (pat) == PARALLEL)
+ pat = XVECEXP (pat, 0, 0);
+ if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == PC)
+ return pat;
+
+ return NULL_RTX;
+}
+
+/* Return true when insn is an unconditional direct jump,
+ possibly bundled inside a PARALLEL. */
+
+int
+any_uncondjump_p (insn)
+ rtx insn;
+{
+ rtx x = pc_set (insn);
+ if (!x)
+ return 0;
+ if (GET_CODE (SET_SRC (x)) != LABEL_REF)
+ return 0;
+ return 1;
+}
+
+/* Return true when insn is a conditional jump. This function works for
+ instructions containing PC sets in PARALLELs. The instruction may have
+ various other effects so before removing the jump you must verify
+ onlyjump_p.
+
+ Note that unlike condjump_p it returns false for unconditional jumps. */
+
+int
+any_condjump_p (insn)
+ rtx insn;
+{
+ rtx x = pc_set (insn);
+ enum rtx_code a, b;
+
+ if (!x)
+ return 0;
+ if (GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
+ return 0;
+
+ a = GET_CODE (XEXP (SET_SRC (x), 1));
+ b = GET_CODE (XEXP (SET_SRC (x), 2));
+
+ return ((b == PC && (a == LABEL_REF || a == RETURN))
+ || (a == PC && (b == LABEL_REF || b == RETURN)));
+}
+
/* Return the label of a conditional jump. */
rtx
condjump_label (insn)
rtx insn;
{
- register rtx x = PATTERN (insn);
+ rtx x = pc_set (insn);
- if (GET_CODE (x) == PARALLEL)
- x = XVECEXP (x, 0, 0);
- if (GET_CODE (x) != SET)
- return NULL_RTX;
- if (GET_CODE (SET_DEST (x)) != PC)
+ if (!x)
return NULL_RTX;
x = SET_SRC (x);
if (GET_CODE (x) == LABEL_REF)
void *data ATTRIBUTE_UNUSED;
{
rtx x = *loc;
- return GET_CODE (x) == RETURN;
+ return x && GET_CODE (x) == RETURN;
}
int
returnjump_p (insn)
rtx insn;
{
+ if (GET_CODE (insn) != JUMP_INSN)
+ return 0;
return for_each_rtx (&PATTERN (insn), returnjump_p_1, NULL);
}
+/* Return true if INSN is a jump that only transfers control and
+ nothing more. */
+
+int
+onlyjump_p (insn)
+ rtx insn;
+{
+ rtx set;
+
+ if (GET_CODE (insn) != JUMP_INSN)
+ return 0;
+
+ set = single_set (insn);
+ if (set == NULL)
+ return 0;
+ if (GET_CODE (SET_DEST (set)) != PC)
+ return 0;
+ if (side_effects_p (SET_SRC (set)))
+ return 0;
+
+ return 1;
+}
+
#ifdef HAVE_cc0
/* Return 1 if X is an RTX that does nothing but set the condition codes
(depth < 10
&& (insn = next_active_insn (value)) != 0
&& GET_CODE (insn) == JUMP_INSN
- && ((JUMP_LABEL (insn) != 0 && simplejump_p (insn))
+ && ((JUMP_LABEL (insn) != 0 && any_uncondjump_p (insn)
+ && onlyjump_p (insn))
|| GET_CODE (PATTERN (insn)) == RETURN)
&& (next = NEXT_INSN (insn))
&& GET_CODE (next) == BARRIER);
Once reload has completed (CROSS_JUMP non-zero), we need not consider
two labels distinct if they are separated by only USE or CLOBBER insns. */
-static void
-mark_jump_label (x, insn, cross_jump)
+void
+mark_jump_label (x, insn, cross_jump, in_mem)
register rtx x;
rtx insn;
int cross_jump;
+ int in_mem;
{
register RTX_CODE code = GET_CODE (x);
register int i;
- register char *fmt;
+ register const char *fmt;
switch (code)
{
case REG:
case SUBREG:
case CONST_INT:
- case SYMBOL_REF:
case CONST_DOUBLE:
case CLOBBER:
case CALL:
return;
case MEM:
+ in_mem = 1;
+ break;
+
+ case SYMBOL_REF:
+ if (!in_mem)
+ return;
+
/* If this is a constant-pool reference, see if it is a label. */
- if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
- && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
- mark_jump_label (get_pool_constant (XEXP (x, 0)), insn, cross_jump);
+ if (CONSTANT_POOL_ADDRESS_P (x))
+ mark_jump_label (get_pool_constant (x), insn, cross_jump, in_mem);
break;
case LABEL_REF:
rtx note;
rtx next;
+ /* Ignore remaining references to unreachable labels that
+ have been deleted. */
+ if (GET_CODE (label) == NOTE
+ && NOTE_LINE_NUMBER (label) == NOTE_INSN_DELETED_LABEL)
+ break;
+
if (GET_CODE (label) != CODE_LABEL)
abort ();
|| NOTE_LINE_NUMBER (next) == NOTE_INSN_FUNCTION_END
/* ??? Optional. Disables some optimizations, but
makes gcov output more accurate with -O. */
- || (flag_test_coverage && NOTE_LINE_NUMBER (next) > 0)))
+ || (flag_test_coverage
+ && NOTE_LINE_NUMBER (next) > 0)))
break;
}
is no longer valid because of the more accurate cfg
we build in find_basic_blocks -- it no longer pessimizes
code when it finds a REG_LABEL note. */
- REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, label,
+ REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, label,
REG_NOTES (insn));
}
}
int eltnum = code == ADDR_DIFF_VEC ? 1 : 0;
for (i = 0; i < XVECLEN (x, eltnum); i++)
- mark_jump_label (XVECEXP (x, eltnum, i), NULL_RTX, cross_jump);
+ mark_jump_label (XVECEXP (x, eltnum, i), NULL_RTX,
+ cross_jump, in_mem);
}
return;
-
+
default:
break;
}
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- mark_jump_label (XEXP (x, i), insn, cross_jump);
+ mark_jump_label (XEXP (x, i), insn, cross_jump, in_mem);
else if (fmt[i] == 'E')
{
register int j;
for (j = 0; j < XVECLEN (x, i); j++)
- mark_jump_label (XVECEXP (x, i, j), insn, cross_jump);
+ mark_jump_label (XVECEXP (x, i, j), insn, cross_jump, in_mem);
}
}
}
delete_computation (insn);
}
+/* Verify INSN is a BARRIER and delete it. */
+
+void
+delete_barrier (insn)
+ rtx insn;
+{
+ /* Callers are expected to pass only barriers; anything else is a bug. */
+ if (GET_CODE (insn) != BARRIER)
+ abort ();
+
+ delete_insn (insn);
+}
+
+/* Recursively delete prior insns that compute the value (used only by INSN
+ which the caller is deleting) stored in the register mentioned by NOTE
+ which is a REG_DEAD note associated with INSN. */
+
+static void
+delete_prior_computation (note, insn)
+ rtx note;
+ rtx insn;
+{
+ rtx our_prev;
+ rtx reg = XEXP (note, 0);
+
+ /* Walk backwards through ordinary insns and calls only; reaching any
+ other kind of insn (label, jump, barrier) ends the search. */
+ for (our_prev = prev_nonnote_insn (insn);
+ our_prev && (GET_CODE (our_prev) == INSN
+ || GET_CODE (our_prev) == CALL_INSN);
+ our_prev = prev_nonnote_insn (our_prev))
+ {
+ rtx pat = PATTERN (our_prev);
+
+ /* If we reach a CALL which is not calling a const function
+ or the callee pops the arguments, then give up. */
+ if (GET_CODE (our_prev) == CALL_INSN
+ && (! CONST_CALL_P (our_prev)
+ || GET_CODE (pat) != SET || GET_CODE (SET_SRC (pat)) != CALL))
+ break;
+
+ /* If we reach a SEQUENCE, it is too complex to try to
+ do anything with it, so give up. */
+ if (GET_CODE (pat) == SEQUENCE)
+ break;
+
+ if (GET_CODE (pat) == USE
+ && GET_CODE (XEXP (pat, 0)) == INSN)
+ /* reorg creates USEs that look like this. We leave them
+ alone because reorg needs them for its own purposes. */
+ break;
+
+ /* Does OUR_PREV set (some part of) REG? If so, it is the candidate
+ for deletion; decide below and stop scanning either way. */
+ if (reg_set_p (reg, pat))
+ {
+ if (side_effects_p (pat) && GET_CODE (our_prev) != CALL_INSN)
+ break;
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ /* If we find a SET of something else, we can't
+ delete the insn. */
+
+ int i;
+
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ {
+ rtx part = XVECEXP (pat, 0, i);
+
+ if (GET_CODE (part) == SET
+ && SET_DEST (part) != reg)
+ break;
+ }
+
+ if (i == XVECLEN (pat, 0))
+ delete_computation (our_prev);
+ }
+ else if (GET_CODE (pat) == SET
+ && GET_CODE (SET_DEST (pat)) == REG)
+ {
+ /* Compute the span of hard registers written by OUR_PREV and
+ the span occupied by REG; a pseudo counts as one register. */
+ int dest_regno = REGNO (SET_DEST (pat));
+ int dest_endregno
+ = (dest_regno
+ + (dest_regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (dest_regno,
+ GET_MODE (SET_DEST (pat))) : 1));
+ int regno = REGNO (reg);
+ int endregno
+ = (regno
+ + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (reg)) : 1));
+
+ /* If the dying REG covers everything OUR_PREV writes, the
+ whole computation is dead. */
+ if (dest_regno >= regno
+ && dest_endregno <= endregno)
+ delete_computation (our_prev);
+
+ /* We may have a multi-word hard register and some, but not
+ all, of the words of the register are needed in subsequent
+ insns. Write REG_UNUSED notes for those parts that were not
+ needed. */
+ else if (dest_regno <= regno
+ && dest_endregno >= endregno)
+ {
+ int i;
+
+ REG_NOTES (our_prev)
+ = gen_rtx_EXPR_LIST (REG_UNUSED, reg,
+ REG_NOTES (our_prev));
+
+ /* Only delete OUR_PREV once every word it writes is
+ covered by a REG_UNUSED note. */
+ for (i = dest_regno; i < dest_endregno; i++)
+ if (! find_regno_note (our_prev, REG_UNUSED, i))
+ break;
+
+ if (i == dest_endregno)
+ delete_computation (our_prev);
+ }
+ }
+
+ break;
+ }
+
+ /* If PAT references the register that dies here, it is an
+ additional use. Hence any prior SET isn't dead. However, this
+ insn becomes the new place for the REG_DEAD note. */
+ if (reg_overlap_mentioned_p (reg, pat))
+ {
+ XEXP (note, 1) = REG_NOTES (our_prev);
+ REG_NOTES (our_prev) = note;
+ break;
+ }
+ }
+}
+
/* Delete INSN and recursively delete insns that compute values used only
by INSN. This uses the REG_DEAD notes computed during flow analysis.
If we are running before flow.c, we need do nothing since flow.c will
&& sets_cc0_p (PATTERN (prev)))
{
if (sets_cc0_p (PATTERN (prev)) > 0
- && !FIND_REG_INC_NOTE (prev, NULL_RTX))
+ && ! side_effects_p (PATTERN (prev)))
delete_computation (prev);
else
/* Otherwise, show that cc0 won't be used. */
}
#endif
-#ifdef INSN_SCHEDULING
- /* ?!? The schedulers do not keep REG_DEAD notes accurate after
- reload has completed. The schedulers need to be fixed. Until
- they are, we must not rely on the death notes here. */
- if (reload_completed && flag_schedule_insns_after_reload)
- {
- delete_insn (insn);
- return;
- }
-#endif
-
for (note = REG_NOTES (insn); note; note = next)
{
- rtx our_prev;
-
next = XEXP (note, 1);
if (REG_NOTE_KIND (note) != REG_DEAD
|| GET_CODE (XEXP (note, 0)) != REG)
continue;
- for (our_prev = prev_nonnote_insn (insn);
- our_prev && GET_CODE (our_prev) == INSN;
- our_prev = prev_nonnote_insn (our_prev))
- {
- /* If we reach a SEQUENCE, it is too complex to try to
- do anything with it, so give up. */
- if (GET_CODE (PATTERN (our_prev)) == SEQUENCE)
- break;
-
- if (GET_CODE (PATTERN (our_prev)) == USE
- && GET_CODE (XEXP (PATTERN (our_prev), 0)) == INSN)
- /* reorg creates USEs that look like this. We leave them
- alone because reorg needs them for its own purposes. */
- break;
-
- if (reg_set_p (XEXP (note, 0), PATTERN (our_prev)))
- {
- if (FIND_REG_INC_NOTE (our_prev, NULL_RTX))
- break;
-
- if (GET_CODE (PATTERN (our_prev)) == PARALLEL)
- {
- /* If we find a SET of something else, we can't
- delete the insn. */
-
- int i;
-
- for (i = 0; i < XVECLEN (PATTERN (our_prev), 0); i++)
- {
- rtx part = XVECEXP (PATTERN (our_prev), 0, i);
-
- if (GET_CODE (part) == SET
- && SET_DEST (part) != XEXP (note, 0))
- break;
- }
-
- if (i == XVECLEN (PATTERN (our_prev), 0))
- delete_computation (our_prev);
- }
- else if (GET_CODE (PATTERN (our_prev)) == SET
- && SET_DEST (PATTERN (our_prev)) == XEXP (note, 0))
- delete_computation (our_prev);
-
- break;
- }
-
- /* If OUR_PREV references the register that dies here, it is an
- additional use. Hence any prior SET isn't dead. However, this
- insn becomes the new place for the REG_DEAD note. */
- if (reg_overlap_mentioned_p (XEXP (note, 0),
- PATTERN (our_prev)))
- {
- XEXP (note, 1) = REG_NOTES (our_prev);
- REG_NOTES (our_prev) = note;
- break;
- }
- }
+ delete_prior_computation (note, insn);
}
delete_insn (insn);
register rtx prev = PREV_INSN (insn);
register int was_code_label = (GET_CODE (insn) == CODE_LABEL);
register int dont_really_delete = 0;
+ rtx note;
while (next && INSN_DELETED_P (next))
next = NEXT_INSN (next);
if (was_code_label)
remove_node_from_expr_list (insn, &nonlocal_goto_handler_labels);
- /* Don't delete user-declared labels. Convert them to special NOTEs
- instead. */
- if (was_code_label && LABEL_NAME (insn) != 0
- && optimize && ! dont_really_delete)
+ /* Don't delete user-declared labels. When optimizing, convert them
+ to special NOTEs instead. When not optimizing, leave them alone. */
+ if (was_code_label && LABEL_NAME (insn) != 0)
{
- PUT_CODE (insn, NOTE);
- NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED_LABEL;
- NOTE_SOURCE_FILE (insn) = 0;
- dont_really_delete = 1;
+ if (! optimize)
+ dont_really_delete = 1;
+ else if (! dont_really_delete)
+ {
+ const char *name = LABEL_NAME (insn);
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED_LABEL;
+ NOTE_SOURCE_FILE (insn) = name;
+ dont_really_delete = 1;
+ }
}
else
/* Mark this insn as deleted. */
/* Patch out INSN (and the barrier if any) */
- if (optimize && ! dont_really_delete)
+ if (! dont_really_delete)
{
if (prev)
{
and delete the label if it is now unused. */
if (GET_CODE (insn) == JUMP_INSN && JUMP_LABEL (insn))
- if (--LABEL_NUSES (JUMP_LABEL (insn)) == 0)
- {
- /* This can delete NEXT or PREV,
- either directly if NEXT is JUMP_LABEL (INSN),
- or indirectly through more levels of jumps. */
- delete_insn (JUMP_LABEL (insn));
- /* I feel a little doubtful about this loop,
- but I see no clean and sure alternative way
- to find the first insn after INSN that is not now deleted.
- I hope this works. */
- while (next && INSN_DELETED_P (next))
- next = NEXT_INSN (next);
- return next;
- }
+ {
+ rtx lab = JUMP_LABEL (insn), lab_next;
+
+ if (--LABEL_NUSES (lab) == 0)
+ {
+ /* This can delete NEXT or PREV,
+ either directly if NEXT is JUMP_LABEL (INSN),
+ or indirectly through more levels of jumps. */
+ delete_insn (lab);
+
+ /* I feel a little doubtful about this loop,
+ but I see no clean and sure alternative way
+ to find the first insn after INSN that is not now deleted.
+ I hope this works. */
+ while (next && INSN_DELETED_P (next))
+ next = NEXT_INSN (next);
+ return next;
+ }
+ else if ((lab_next = next_nonnote_insn (lab)) != NULL
+ && GET_CODE (lab_next) == JUMP_INSN
+ && (GET_CODE (PATTERN (lab_next)) == ADDR_VEC
+ || GET_CODE (PATTERN (lab_next)) == ADDR_DIFF_VEC))
+ {
+ /* If we're deleting the tablejump, delete the dispatch table.
+ We may not be able to kill the label immediately preceding
+ just yet, as it might be referenced in code leading up to
+ the tablejump. */
+ delete_insn (lab_next);
+ }
+ }
/* Likewise if we're deleting a dispatch table. */
return next;
}
+ /* Likewise for an ordinary INSN / CALL_INSN with a REG_LABEL note. */
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_LABEL
+ /* This could also be a NOTE_INSN_DELETED_LABEL note. */
+ && GET_CODE (XEXP (note, 0)) == CODE_LABEL)
+ if (--LABEL_NUSES (XEXP (note, 0)) == 0)
+ delete_insn (XEXP (note, 0));
+
while (prev && (INSN_DELETED_P (prev) || GET_CODE (prev) == NOTE))
prev = PREV_INSN (prev);
is also an unconditional jump in that case. */
}
\f
-/* Invert the condition of the jump JUMP, and make it jump
- to label NLABEL instead of where it jumps now. */
+/* We have determined that INSN is never reached, and are about to
+ delete it. Print a warning if the user asked for one.
-int
-invert_jump (jump, nlabel)
- rtx jump, nlabel;
+ To try to make this warning more useful, this should only be called
+ once per basic block not reached, and it only warns when the basic
+ block contains more than one line from the current function, and
+ contains at least one operation. CSE and inlining can duplicate insns,
+ so it's possible to get spurious warnings from this. */
+
+void
+never_reached_warning (avoided_insn)
+ rtx avoided_insn;
{
- /* We have to either invert the condition and change the label or
- do neither. Either operation could fail. We first try to invert
- the jump. If that succeeds, we try changing the label. If that fails,
- we invert the jump back to what it was. */
+ rtx insn;
+ rtx a_line_note = NULL;
+ int two_avoided_lines = 0;
+ int contains_insn = 0;
- if (! invert_exp (PATTERN (jump), jump))
- return 0;
+ if (! warn_notreached)
+ return;
+
+ /* Scan forwards, looking at LINE_NUMBER notes, until
+ we hit a LABEL or we run out of insns. */
- if (redirect_jump (jump, nlabel))
+ for (insn = avoided_insn; insn != NULL; insn = NEXT_INSN (insn))
{
- if (flag_branch_probabilities)
+ if (GET_CODE (insn) == CODE_LABEL)
+ break;
+ else if (GET_CODE (insn) == NOTE /* A line number note? */
+ && NOTE_LINE_NUMBER (insn) >= 0)
{
- rtx note = find_reg_note (jump, REG_BR_PROB, 0);
-
- /* An inverted jump means that a probability taken becomes a
- probability not taken. Subtract the branch probability from the
- probability base to convert it back to a taken probability.
- (We don't flip the probability on a branch that's never taken. */
- if (note && XINT (XEXP (note, 0), 0) >= 0)
- XINT (XEXP (note, 0), 0) = REG_BR_PROB_BASE - XINT (XEXP (note, 0), 0);
+ if (a_line_note == NULL)
+ a_line_note = insn;
+ else
+ two_avoided_lines |= (NOTE_LINE_NUMBER (a_line_note)
+ != NOTE_LINE_NUMBER (insn));
}
-
- return 1;
+ else if (INSN_P (insn))
+ contains_insn = 1;
}
-
- if (! invert_exp (PATTERN (jump), jump))
- /* This should just be putting it back the way it was. */
- abort ();
-
- return 0;
+ if (two_avoided_lines && contains_insn)
+ warning_with_file_and_line (NOTE_SOURCE_FILE (a_line_note),
+ NOTE_LINE_NUMBER (a_line_note),
+ "will never be executed");
}
+\f
+/* Throughout LOC, redirect OLABEL to NLABEL. Treat null OLABEL or
+ NLABEL as a return. Accrue modifications into the change group. */
-/* Invert the jump condition of rtx X contained in jump insn, INSN.
-
- Return 1 if we can do so, 0 if we cannot find a way to do so that
- matches a pattern. */
-
-int
-invert_exp (x, insn)
- rtx x;
+static void
+redirect_exp_1 (loc, olabel, nlabel, insn)
+ rtx *loc;
+ rtx olabel, nlabel;
rtx insn;
{
- register RTX_CODE code;
+ register rtx x = *loc;
+ register RTX_CODE code = GET_CODE (x);
register int i;
- register char *fmt;
-
- code = GET_CODE (x);
+ register const char *fmt;
- if (code == IF_THEN_ELSE)
+ if (code == LABEL_REF)
{
- register rtx comp = XEXP (x, 0);
- register rtx tem;
+ if (XEXP (x, 0) == olabel)
+ {
+ rtx n;
+ if (nlabel)
+ n = gen_rtx_LABEL_REF (VOIDmode, nlabel);
+ else
+ n = gen_rtx_RETURN (VOIDmode);
- /* We can do this in two ways: The preferable way, which can only
- be done if this is not an integer comparison, is to reverse
- the comparison code. Otherwise, swap the THEN-part and ELSE-part
- of the IF_THEN_ELSE. If we can't do either, fail. */
+ validate_change (insn, loc, n, 1);
+ return;
+ }
+ }
+ else if (code == RETURN && olabel == 0)
+ {
+ x = gen_rtx_LABEL_REF (VOIDmode, nlabel);
+ if (loc == &PATTERN (insn))
+ x = gen_rtx_SET (VOIDmode, pc_rtx, x);
+ validate_change (insn, loc, x, 1);
+ return;
+ }
- if (can_reverse_comparison_p (comp, insn)
- && validate_change (insn, &XEXP (x, 0),
- gen_rtx_fmt_ee (reverse_condition (GET_CODE (comp)),
- GET_MODE (comp), XEXP (comp, 0),
- XEXP (comp, 1)), 0))
- return 1;
-
- tem = XEXP (x, 1);
- validate_change (insn, &XEXP (x, 1), XEXP (x, 2), 1);
- validate_change (insn, &XEXP (x, 2), tem, 1);
- return apply_change_group ();
+ if (code == SET && nlabel == 0 && SET_DEST (x) == pc_rtx
+ && GET_CODE (SET_SRC (x)) == LABEL_REF
+ && XEXP (SET_SRC (x), 0) == olabel)
+ {
+ validate_change (insn, loc, gen_rtx_RETURN (VOIDmode), 1);
+ return;
}
fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- if (! invert_exp (XEXP (x, i), insn))
- return 0;
- if (fmt[i] == 'E')
+ redirect_exp_1 (&XEXP (x, i), olabel, nlabel, insn);
+ else if (fmt[i] == 'E')
{
register int j;
for (j = 0; j < XVECLEN (x, i); j++)
- if (!invert_exp (XVECEXP (x, i, j), insn))
- return 0;
+ redirect_exp_1 (&XVECEXP (x, i, j), olabel, nlabel, insn);
}
}
+}
- return 1;
+/* Similar, but apply the change group and report success or failure. */
+
+static int
+redirect_exp (olabel, nlabel, insn)
+ rtx olabel, nlabel;
+ rtx insn;
+{
+ rtx *loc;
+
+ /* For a PARALLEL pattern, only element 0 is redirected — presumably
+ the jump proper lives there; matches redirect_jump_1. TODO confirm. */
+ if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ loc = &XVECEXP (PATTERN (insn), 0, 0);
+ else
+ loc = &PATTERN (insn);
+
+ redirect_exp_1 (loc, olabel, nlabel, insn);
+ /* No queued changes means we found nothing to redirect. */
+ if (num_validated_changes () == 0)
+ return 0;
+
+ return apply_change_group ();
}
-\f
-/* Make jump JUMP jump to label NLABEL instead of where it jumps now.
- If the old jump target label is unused as a result,
- it and the code following it may be deleted.
+
+/* Make JUMP go to NLABEL instead of where it jumps now. Accrue
+ the modifications into the change group. Return false if we did
+ not see how to do that. */
+
+int
+redirect_jump_1 (jump, nlabel)
+ rtx jump, nlabel;
+{
+ int ochanges = num_validated_changes ();
+ rtx *loc;
+
+ /* For a PARALLEL pattern, only element 0 is redirected — presumably
+ the jump proper lives there; matches redirect_exp. TODO confirm. */
+ if (GET_CODE (PATTERN (jump)) == PARALLEL)
+ loc = &XVECEXP (PATTERN (jump), 0, 0);
+ else
+ loc = &PATTERN (jump);
+
+ redirect_exp_1 (loc, JUMP_LABEL (jump), nlabel, jump);
+ /* Success iff redirect_exp_1 queued at least one new change. */
+ return num_validated_changes () > ochanges;
+}
+
+/* Make JUMP go to NLABEL instead of where it jumps now. If the old
+ jump target label is unused as a result, it and the code following
+ it may be deleted.
If NLABEL is zero, we are to turn the jump into a (possibly conditional)
RETURN insn.
- The return value will be 1 if the change was made, 0 if it wasn't (this
- can only occur for NLABEL == 0). */
+ The return value will be 1 if the change was made, 0 if it wasn't
+ (this can only occur for NLABEL == 0). */
int
-redirect_jump (jump, nlabel)
+redirect_jump (jump, nlabel, delete_unused)
rtx jump, nlabel;
+ int delete_unused;
{
register rtx olabel = JUMP_LABEL (jump);
if (nlabel == olabel)
return 1;
- if (! redirect_exp (&PATTERN (jump), olabel, nlabel, jump))
+ if (! redirect_exp (olabel, nlabel, jump))
return 0;
/* If this is an unconditional branch, delete it from the jump_chain of
if (nlabel)
++LABEL_NUSES (nlabel);
- if (olabel && --LABEL_NUSES (olabel) == 0)
+ /* If we're eliding the jump over exception cleanups at the end of a
+ function, move the function end note so that -Wreturn-type works. */
+ if (olabel && nlabel
+ && NEXT_INSN (olabel)
+ && GET_CODE (NEXT_INSN (olabel)) == NOTE
+ && NOTE_LINE_NUMBER (NEXT_INSN (olabel)) == NOTE_INSN_FUNCTION_END)
+ emit_note_after (NOTE_INSN_FUNCTION_END, nlabel);
+
+ if (olabel && --LABEL_NUSES (olabel) == 0 && delete_unused)
delete_insn (olabel);
return 1;
}
+/* Invert the jump condition of rtx X contained in jump insn, INSN.
+ Accrue the modifications into the change group. */
+
+static void
+invert_exp_1 (insn)
+ rtx insn;
+{
+ register RTX_CODE code;
+ rtx x = pc_set (insn);
+
+ /* A jump with no (set (pc) ...) cannot be inverted here. */
+ if (!x)
+ abort ();
+ x = SET_SRC (x);
+
+ code = GET_CODE (x);
+
+ if (code == IF_THEN_ELSE)
+ {
+ register rtx comp = XEXP (x, 0);
+ register rtx tem;
+ enum rtx_code reversed_code;
+
+ /* We can do this in two ways: The preferable way, which can only
+ be done if this is not an integer comparison, is to reverse
+ the comparison code. Otherwise, swap the THEN-part and ELSE-part
+ of the IF_THEN_ELSE. If we can't do either, fail. */
+
+ reversed_code = reversed_comparison_code (comp, insn);
+
+ if (reversed_code != UNKNOWN)
+ {
+ validate_change (insn, &XEXP (x, 0),
+ gen_rtx_fmt_ee (reversed_code,
+ GET_MODE (comp), XEXP (comp, 0),
+ XEXP (comp, 1)),
+ 1);
+ return;
+ }
+
+ /* Reversing the comparison failed; swap the two arms instead. */
+ tem = XEXP (x, 1);
+ validate_change (insn, &XEXP (x, 1), XEXP (x, 2), 1);
+ validate_change (insn, &XEXP (x, 2), tem, 1);
+ }
+ else
+ abort ();
+}
+
+/* Invert the jump condition of conditional jump insn, INSN.
+
+ Return 1 if we can do so, 0 if we cannot find a way to do so that
+ matches a pattern. */
+
+static int
+invert_exp (insn)
+ rtx insn;
+{
+ invert_exp_1 (insn);
+ /* No queued changes means invert_exp_1 accomplished nothing. */
+ if (num_validated_changes () == 0)
+ return 0;
+
+ return apply_change_group ();
+}
+
+/* Invert the condition of the jump JUMP, and make it jump to label
+ NLABEL instead of where it jumps now. Accrue changes into the
+ change group. Return false if we didn't see how to perform the
+ inversion and redirection. */
+
+int
+invert_jump_1 (jump, nlabel)
+ rtx jump, nlabel;
+{
+ int ochanges;
+
+ /* Record the queue depth so we can tell whether inversion added
+ anything; the caller applies (or cancels) the whole group. */
+ ochanges = num_validated_changes ();
+ invert_exp_1 (jump);
+ if (num_validated_changes () == ochanges)
+ return 0;
+
+ return redirect_jump_1 (jump, nlabel);
+}
+
+/* Invert the condition of the jump JUMP, and make it jump to label
+ NLABEL instead of where it jumps now. Return true if successful. */
+
+int
+invert_jump (jump, nlabel, delete_unused)
+ rtx jump, nlabel;
+ int delete_unused;
+{
+ /* We have to either invert the condition and change the label or
+ do neither. Either operation could fail. We first try to invert
+ the jump. If that succeeds, we try changing the label. If that fails,
+ we invert the jump back to what it was. */
+
+ if (! invert_exp (jump))
+ return 0;
+
+ if (redirect_jump (jump, nlabel, delete_unused))
+ {
+ /* An inverted jump means that a probability taken becomes a
+ probability not taken. Subtract the branch probability from the
+ probability base to convert it back to a taken probability. */
+
+ /* NOTE(review): unlike the old invert_jump, this flips the
+ probability unconditionally (the old code skipped negative
+ values) — confirm REG_BR_PROB can no longer be negative here. */
+ rtx note = find_reg_note (jump, REG_BR_PROB, NULL_RTX);
+ if (note)
+ XEXP (note, 0) = GEN_INT (REG_BR_PROB_BASE - INTVAL (XEXP (note, 0)));
+
+ return 1;
+ }
+
+ if (! invert_exp (jump))
+ /* This should just be putting it back the way it was. */
+ abort ();
+
+ return 0;
+}
+
/* Delete the instruction JUMP from any jump chain it might be on. */
static void
/* Handle return insns. */
else if (jump_chain && GET_CODE (PATTERN (jump)) == RETURN)
index = 0;
- else return;
+ else
+ return;
if (jump_chain[index] == jump)
jump_chain[index] = jump_chain[INSN_UID (jump)];
}
}
}
-
-/* If NLABEL is nonzero, throughout the rtx at LOC,
- alter (LABEL_REF OLABEL) to (LABEL_REF NLABEL). If OLABEL is
- zero, alter (RETURN) to (LABEL_REF NLABEL).
-
- If NLABEL is zero, alter (LABEL_REF OLABEL) to (RETURN) and check
- validity with validate_change. Convert (set (pc) (label_ref olabel))
- to (return).
-
- Return 0 if we found a change we would like to make but it is invalid.
- Otherwise, return 1. */
-
-int
-redirect_exp (loc, olabel, nlabel, insn)
- rtx *loc;
- rtx olabel, nlabel;
- rtx insn;
-{
- register rtx x = *loc;
- register RTX_CODE code = GET_CODE (x);
- register int i;
- register char *fmt;
-
- if (code == LABEL_REF)
- {
- if (XEXP (x, 0) == olabel)
- {
- if (nlabel)
- XEXP (x, 0) = nlabel;
- else
- return validate_change (insn, loc, gen_rtx_RETURN (VOIDmode), 0);
- return 1;
- }
- }
- else if (code == RETURN && olabel == 0)
- {
- x = gen_rtx_LABEL_REF (VOIDmode, nlabel);
- if (loc == &PATTERN (insn))
- x = gen_rtx_SET (VOIDmode, pc_rtx, x);
- return validate_change (insn, loc, x, 0);
- }
-
- if (code == SET && nlabel == 0 && SET_DEST (x) == pc_rtx
- && GET_CODE (SET_SRC (x)) == LABEL_REF
- && XEXP (SET_SRC (x), 0) == olabel)
- return validate_change (insn, loc, gen_rtx_RETURN (VOIDmode), 0);
-
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'e')
- if (! redirect_exp (&XEXP (x, i), olabel, nlabel, insn))
- return 0;
- if (fmt[i] == 'E')
- {
- register int j;
- for (j = 0; j < XVECLEN (x, i); j++)
- if (! redirect_exp (&XVECEXP (x, i, j), olabel, nlabel, insn))
- return 0;
- }
- }
-
- return 1;
-}
\f
/* Make jump JUMP jump to label NLABEL, assuming it used to be a tablejump.
rtx jump, nlabel;
{
register rtx olabel = JUMP_LABEL (jump);
+ rtx *notep, note, next;
/* Add this jump to the jump_chain of NLABEL. */
if (jump_chain && INSN_UID (nlabel) < max_jump_chain
jump_chain[INSN_UID (nlabel)] = jump;
}
+ for (notep = ®_NOTES (jump), note = *notep; note; note = next)
+ {
+ next = XEXP (note, 1);
+
+ if (REG_NOTE_KIND (note) != REG_DEAD
+ /* Verify that the REG_NOTE is legitimate. */
+ || GET_CODE (XEXP (note, 0)) != REG
+ || ! reg_mentioned_p (XEXP (note, 0), PATTERN (jump)))
+ notep = &XEXP (note, 1);
+ else
+ {
+ delete_prior_computation (note, jump);
+ *notep = next;
+ }
+ }
+
PATTERN (jump) = gen_jump (nlabel);
JUMP_LABEL (jump) = nlabel;
++LABEL_NUSES (nlabel);
{
register int i;
register RTX_CODE code = GET_CODE (x);
- register char *fmt;
-
+ register const char *fmt;
+
if (x == y)
return 1;
return reg_x >= 0 && reg_x == reg_y && word_x == word_y;
}
- /* Now we have disposed of all the cases
+ /* Now we have disposed of all the cases
in which different rtx codes can match. */
if (code != GET_CODE (y))
return 0;
In general, if the first test fails, the program can branch
directly to `foo' and skip the second try which is doomed to fail.
We run this after loop optimization and before flow analysis. */
-
+
/* When comparing the insn patterns, we track the fact that different
pseudo-register numbers may have been used in each computation.
The following array stores an equivalence -- same_regs[I] == J means
static int modified_mem;
-/* Called via note_stores on each insn between the target of the first
+/* Called via note_stores on each insn between the target of the first
branch and the second branch. It marks any changed registers. */
static void
-mark_modified_reg (dest, x)
+mark_modified_reg (dest, x, data)
rtx dest;
rtx x ATTRIBUTE_UNUSED;
+ void *data ATTRIBUTE_UNUSED;
{
- int regno, i;
+ int regno;
+ unsigned int i;
if (GET_CODE (dest) == SUBREG)
dest = SUBREG_REG (dest);
}
/* F is the first insn in the chain of insns. */
-
+
void
thread_jumps (f, max_reg, flag_before_loop)
rtx f;
will either always succeed or always fail depending on the relative
senses of the two branches. So adjust the first branch accordingly
in this case. */
-
+
rtx label, b1, b2, t1, t2;
enum rtx_code code1, code2;
rtx b1op0, b1op1, b2op0, b2op1;
int changed = 1;
int i;
int *all_reset;
+ enum rtx_code reversed_code1, reversed_code2;
/* Allocate register tables and quick-reset table. */
- modified_regs = (char *) alloca (max_reg * sizeof (char));
- same_regs = (int *) alloca (max_reg * sizeof (int));
- all_reset = (int *) alloca (max_reg * sizeof (int));
+ modified_regs = (char *) xmalloc (max_reg * sizeof (char));
+ same_regs = (int *) xmalloc (max_reg * sizeof (int));
+ all_reset = (int *) xmalloc (max_reg * sizeof (int));
for (i = 0; i < max_reg; i++)
all_reset[i] = -1;
-
+
while (changed)
{
changed = 0;
for (b1 = f; b1; b1 = NEXT_INSN (b1))
{
+ rtx set;
+ rtx set2;
+
/* Get to a candidate branch insn. */
if (GET_CODE (b1) != JUMP_INSN
- || ! condjump_p (b1) || simplejump_p (b1)
- || JUMP_LABEL (b1) == 0)
+ || ! any_condjump_p (b1) || JUMP_LABEL (b1) == 0)
continue;
- bzero (modified_regs, max_reg * sizeof (char));
+ memset (modified_regs, 0, max_reg * sizeof (char));
modified_mem = 0;
- bcopy ((char *) all_reset, (char *) same_regs,
- max_reg * sizeof (int));
+ memcpy (same_regs, all_reset, max_reg * sizeof (int));
num_same_regs = 0;
label = JUMP_LABEL (b1);
{
/* If this is an unconditional jump and is the only use of
its target label, we can follow it. */
- if (simplejump_p (b2)
+ if (any_uncondjump_p (b2)
+ && onlyjump_p (b2)
&& JUMP_LABEL (b2) != 0
&& LABEL_NUSES (JUMP_LABEL (b2)) == 1)
{
modified_regs[i] = 1;
}
- note_stores (PATTERN (b2), mark_modified_reg);
+ note_stores (PATTERN (b2), mark_modified_reg, NULL);
}
/* Check the next candidate branch insn from the label
if (b2 == 0
|| GET_CODE (b2) != JUMP_INSN
|| b2 == b1
- || ! condjump_p (b2)
- || simplejump_p (b2))
+ || !any_condjump_p (b2)
+ || !onlyjump_p (b2))
continue;
+ set = pc_set (b1);
+ set2 = pc_set (b2);
/* Get the comparison codes and operands, reversing the
codes if appropriate. If we don't have comparison codes,
we can't do anything. */
- b1op0 = XEXP (XEXP (SET_SRC (PATTERN (b1)), 0), 0);
- b1op1 = XEXP (XEXP (SET_SRC (PATTERN (b1)), 0), 1);
- code1 = GET_CODE (XEXP (SET_SRC (PATTERN (b1)), 0));
- if (XEXP (SET_SRC (PATTERN (b1)), 1) == pc_rtx)
- code1 = reverse_condition (code1);
-
- b2op0 = XEXP (XEXP (SET_SRC (PATTERN (b2)), 0), 0);
- b2op1 = XEXP (XEXP (SET_SRC (PATTERN (b2)), 0), 1);
- code2 = GET_CODE (XEXP (SET_SRC (PATTERN (b2)), 0));
- if (XEXP (SET_SRC (PATTERN (b2)), 1) == pc_rtx)
- code2 = reverse_condition (code2);
+ b1op0 = XEXP (XEXP (SET_SRC (set), 0), 0);
+ b1op1 = XEXP (XEXP (SET_SRC (set), 0), 1);
+ code1 = GET_CODE (XEXP (SET_SRC (set), 0));
+ reversed_code1 = code1;
+ if (XEXP (SET_SRC (set), 1) == pc_rtx)
+ code1 = reversed_comparison_code (XEXP (SET_SRC (set), 0), b1);
+ else
+ reversed_code1 = reversed_comparison_code (XEXP (SET_SRC (set), 0), b1);
+
+ b2op0 = XEXP (XEXP (SET_SRC (set2), 0), 0);
+ b2op1 = XEXP (XEXP (SET_SRC (set2), 0), 1);
+ code2 = GET_CODE (XEXP (SET_SRC (set2), 0));
+ reversed_code2 = code2;
+ if (XEXP (SET_SRC (set2), 1) == pc_rtx)
+ code2 = reversed_comparison_code (XEXP (SET_SRC (set2), 0), b2);
+ else
+ reversed_code2 = reversed_comparison_code (XEXP (SET_SRC (set2), 0), b2);
/* If they test the same things and knowing that B1 branches
tells us whether or not B2 branches, check if we
if (rtx_equal_for_thread_p (b1op0, b2op0, b2)
&& rtx_equal_for_thread_p (b1op1, b2op1, b2)
&& (comparison_dominates_p (code1, code2)
- || (comparison_dominates_p (code1, reverse_condition (code2))
- && can_reverse_comparison_p (XEXP (SET_SRC (PATTERN (b1)),
- 0),
- b1))))
+ || comparison_dominates_p (code1, reversed_code2)))
+
{
t1 = prev_nonnote_insn (b1);
t2 = prev_nonnote_insn (b2);
-
+
while (t1 != 0 && t2 != 0)
{
if (t2 == label)
break;
if (comparison_dominates_p (code1, code2))
- new_label = JUMP_LABEL (b2);
+ new_label = JUMP_LABEL (b2);
else
new_label = get_label_after (b2);
new_label = gen_label_rtx ();
emit_label_after (new_label, PREV_INSN (prev));
}
- changed |= redirect_jump (b1, new_label);
+ changed |= redirect_jump (b1, new_label, 1);
}
break;
}
-
+
/* If either of these is not a normal insn (it might be
a JUMP_INSN, CALL_INSN, or CODE_LABEL) we fail. (NOTEs
have already been skipped above.) Similarly, fail
|| ! rtx_equal_for_thread_p (PATTERN (t1),
PATTERN (t2), t2))
break;
-
+
t1 = prev_nonnote_insn (t1);
t2 = prev_nonnote_insn (t2);
}
}
}
}
+
+ /* Clean up. */
+ free (modified_regs);
+ free (same_regs);
+ free (all_reset);
}
\f
/* This is like RTX_EQUAL_P except that it knows about our handling of
possibly equivalent registers and knows to consider volatile and
modified objects as not equal.
-
+
YINSN is the insn containing Y. */
int
register int i;
register int j;
register enum rtx_code code;
- register char *fmt;
+ register const char *fmt;
code = GET_CODE (x);
/* Rtx's of different codes cannot be equal. */
pessimistic, but this pass would only rarely do anything for FP
anyway. */
if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
- && FLOAT_MODE_P (GET_MODE (x)) && ! flag_fast_math)
+ && FLOAT_MODE_P (GET_MODE (x)) && ! flag_unsafe_math_optimizations)
return 0;
/* For commutative operations, the RTX match if the operand match in any
num_same_regs++;
/* If this is the first time we are seeing a register on the `Y'
- side, see if it is the last use. If not, we can't thread the
+ side, see if it is the last use. If not, we can't thread the
jump, so mark it as not equivalent. */
if (REGNO_LAST_UID (REGNO (y)) != INSN_UID (yinsn))
return 0;
return 1;
}
else
- return (same_regs[REGNO (x)] == REGNO (y));
+ return (same_regs[REGNO (x)] == (int) REGNO (y));
break;
if (GET_CODE (SET_DEST (x)) == REG
&& GET_CODE (SET_DEST (y)) == REG)
{
- if (same_regs[REGNO (SET_DEST (x))] == REGNO (SET_DEST (y)))
+ if (same_regs[REGNO (SET_DEST (x))] == (int) REGNO (SET_DEST (y)))
{
same_regs[REGNO (SET_DEST (x))] = -1;
num_same_regs--;
return 0;
}
else
- if (rtx_equal_for_thread_p (SET_DEST (x), SET_DEST (y), yinsn) == 0)
- return 0;
+ {
+ if (rtx_equal_for_thread_p (SET_DEST (x), SET_DEST (y), yinsn) == 0)
+ return 0;
+ }
return rtx_equal_for_thread_p (SET_SRC (x), SET_SRC (y), yinsn);
case SYMBOL_REF:
return XSTR (x, 0) == XSTR (y, 0);
-
+
default:
break;
}
/* And the corresponding elements must match. */
for (j = 0; j < XVECLEN (x, i); j++)
if (rtx_equal_for_thread_p (XVECEXP (x, i, j),
- XVECEXP (y, i, j), yinsn) == 0)
+ XVECEXP (y, i, j), yinsn) == 0)
return 0;
break;
break;
case '0':
+ case 't':
break;
/* It is believed that rtx's at this level will never
}
return 1;
}
-\f
-
-#ifndef HAVE_cc0
-/* Return the insn that NEW can be safely inserted in front of starting at
- the jump insn INSN. Return 0 if it is not safe to do this jump
- optimization. Note that NEW must contain a single set. */
-
-static rtx
-find_insert_position (insn, new)
- rtx insn;
- rtx new;
-{
- int i;
- rtx prev;
-
- /* If NEW does not clobber, it is safe to insert NEW before INSN. */
- if (GET_CODE (PATTERN (new)) != PARALLEL)
- return insn;
-
- for (i = XVECLEN (PATTERN (new), 0) - 1; i >= 0; i--)
- if (GET_CODE (XVECEXP (PATTERN (new), 0, i)) == CLOBBER
- && reg_overlap_mentioned_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
- insn))
- break;
-
- if (i < 0)
- return insn;
-
- /* There is a good chance that the previous insn PREV sets the thing
- being clobbered (often the CC in a hard reg). If PREV does not
- use what NEW sets, we can insert NEW before PREV. */
-
- prev = prev_active_insn (insn);
- for (i = XVECLEN (PATTERN (new), 0) - 1; i >= 0; i--)
- if (GET_CODE (XVECEXP (PATTERN (new), 0, i)) == CLOBBER
- && reg_overlap_mentioned_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
- insn)
- && ! modified_in_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
- prev))
- return 0;
-
- return reg_mentioned_p (SET_DEST (single_set (new)), prev) ? 0 : prev;
-}
-#endif /* !HAVE_cc0 */