This is the mail archive of the
gcc-patches@gcc.gnu.org
mailing list for the GCC project.
RFA: Fix rtl-optimization/28618
- From: Joern RENNECKE <joern dot rennecke at st dot com>
- To: GCC Patches <gcc-patches at gcc dot gnu dot org>
- Date: Wed, 15 Nov 2006 16:48:14 +0000
- Subject: RFA: Fix rtl-optimization/28618
Regression tested in revision 118652 on i686-pc-linux-gnu.
One FAIL in the libjava testsuite is replaced by another:
27c27
< FAIL: Thread_Alive -O3 execution - source compiled test
---
> FAIL: events output
According to Keith Seitz, the 'events output' failure is a testsuite bug.
:ADDPATCH scheduler:
2006-11-15 Jörn Rennecke <joern.rennecke@st.com>
PR rtl-optimization/28618:
* sched-deps.c (sched_analyze_reg): When a likely spilled register
is used, put insn into a scheduling group with the insn that
sets it and with all the insns in-between.
Index: sched-deps.c
===================================================================
/usr/bin/diff -p -d -F^( -u -L sched-deps.c (revision 118652) -L sched-deps.c (working copy) .svn/text-base/sched-deps.c.svn-base sched-deps.c
--- sched-deps.c (revision 118652)
+++ sched-deps.c (working copy)
@@ -736,7 +736,58 @@ sched_analyze_reg (struct deps *deps, in
else if (ref == USE)
{
while (--i >= 0)
- SET_REGNO_REG_SET (reg_pending_uses, regno + i);
+ {
+ rtx sets;
+
+ /* We have to prevent extending the lifetime of
+ CLASS_LIKELY_SPILLED registers, no matter where they are
+ used within a basic block. SH -m4-nofpu -O -fschedule-insns
+ testcase extracted from newlib/libm/math/k_rem_pio2.c :
+ void
+ __kernel_rem_pio2 ()
+ {
+ int jz, jk, iq[20], i, j, k;
+ double z, f[20], fq[20], q[20];
+
+ for (i = 0, j = jz, z = q[1]; j > 0; i++, j--)
+ {
+ iq[i] = (int) (z - 1.);
+ z = q[0] + 1.;
+ }
+
+ for (k = 1; iq[jk - k] == 0; k++);
+
+ for (i = 1; i <= 1 + k; i++)
+ q[i] = 1.;
+ }
+ Here, the problem is that a (reg:DF 0 r0) result from an
+ sfunc conflicts with an R0_REGS requirement from a store
+ that the scheduler has placed before the copy of the result
+ to a pseudo. */
+ if (next_nonnote_insn (insn)
+ && !reload_completed && !fixed_regs[regno+i]
+ && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (regno + i))
+ && (sets = deps->reg_last[regno+i].sets))
+ {
+ rtx set = XEXP (sets, 0);
+ rtx curr;
+
+ for (curr = insn; curr != set;
+ curr = prev_nonnote_insn (curr))
+ CANT_MOVE (curr) = 1;
+ CANT_MOVE (set) = 1;
+ for (curr = next_nonnote_insn (set);
+ curr != insn && SCHED_GROUP_P (curr);)
+ curr = next_nonnote_insn (curr);
+ for (; curr != insn; curr = next_nonnote_insn (curr))
+ {
+ SCHED_GROUP_P (curr) = 1;
+ fixup_sched_groups (curr);
+ }
+ SCHED_GROUP_P (insn) = 1;
+ }
+ SET_REGNO_REG_SET (reg_pending_uses, regno + i);
+ }
}
else
{