This is the mail archive of the
gcc-patches@gcc.gnu.org
mailing list for the GCC project.
copy_insn_p usage.
- From: Denis Chertykov <denisc at overta dot ru>
- To: Michael Matz <matz at suse dot de>
- Cc: denisc at overta dot ru, gcc-patches at gcc dot gnu dot org
- Date: Sat, 22 Feb 2003 11:53:22 +0300
- Subject: copy_insn_p usage.
Hi Michael !
I have reviewed your last patch and remembered that I have used
copy_insn_p in insert_stores and emit_loads to incorporate
pseudo-stack slots inside move insns without overconstraining such
pseudo-stack slots.
Now this will not work in some cases.
How about the following fix ?
Also, I have not touched the call to `copy_p_type' in `live_out_1'.
IMHO: This is right. (Maybe I'm wrong?)
2003-02-22 Denis Chertykov <denisc at overta dot ru>
* ra-build.c (copy_p_type): New enum.
(copy_p_cache): Use copy_p_type as type of seen.
(copy_insn_p): Handle different types of moves - operate with
copy_p_type.
(remember_move): Handle only moves without spill temporaries.
(update_regnos_mentioned): Likewise.
diff -c3p /root/d/cvs/new-regalloc-branch/gcc/ra-build.c.\~1.1.2.10.\~ /root/d/cvs/new-regalloc-branch/gcc/ra-build.c
*** /root/d/cvs/new-regalloc-branch/gcc/ra-build.c.~1.1.2.10.~ Sat Feb 22 09:58:29 2003
--- /root/d/cvs/new-regalloc-branch/gcc/ra-build.c Sat Feb 22 11:42:31 2003
*************** rtx_to_undefined (x)
*** 204,214 ****
return ret;
}
/* We remember if we've analyzed an insn for being a move insn, and if yes
between which operands. */
struct copy_p_cache
{
! int seen;
rtx source;
rtx target;
};
--- 204,215 ----
return ret;
}
+ enum copy_p_type {COPY_P_MOVE = 1, COPY_P_OTHER = 2, COPY_P_SPILL = 3};
/* We remember if we've analyzed an insn for being a move insn, and if yes
between which operands. */
struct copy_p_cache
{
! enum copy_p_type seen;
rtx source;
rtx target;
};
*************** copy_insn_p (insn, source, target)
*** 240,258 ****
if (copy_cache[uid].seen)
{
/* And if we saw it, if it's actually a copy insn. */
! if (copy_cache[uid].seen == 1)
{
if (source)
*source = copy_cache[uid].source;
if (target)
*target = copy_cache[uid].target;
! return 1;
}
return 0;
}
/* Mark it as seen, but not being a copy insn. */
! copy_cache[uid].seen = 2;
insn = single_set (insn);
if (!insn)
return 0;
--- 241,259 ----
if (copy_cache[uid].seen)
{
/* And if we saw it, if it's actually a copy insn. */
! if (copy_cache[uid].seen != COPY_P_OTHER)
{
if (source)
*source = copy_cache[uid].source;
if (target)
*target = copy_cache[uid].target;
! return copy_cache[uid].seen;
}
return 0;
}
/* Mark it as seen, but not being a copy insn. */
! copy_cache[uid].seen = COPY_P_OTHER;
insn = single_set (insn);
if (!insn)
return 0;
*************** copy_insn_p (insn, source, target)
*** 278,286 ****
/* Copies between hardregs are useless for us, as not coalesable anyway. */
if ((s_regno < FIRST_PSEUDO_REGISTER
! && d_regno < FIRST_PSEUDO_REGISTER)
! || SPILL_SLOT_P (s_regno)
! || SPILL_SLOT_P (d_regno))
return 0;
if (source)
--- 279,285 ----
/* Copies between hardregs are useless for us, as not coalesable anyway. */
if ((s_regno < FIRST_PSEUDO_REGISTER
! && d_regno < FIRST_PSEUDO_REGISTER))
return 0;
if (source)
*************** copy_insn_p (insn, source, target)
*** 289,298 ****
*target = d;
/* Still mark it as seen, but as a copy insn this time. */
! copy_cache[uid].seen = 1;
copy_cache[uid].source = s;
copy_cache[uid].target = d;
! return 1;
}
/* We build webs, as we process the conflicts. For each use we go upward
--- 288,300 ----
*target = d;
/* Still mark it as seen, but as a copy insn this time. */
! if (SPILL_SLOT_P (s_regno) || SPILL_SLOT_P (d_regno))
! copy_cache[uid].seen = COPY_P_SPILL;
! else
! copy_cache[uid].seen = COPY_P_MOVE;
copy_cache[uid].source = s;
copy_cache[uid].target = d;
! return copy_cache[uid].seen;
}
/* We build webs, as we process the conflicts. For each use we go upward
*************** remember_move (insn)
*** 562,568 ****
{
rtx s, d;
SET_BIT (move_handled, INSN_UID (insn));
! if (copy_insn_p (insn, &s, &d))
{
/* Some sanity test for the copy insn. */
struct df_link *slink = DF_INSN_USES (df, insn);
--- 564,570 ----
{
rtx s, d;
SET_BIT (move_handled, INSN_UID (insn));
! if (copy_insn_p (insn, NULL, NULL) == COPY_P_MOVE)
{
/* Some sanity test for the copy insn. */
struct df_link *slink = DF_INSN_USES (df, insn);
*************** update_regnos_mentioned ()
*** 1025,1031 ****
{
/* XXX We should also remember moves over iterations (we already
save the cache, but not the movelist). */
! if (copy_insn_p (insn, NULL, NULL))
remember_move (insn);
}
else if ((bb = BLOCK_FOR_INSN (insn)) != NULL)
--- 1027,1033 ----
{
/* XXX We should also remember moves over iterations (we already
save the cache, but not the movelist). */
! if (copy_insn_p (insn, NULL, NULL) == COPY_P_MOVE)
remember_move (insn);
}
else if ((bb = BLOCK_FOR_INSN (insn)) != NULL)
*************** update_regnos_mentioned ()
*** 1037,1043 ****
if (last_changed_insns
&& bitmap_bit_p (last_changed_insns, INSN_UID (insn)))
copy_cache[INSN_UID (insn)].seen = 0;
! if (copy_insn_p (insn, &source, NULL))
{
remember_move (insn);
bitmap_set_bit (mentioned,
--- 1039,1045 ----
if (last_changed_insns
&& bitmap_bit_p (last_changed_insns, INSN_UID (insn)))
copy_cache[INSN_UID (insn)].seen = 0;
! if (copy_insn_p (insn, &source, NULL) == COPY_P_MOVE)
{
remember_move (insn);
bitmap_set_bit (mentioned,