This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.
[PATCH] avoid cselib rtx_equal_for_cselib_1 infinite recursion (PR debug/80025)
- From: Jakub Jelinek <jakub at redhat dot com>
- To: Alexandre Oliva <aoliva at redhat dot com>, Jeff Law <law at redhat dot com>, Bernd Schmidt <bschmidt at redhat dot com>
- Cc: gcc-patches at gcc dot gnu dot org
- Date: Thu, 23 Mar 2017 21:39:16 +0100
- Subject: [PATCH] avoid cselib rtx_equal_for_cselib_1 infinite recursion (PR debug/80025)
- Authentication-results: sourceware.org; auth=none
- Authentication-results: ext-mx10.extmail.prod.ext.phx2.redhat.com; dmarc=none (p=none dis=none) header.from=redhat.com
- Authentication-results: ext-mx10.extmail.prod.ext.phx2.redhat.com; spf=pass smtp.mailfrom=jakub at redhat dot com
- Dkim-filter: OpenDKIM Filter v2.11.0 mx1.redhat.com CD5AF61D1D
- Dmarc-filter: OpenDMARC Filter v1.3.2 mx1.redhat.com CD5AF61D1D
- References: <orwpbiuu6w.fsf@lxoliva.fsfla.org> <20170323140004.GS11094@tucnak>
- Reply-to: Jakub Jelinek <jakub at redhat dot com>
Hi!
On Thu, Mar 23, 2017 at 03:00:04PM +0100, Jakub Jelinek wrote:
> On Tue, Mar 21, 2017 at 07:43:51PM -0300, Alexandre Oliva wrote:
> > When two VALUEs are recorded in the cselib equivalence table such that
> > they are equivalent to each other XORed with the same expression, if
> > we started a cselib equivalence test between say the odd one and the
> > even one, we'd end up recursing to compare the even one with the odd
> > one, and then again, indefinitely.
> >
> > This patch cuts off this infinite recursion by recognizing the XOR
> > special case (it's the only binary commutative operation in which
> > applying one of the operands of an operation to the result of that
> > operation brings you back to the other operand) and determining
> > whether we have an equivalence without recursing down the infinite
> > path.
>
> Is XOR the only special case though? E.g. PLUS or MINUS with
> the most negative constant act the same, and I don't see why if unlucky
> enough with reverse ops etc. you couldn't get something similar through
> combination thereof, perhaps indirectly through multiple VALUEs.
>
> So I think it is safer to just put a cap on the rtx_equal_for_cselib_1
> recursion depth (should be enough to bump the depth only when walking
> locs of a VALUE). I'll test such a patch.
Here is that patch, bootstrapped/regtested on x86_64-linux and i686-linux,
ok for trunk? Or shall I turn that 128 into a --param?
2017-03-23 Jakub Jelinek <jakub@redhat.com>
PR debug/80025
* cselib.h (rtx_equal_for_cselib_1): Add depth argument.
(rtx_equal_for_cselib_p): Pass 0 to it.
* cselib.c (cselib_hasher::equal): Likewise.
(rtx_equal_for_cselib_1): Add depth argument. If depth
is 128, don't look up VALUE locs and punt. Increment
depth in recursive calls when walking VALUE locs.
* gcc.dg/torture/pr80025.c: New test.
--- gcc/cselib.h.jj 2017-01-01 12:45:37.000000000 +0100
+++ gcc/cselib.h 2017-03-23 14:06:35.348504435 +0100
@@ -82,7 +82,7 @@ extern void cselib_finish (void);
extern void cselib_process_insn (rtx_insn *);
extern bool fp_setter_insn (rtx_insn *);
extern machine_mode cselib_reg_set_mode (const_rtx);
-extern int rtx_equal_for_cselib_1 (rtx, rtx, machine_mode);
+extern int rtx_equal_for_cselib_1 (rtx, rtx, machine_mode, int);
extern int references_value_p (const_rtx, int);
extern rtx cselib_expand_value_rtx (rtx, bitmap, int);
typedef rtx (*cselib_expand_callback)(rtx, bitmap, int, void *);
@@ -134,7 +134,7 @@ rtx_equal_for_cselib_p (rtx x, rtx y)
if (x == y)
return 1;
- return rtx_equal_for_cselib_1 (x, y, VOIDmode);
+ return rtx_equal_for_cselib_1 (x, y, VOIDmode, 0);
}
#endif /* GCC_CSELIB_H */
--- gcc/cselib.c.jj 2017-01-01 12:45:39.000000000 +0100
+++ gcc/cselib.c 2017-03-23 14:38:50.238487353 +0100
@@ -125,7 +125,7 @@ cselib_hasher::equal (const cselib_val *
/* We don't guarantee that distinct rtx's have different hash values,
so we need to do a comparison. */
for (l = v->locs; l; l = l->next)
- if (rtx_equal_for_cselib_1 (l->loc, x, memmode))
+ if (rtx_equal_for_cselib_1 (l->loc, x, memmode, 0))
{
promote_debug_loc (l);
return true;
@@ -834,7 +834,7 @@ autoinc_split (rtx x, rtx *off, machine_
addresses, MEMMODE should be VOIDmode. */
int
-rtx_equal_for_cselib_1 (rtx x, rtx y, machine_mode memmode)
+rtx_equal_for_cselib_1 (rtx x, rtx y, machine_mode memmode, int depth)
{
enum rtx_code code;
const char *fmt;
@@ -867,6 +867,9 @@ rtx_equal_for_cselib_1 (rtx x, rtx y, ma
if (GET_CODE (y) == VALUE)
return e == canonical_cselib_val (CSELIB_VAL_PTR (y));
+ if (depth == 128)
+ return 0;
+
for (l = e->locs; l; l = l->next)
{
rtx t = l->loc;
@@ -876,7 +879,7 @@ rtx_equal_for_cselib_1 (rtx x, rtx y, ma
list. */
if (REG_P (t) || MEM_P (t) || GET_CODE (t) == VALUE)
continue;
- else if (rtx_equal_for_cselib_1 (t, y, memmode))
+ else if (rtx_equal_for_cselib_1 (t, y, memmode, depth + 1))
return 1;
}
@@ -887,13 +890,16 @@ rtx_equal_for_cselib_1 (rtx x, rtx y, ma
cselib_val *e = canonical_cselib_val (CSELIB_VAL_PTR (y));
struct elt_loc_list *l;
+ if (depth == 128)
+ return 0;
+
for (l = e->locs; l; l = l->next)
{
rtx t = l->loc;
if (REG_P (t) || MEM_P (t) || GET_CODE (t) == VALUE)
continue;
- else if (rtx_equal_for_cselib_1 (x, t, memmode))
+ else if (rtx_equal_for_cselib_1 (x, t, memmode, depth + 1))
return 1;
}
@@ -914,12 +920,12 @@ rtx_equal_for_cselib_1 (rtx x, rtx y, ma
if (!xoff != !yoff)
return 0;
- if (xoff && !rtx_equal_for_cselib_1 (xoff, yoff, memmode))
+ if (xoff && !rtx_equal_for_cselib_1 (xoff, yoff, memmode, depth))
return 0;
/* Don't recurse if nothing changed. */
if (x != xorig || y != yorig)
- return rtx_equal_for_cselib_1 (x, y, memmode);
+ return rtx_equal_for_cselib_1 (x, y, memmode, depth);
return 0;
}
@@ -953,7 +959,8 @@ rtx_equal_for_cselib_1 (rtx x, rtx y, ma
case MEM:
/* We have to compare any autoinc operations in the addresses
using this MEM's mode. */
- return rtx_equal_for_cselib_1 (XEXP (x, 0), XEXP (y, 0), GET_MODE (x));
+ return rtx_equal_for_cselib_1 (XEXP (x, 0), XEXP (y, 0), GET_MODE (x),
+ depth);
default:
break;
@@ -988,17 +995,20 @@ rtx_equal_for_cselib_1 (rtx x, rtx y, ma
/* And the corresponding elements must match. */
for (j = 0; j < XVECLEN (x, i); j++)
if (! rtx_equal_for_cselib_1 (XVECEXP (x, i, j),
- XVECEXP (y, i, j), memmode))
+ XVECEXP (y, i, j), memmode, depth))
return 0;
break;
case 'e':
if (i == 1
&& targetm.commutative_p (x, UNKNOWN)
- && rtx_equal_for_cselib_1 (XEXP (x, 1), XEXP (y, 0), memmode)
- && rtx_equal_for_cselib_1 (XEXP (x, 0), XEXP (y, 1), memmode))
+ && rtx_equal_for_cselib_1 (XEXP (x, 1), XEXP (y, 0), memmode,
+ depth)
+ && rtx_equal_for_cselib_1 (XEXP (x, 0), XEXP (y, 1), memmode,
+ depth))
return 1;
- if (! rtx_equal_for_cselib_1 (XEXP (x, i), XEXP (y, i), memmode))
+ if (! rtx_equal_for_cselib_1 (XEXP (x, i), XEXP (y, i), memmode,
+ depth))
return 0;
break;
--- gcc/testsuite/gcc.dg/torture/pr80025.c.jj 2017-03-23 14:44:34.801024681 +0100
+++ gcc/testsuite/gcc.dg/torture/pr80025.c 2017-03-23 14:43:54.000000000 +0100
@@ -0,0 +1,24 @@
+/* PR debug/80025 */
+/* { dg-do compile } */
+/* { dg-options "-g -ftracer -w" } */
+
+int a;
+long int b, c;
+
+long int
+foo (void)
+{
+}
+
+void
+bar (int x, short int y, unsigned short int z)
+{
+}
+
+int
+baz (void)
+{
+ a -= b;
+ b = !foo ();
+ bar (b ^= (c ^ 1) ? (c ^ 1) : foo (), (__INTPTR_TYPE__) &bar, a);
+}
Jakub