Backports from trunk to 4.5
Jakub Jelinek
jakub@redhat.com
Tue Dec 7 15:40:00 GMT 2010
Hi!
I've backported a dozen minus one (i.e., eleven) patches of mine from trunk to 4.5, committed
after bootstrapping/regtesting them on x86_64-linux and i686-linux on the
branch.
Jakub
-------------- next part --------------
2010-12-07 Jakub Jelinek <jakub@redhat.com>
Backport from mainline
2010-11-18 Jakub Jelinek <jakub@redhat.com>
PR middle-end/46534
* builtins.c (fold_builtin_printf): Don't copy and modify string
before build_string_literal, instead modify what
build_string_literal returned.
* gcc.c-torture/compile/pr46534.c: New test.
--- gcc/builtins.c (revision 166917)
+++ gcc/builtins.c (revision 166918)
@@ -12537,15 +12537,30 @@ fold_builtin_printf (location_t loc, tre
{
/* If the string was "string\n", call puts("string"). */
size_t len = strlen (str);
- if ((unsigned char)str[len - 1] == target_newline)
+ if ((unsigned char)str[len - 1] == target_newline
+ && (size_t) (int) len == len
+ && (int) len > 0)
{
+ char *newstr;
+ tree offset_node, string_cst;
+
/* Create a NUL-terminated string that's one char shorter
than the original, stripping off the trailing '\n'. */
- char *newstr = XALLOCAVEC (char, len);
- memcpy (newstr, str, len - 1);
- newstr[len - 1] = 0;
-
- newarg = build_string_literal (len, newstr);
+ newarg = build_string_literal (len, str);
+ string_cst = string_constant (newarg, &offset_node);
+#ifdef ENABLE_CHECKING
+ gcc_assert (string_cst
+ && (TREE_STRING_LENGTH (string_cst)
+ == (int) len)
+ && integer_zerop (offset_node)
+ && (unsigned char)
+ TREE_STRING_POINTER (string_cst)[len - 1]
+ == target_newline);
+#endif
+ /* build_string_literal creates a new STRING_CST,
+ modify it in place to avoid double copying. */
+ newstr = CONST_CAST (char *, TREE_STRING_POINTER (string_cst));
+ newstr[len - 1] = '\0';
if (fn_puts)
call = build_call_expr_loc (loc, fn_puts, 1, newarg);
}
--- gcc/testsuite/gcc.c-torture/compile/pr46534.c (revision 0)
+++ gcc/testsuite/gcc.c-torture/compile/pr46534.c (revision 166918)
@@ -0,0 +1,17 @@
+/* PR middle-end/46534 */
+
+extern int printf (const char *, ...);
+
+#define S1 " "
+#define S2 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1
+#define S3 S2 S2 S2 S2 S2 S2 S2 S2 S2 S2
+#define S4 S3 S3 S3 S3 S3 S3 S3 S3 S3 S3
+#define S5 S4 S4 S4 S4 S4 S4 S4 S4 S4 S4
+#define S6 S5 S5 S5 S5 S5 S5 S5 S5 S5 S5
+#define S7 S6 S6 S6 S6 S6 S6 S6 S6 S6 S6
+
+void
+foo (void)
+{
+ printf (S7 "\n");
+}
-------------- next part --------------
2010-12-07 Jakub Jelinek <jakub@redhat.com>
Backport from mainline
2010-12-02 Jakub Jelinek <jakub@redhat.com>
PR fortran/46753
* trans-openmp.c (gfc_trans_omp_do): Use build2_loc instead of
fold_build2_loc for OMP_FOR conditions.
* libgomp.fortran/pr46753.f90: New test.
--- gcc/fortran/trans-openmp.c (revision 167371)
+++ gcc/fortran/trans-openmp.c (revision 167372)
@@ -1243,8 +1243,9 @@ gfc_trans_omp_do (gfc_code *code, stmtbl
if (simple)
{
TREE_VEC_ELT (init, i) = build2_v (MODIFY_EXPR, dovar, from);
- TREE_VEC_ELT (cond, i) = fold_build2 (simple > 0 ? LE_EXPR : GE_EXPR,
- boolean_type_node, dovar, to);
+ /* The condition should not be folded. */
+ TREE_VEC_ELT (cond, i) = build2 (simple > 0 ? LE_EXPR : GE_EXPR,
+ boolean_type_node, dovar, to);
TREE_VEC_ELT (incr, i) = fold_build2 (PLUS_EXPR, type, dovar, step);
TREE_VEC_ELT (incr, i) = fold_build2 (MODIFY_EXPR, type, dovar,
TREE_VEC_ELT (incr, i));
@@ -1265,8 +1266,9 @@ gfc_trans_omp_do (gfc_code *code, stmtbl
count = gfc_create_var (type, "count");
TREE_VEC_ELT (init, i) = build2_v (MODIFY_EXPR, count,
build_int_cst (type, 0));
- TREE_VEC_ELT (cond, i) = fold_build2 (LT_EXPR, boolean_type_node,
- count, tmp);
+ /* The condition should not be folded. */
+ TREE_VEC_ELT (cond, i) = build2 (LT_EXPR, boolean_type_node,
+ count, tmp);
TREE_VEC_ELT (incr, i) = fold_build2 (PLUS_EXPR, type, count,
build_int_cst (type, 1));
TREE_VEC_ELT (incr, i) = fold_build2 (MODIFY_EXPR, type,
--- libgomp/testsuite/libgomp.fortran/pr46753.f90 (revision 0)
+++ libgomp/testsuite/libgomp.fortran/pr46753.f90 (revision 167372)
@@ -0,0 +1,17 @@
+! PR fortran/46753
+! { dg-do run }
+
+ integer :: i, j
+ j = 0
+!$omp parallel do reduction(+:j)
+ do i = 2147483636, 2147483646
+ j = j + 1
+ end do
+ if (j.ne.11) call abort
+ j = 0
+!$omp parallel do reduction(+:j)
+ do i = -2147483637, -2147483647, -1
+ j = j + 1
+ end do
+ if (j.ne.11) call abort
+end
-------------- next part --------------
2010-12-07 Jakub Jelinek <jakub@redhat.com>
Backport from mainline
2010-11-24 Jakub Jelinek <jakub@redhat.com>
PR middle-end/46629
* cfgexpand.c (maybe_cleanup_end_of_block): Test NEXT_INSN (insn)
instead of insn with any_condjump_p.
2010-11-23 Jakub Jelinek <jakub@redhat.com>
PR middle-end/46499
* cfgexpand.c (maybe_cleanup_end_of_block): Remove also BARRIERs
following unconditional jumps.
* gcc.dg/pr46499-1.c: New test.
* gcc.dg/pr46499-2.c: New test.
--- gcc/cfgexpand.c (revision 167081)
+++ gcc/cfgexpand.c (revision 167082)
@@ -1694,7 +1694,14 @@ maybe_cleanup_end_of_block (edge e, rtx
{
insn = PREV_INSN (insn);
if (JUMP_P (NEXT_INSN (insn)))
- delete_insn (NEXT_INSN (insn));
+ {
+ if (!any_condjump_p (NEXT_INSN (insn)))
+ {
+ gcc_assert (BARRIER_P (NEXT_INSN (NEXT_INSN (insn))));
+ delete_insn (NEXT_INSN (NEXT_INSN (insn)));
+ }
+ delete_insn (NEXT_INSN (insn));
+ }
}
}
}
--- gcc/testsuite/gcc.dg/pr46499-1.c (revision 0)
+++ gcc/testsuite/gcc.dg/pr46499-1.c (revision 167082)
@@ -0,0 +1,31 @@
+/* PR middle-end/46499 */
+/* { dg-do run } */
+/* { dg-options "-O -fno-omit-frame-pointer -fno-tree-ccp -fno-tree-dominator-opts -finline-small-functions" } */
+
+extern void abort (void);
+
+int count = 0;
+
+int
+foo (void)
+{
+ count++;
+ return 0;
+}
+
+int
+bar (void)
+{
+ count++;
+ return 0;
+}
+
+int
+main ()
+{
+ if ((foo () == 1) & (bar () == 1))
+ abort ();
+ if (count != 2)
+ abort ();
+ return 0;
+}
--- gcc/testsuite/gcc.dg/pr46499-2.c (revision 0)
+++ gcc/testsuite/gcc.dg/pr46499-2.c (revision 167082)
@@ -0,0 +1,19 @@
+/* PR middle-end/46499 */
+/* { dg-do compile } */
+/* { dg-options "-O -fno-tree-ccp -fno-tree-dominator-opts" } */
+
+extern void abort (void);
+
+static inline int
+foo (void)
+{
+ return 0;
+}
+
+int
+main ()
+{
+ if ((foo () == 1) & (foo () == 1))
+ abort ();
+ return 0;
+}
-------------- next part --------------
2010-12-07 Jakub Jelinek <jakub@redhat.com>
Backport from mainline
2010-12-06 Jakub Jelinek <jakub@redhat.com>
PR target/43897
* config/ia64/ia64.c (rtx_needs_barrier): Handle asm CLOBBER
as a store into that register.
* gcc.target/ia64/pr43897.c: New test.
--- gcc/config/ia64/ia64.c (revision 167515)
+++ gcc/config/ia64/ia64.c (revision 167516)
@@ -6320,6 +6320,17 @@ rtx_needs_barrier (rtx x, struct reg_fla
break;
case CLOBBER:
+ if (REG_P (XEXP (pat, 0))
+ && extract_asm_operands (x) != NULL_RTX
+ && REGNO (XEXP (pat, 0)) != AR_UNAT_REGNUM)
+ {
+ new_flags.is_write = 1;
+ need_barrier |= rtx_needs_barrier (XEXP (pat, 0),
+ new_flags, pred);
+ new_flags = flags;
+ }
+ break;
+
case RETURN:
break;
--- gcc/testsuite/gcc.target/ia64/pr43897.c (revision 0)
+++ gcc/testsuite/gcc.target/ia64/pr43897.c (revision 167516)
@@ -0,0 +1,12 @@
+/* PR target/43897 */
+/* { dg-do assemble } */
+/* { dg-options "-O2" } */
+
+int
+sub (int i)
+{
+ float tmp;
+ if (i)
+ __asm__ __volatile__ ("frcpa.s0 %0,p1=f0,f0":"=f" (tmp)::"p1");
+ return i + 10;
+}
-------------- next part --------------
2010-12-07 Jakub Jelinek <jakub@redhat.com>
Backport from mainline
2010-12-06 Jakub Jelinek <jakub@redhat.com>
PR rtl-optimization/46777
* cfglayout.c (duplicate_insn_chain): Avoid duplicating
also barrier after tablejump.
* gcc.dg/pr46777.c: New test.
--- gcc/cfglayout.c (revision 167512)
+++ gcc/cfglayout.c (revision 167513)
@@ -1177,7 +1177,20 @@ duplicate_insn_chain (rtx from, rtx to)
moved far from original jump. */
if (GET_CODE (PATTERN (insn)) == ADDR_VEC
|| GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
- break;
+ {
+ /* Avoid copying following barrier as well if any
+ (and debug insns in between). */
+ rtx next;
+
+ for (next = NEXT_INSN (insn);
+ next != NEXT_INSN (to);
+ next = NEXT_INSN (next))
+ if (!DEBUG_INSN_P (next))
+ break;
+ if (next != NEXT_INSN (to) && BARRIER_P (next))
+ insn = next;
+ break;
+ }
copy = emit_copy_of_insn_after (insn, get_last_insn ());
maybe_copy_prologue_epilogue_insn (insn, copy);
break;
--- gcc/testsuite/gcc.dg/pr46777.c (revision 0)
+++ gcc/testsuite/gcc.dg/pr46777.c (revision 167513)
@@ -0,0 +1,49 @@
+/* PR rtl-optimization/46777 */
+/* { dg-do compile } */
+/* { dg-options "-fgcse -O -fno-tree-dominator-opts -funroll-loops" } */
+
+struct S { char s[256]; };
+
+static inline int
+foo (int x, int y)
+{
+ switch (x)
+ {
+ case 1:
+ case 2:
+ return 3;
+ case 3:
+ case 4:
+ return 2;
+ case 5:
+ switch (y)
+ {
+ case 4:
+ return 1;
+ }
+ }
+ return 0;
+}
+
+void
+bar (struct S *x, int *y, int *z, int **w)
+{
+ switch (*y ? x->s[*y] : foo (y[1], y[0]))
+ {
+ case 3:
+ if (y + 2 == z)
+ for (;;)
+ {
+ y += 2;
+ switch (*y ? x->s[*y] : foo (y[1], y[0]))
+ {
+ case 6:
+ break;
+ default:
+ *w = y;
+ }
+ if (y == z)
+ break;
+ }
+ }
+}
-------------- next part --------------
2010-12-07 Jakub Jelinek <jakub@redhat.com>
Backport from mainline
2010-12-02 Jakub Jelinek <jakub@redhat.com>
PR libgomp/45240
* parallel.c (GOMP_parallel_end): Unlock gomp_remaining_threads_lock
at the end if sync builtins aren't supported.
--- libgomp/parallel.c (revision 167369)
+++ libgomp/parallel.c (revision 167370)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2005, 2007, 2008, 2009 Free Software Foundation, Inc.
+/* Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU OpenMP Library (libgomp).
@@ -123,6 +123,7 @@ GOMP_parallel_end (void)
#else
gomp_mutex_lock (&gomp_remaining_threads_lock);
gomp_remaining_threads_count -= team->nthreads - 1;
+ gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
}
}
-------------- next part --------------
2010-12-07 Jakub Jelinek <jakub@redhat.com>
Backport from mainline
2010-11-24 Jakub Jelinek <jakub@redhat.com>
PR rtl-optimization/46614
* sched-deps.c (NON_FLUSH_JUMP_KIND, NON_FLUSH_JUMP_P): Define.
(deps_analyze_insn): Mark JUMP_INSNs in
last_pending_memory_flush that weren't added through
flush_pending_lists with NON_FLUSH_JUMP_KIND.
(sched_analyze_2, sched_analyze_insn): Check NON_FLUSH_JUMP_P
on INSN_LIST instead of JUMP_P check on its operand.
* sched-rgn.c (concat_INSN_LIST): Copy over REG_NOTE_KIND.
PR rtl-optimization/46614
* gcc.dg/pr46614.c: New test.
--- gcc/sched-deps.c (revision 167120)
+++ gcc/sched-deps.c (revision 167121)
@@ -53,6 +53,12 @@ along with GCC; see the file COPYING3.
#define CHECK (false)
#endif
+/* In deps->last_pending_memory_flush marks JUMP_INSNs that weren't
+ added to the list because of flush_pending_lists, stands just
+ for itself and not for any other pending memory reads/writes. */
+#define NON_FLUSH_JUMP_KIND REG_DEP_ANTI
+#define NON_FLUSH_JUMP_P(x) (REG_NOTE_KIND (x) == NON_FLUSH_JUMP_KIND)
+
/* Holds current parameters for the dependency analyzer. */
struct sched_deps_info_def *sched_deps_info;
@@ -2484,7 +2490,7 @@ sched_analyze_2 (struct deps_desc *deps,
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
{
- if (! JUMP_P (XEXP (u, 0)))
+ if (! NON_FLUSH_JUMP_P (u))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
else if (deps_may_trap_p (x))
{
@@ -2796,8 +2802,7 @@ sched_analyze_insn (struct deps_desc *de
REG_DEP_ANTI);
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- if (! JUMP_P (XEXP (u, 0))
- || !sel_sched_p ())
+ if (! NON_FLUSH_JUMP_P (u) || !sel_sched_p ())
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
@@ -3242,8 +3247,15 @@ deps_analyze_insn (struct deps_desc *dep
if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
flush_pending_lists (deps, insn, true, true);
else
- deps->last_pending_memory_flush
- = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
+ {
+ deps->last_pending_memory_flush
+ = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
+ /* Signal to sched_analyze_insn that this jump stands
+ just for its own, not any other pending memory
+ reads/writes flush_pending_lists had to flush. */
+ PUT_REG_NOTE_KIND (deps->last_pending_memory_flush,
+ NON_FLUSH_JUMP_KIND);
+ }
}
sched_analyze_insn (deps, PATTERN (insn), insn);
--- gcc/sched-rgn.c (revision 167120)
+++ gcc/sched-rgn.c (revision 167121)
@@ -2574,7 +2574,10 @@ concat_INSN_LIST (rtx copy, rtx old)
{
rtx new_rtx = old;
for (; copy ; copy = XEXP (copy, 1))
- new_rtx = alloc_INSN_LIST (XEXP (copy, 0), new_rtx);
+ {
+ new_rtx = alloc_INSN_LIST (XEXP (copy, 0), new_rtx);
+ PUT_REG_NOTE_KIND (new_rtx, REG_NOTE_KIND (copy));
+ }
return new_rtx;
}
--- gcc/testsuite/gcc.dg/pr46614.c (revision 0)
+++ gcc/testsuite/gcc.dg/pr46614.c (revision 167121)
@@ -0,0 +1,56 @@
+/* PR rtl-optimization/46614 */
+/* { dg-do run } */
+/* { dg-options "-O -fno-rename-registers -fsched2-use-superblocks -fschedule-insns2 -funroll-loops" } */
+
+extern void abort (void);
+
+struct S
+{
+ unsigned char a;
+ unsigned char b;
+ unsigned int c;
+ unsigned int e;
+ unsigned char f;
+ unsigned int g;
+};
+
+void bar (struct S *x)
+{
+ int i;
+ struct S *p = x;
+ struct S r[16];
+ unsigned j;
+ for (i = 0; i < 16; i++)
+ {
+ r[i].c = p->b + p->c;
+ j = p->c + p->f;
+ r[i].a = j + p->b;
+ r[i].f = p->f + p->e;
+ r[i].g = p->b + p->c;
+ }
+ for (i = 0; i < 16; i++)
+ {
+ if (r[i].c != x[i].b + x[i].c
+ || r[i].a != x[i].c + x[i].f + x[i].b
+ || r[i].f != x[i].f + x[i].e
+ || r[i].g != x[i].b + x[i].c)
+ abort ();
+ }
+ for (i = 0; i < 16; i++)
+ {
+ r[i].b = p->c;
+ if (r[i].b != x[i].c)
+ abort ();
+ }
+}
+
+int
+main ()
+{
+ int i;
+ struct S x[16];
+ for (i = 0; i < 16; i++)
+ x[i].b = x[i].c = x[i].e = x[i].f = 5;
+ bar (x);
+ return 0;
+}
-------------- next part --------------
2010-12-07 Jakub Jelinek <jakub@redhat.com>
Backport from mainline
2010-11-20 Jakub Jelinek <jakub@redhat.com>
PR c++/46538
* decl.c (cp_make_fname_decl): Return error_mark_node if
current_binding_level has already sk_function_parms kind.
* g++.dg/other/error34.C: New test.
--- gcc/cp/decl.c (revision 166973)
+++ gcc/cp/decl.c (revision 166974)
@@ -3687,6 +3687,8 @@ cp_make_fname_decl (location_t loc, tree
if (current_function_decl)
{
struct cp_binding_level *b = current_binding_level;
+ if (b->kind == sk_function_parms)
+ return error_mark_node;
while (b->level_chain->kind != sk_function_parms)
b = b->level_chain;
pushdecl_with_scope (decl, b, /*is_friend=*/false);
--- gcc/testsuite/g++.dg/other/error34.C (revision 0)
+++ gcc/testsuite/g++.dg/other/error34.C (revision 166974)
@@ -0,0 +1,6 @@
+// PR c++/46538
+// { dg-do compile }
+// { dg-options "" }
+
+S () : str(__PRETTY_FUNCTION__) {} // { dg-error "forbids declaration" }
+// { dg-error "only constructors" "" { target *-*-* } 5 }
-------------- next part --------------
2010-12-07 Jakub Jelinek <jakub@redhat.com>
Backport from mainline
2010-11-19 Jakub Jelinek <jakub@redhat.com>
PR target/45870
* dwarf2out.c (const_ok_for_output_1): Don't complain about
non-delegitimized TLS UNSPECs.
--- gcc/dwarf2out.c (revision 166931)
+++ gcc/dwarf2out.c (revision 166932)
@@ -13503,11 +13503,18 @@ const_ok_for_output_1 (rtx *rtlp, void *
/* If delegitimize_address couldn't do anything with the UNSPEC, assume
we can't express it in the debug info. */
#ifdef ENABLE_CHECKING
- inform (current_function_decl
- ? DECL_SOURCE_LOCATION (current_function_decl)
- : UNKNOWN_LOCATION,
- "non-delegitimized UNSPEC %d found in variable location",
- XINT (rtl, 1));
+ /* Don't complain about TLS UNSPECs, those are just too hard to
+ delegitimize. */
+ if (XVECLEN (rtl, 0) != 1
+ || GET_CODE (XVECEXP (rtl, 0, 0)) != SYMBOL_REF
+ || SYMBOL_REF_DECL (XVECEXP (rtl, 0, 0)) == NULL
+ || TREE_CODE (SYMBOL_REF_DECL (XVECEXP (rtl, 0, 0))) != VAR_DECL
+ || !DECL_THREAD_LOCAL_P (SYMBOL_REF_DECL (XVECEXP (rtl, 0, 0))))
+ inform (current_function_decl
+ ? DECL_SOURCE_LOCATION (current_function_decl)
+ : UNKNOWN_LOCATION,
+ "non-delegitimized UNSPEC %d found in variable location",
+ XINT (rtl, 1));
#endif
expansion_failed (NULL_TREE, rtl,
"UNSPEC hasn't been delegitimized.\n");
-------------- next part --------------
2010-12-07 Jakub Jelinek <jakub@redhat.com>
Backport from mainline
2010-11-17 Jakub Jelinek <jakub@redhat.com>
PR rtl-optimization/46440
* combine.c (update_cfg_for_uncondjump): When changing
an indirect jump into unconditional jump, remove BARRIERs
from bb's footer.
* gcc.dg/pr46440.c: New test.
--- gcc/combine.c (revision 166866)
+++ gcc/combine.c (revision 166867)
@@ -2460,7 +2460,25 @@ update_cfg_for_uncondjump (rtx insn)
delete_insn (insn);
if (at_end && EDGE_COUNT (bb->succs) == 1)
- single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
+ {
+ rtx insn;
+
+ single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
+
+ /* Remove barriers from the footer if there are any. */
+ for (insn = bb->il.rtl->footer; insn; insn = NEXT_INSN (insn))
+ if (BARRIER_P (insn))
+ {
+ if (PREV_INSN (insn))
+ NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
+ else
+ bb->il.rtl->footer = NEXT_INSN (insn);
+ if (NEXT_INSN (insn))
+ PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
+ }
+ else if (LABEL_P (insn))
+ break;
+ }
}
/* Try to combine the insns I0, I1 and I2 into I3.
--- gcc/testsuite/gcc.dg/pr46440.c (revision 0)
+++ gcc/testsuite/gcc.dg/pr46440.c (revision 166867)
@@ -0,0 +1,25 @@
+/* PR rtl-optimization/46440 */
+/* { dg-do compile } */
+/* { dg-options "-O -fstack-protector -fno-tree-dominator-opts -fno-tree-fre" } */
+/* { dg-require-effective-target fstack_protector } */
+
+int i;
+
+void bar (char *);
+
+void
+foo (void)
+{
+ void *l;
+ char c[64];
+ bar (c);
+ i = 1;
+ if (i)
+ l = &&l1;
+ else
+ l = &&l2;
+ goto *l;
+l2:
+ __builtin_abort ();
+l1:;
+}
-------------- next part --------------
2010-12-07 Jakub Jelinek <jakub@redhat.com>
Backport from mainline
2010-11-16 Jakub Jelinek <jakub@redhat.com>
PR c++/46401
* c-common.c (warning_candidate_p): Don't track non-const calls
or STRING_CSTs.
* g++.dg/warn/Wsequence-point-3.C: New test.
--- gcc/c-common.c (revision 166822)
+++ gcc/c-common.c (revision 166823)
@@ -2324,10 +2324,26 @@ warn_for_collisions (struct tlist *list)
static int
warning_candidate_p (tree x)
{
- /* !VOID_TYPE_P (TREE_TYPE (x)) is workaround for cp/tree.c
+ if (DECL_P (x) && DECL_ARTIFICIAL (x))
+ return 0;
+
+ /* VOID_TYPE_P (TREE_TYPE (x)) is workaround for cp/tree.c
(lvalue_p) crash on TRY/CATCH. */
- return !(DECL_P (x) && DECL_ARTIFICIAL (x))
- && TREE_TYPE (x) && !VOID_TYPE_P (TREE_TYPE (x)) && lvalue_p (x);
+ if (TREE_TYPE (x) == NULL_TREE || VOID_TYPE_P (TREE_TYPE (x)))
+ return 0;
+
+ if (!lvalue_p (x))
+ return 0;
+
+ /* No point to track non-const calls, they will never satisfy
+ operand_equal_p. */
+ if (TREE_CODE (x) == CALL_EXPR && (call_expr_flags (x) & ECF_CONST) == 0)
+ return 0;
+
+ if (TREE_CODE (x) == STRING_CST)
+ return 0;
+
+ return 1;
}
/* Return nonzero if X and Y appear to be the same candidate (or NULL) */
--- gcc/testsuite/g++.dg/warn/Wsequence-point-3.C (revision 0)
+++ gcc/testsuite/g++.dg/warn/Wsequence-point-3.C (revision 166823)
@@ -0,0 +1,20 @@
+// PR c++/46401
+// { dg-do compile }
+// { dg-options "-Wsequence-point" }
+
+struct S
+{
+ S ();
+ S &operator<< (const char *);
+ S (const S &);
+};
+
+#define N1(n) << #n
+#define N2(n) N1(n)
+#define N3(n) N2(n##0) N2(n##1) N2(n##2) N2(n##3) N2(n##4) \
+ N2(n##5) N2(n##6) N2(n##7) N2(n##8) N2(n##9)
+#define N4(n) N3(n##0) N3(n##1) N3(n##2) N3(n##3) N3(n##4) \
+ N3(n##5) N3(n##6) N3(n##7) N3(n##8) N3(n##9)
+#define N5(n) N4(n##0) N4(n##1) N4(n##2) N4(n##3) N4(n##4) \
+ N4(n##5) N4(n##6) N4(n##7) N4(n##8) N4(n##9)
+S s = S () N5(a) N5(b);
More information about the Gcc-patches
mailing list