[25/67] Use is_a <scalar_int_mode> for bitmask optimisations
- From: Richard Sandiford <richard dot sandiford at arm dot com>
- To: gcc-patches at gcc dot gnu dot org
- Date: Fri, 09 Dec 2016 13:09:21 +0000
- Subject: [25/67] Use is_a <scalar_int_mode> for bitmask optimisations
- Authentication-results: sourceware.org; auth=none
- References: <87h96dp8u6.fsf@e105548-lin.cambridge.arm.com>
Explicitly check for scalar_int_mode in code that performs full-mode
bit operations. These operations wouldn't work correctly for vector
modes, for example. In many cases the restriction is already enforced
by a CONST_INT_P check on one of the operands, but in other cases the
guarantee is more indirect.
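As a rough sketch (not part of the patch itself, and with the surrounding
declarations omitted), the conversion applied by each hunk below follows
this pattern, shown here on a condensed version of the UDIV case in
combine.c:

  /* Before: the mode is only known to be a scalar integer indirectly,
     because the second operand is a CONST_INT.  */
  if (CONST_INT_P (XEXP (x, 1))
      && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
    return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);

  /* After: require a scalar_int_mode explicitly and use the more
     specific mode variable from that point on.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && CONST_INT_P (XEXP (x, 1))
      && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
    return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode, XEXP (x, 0), i);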
gcc/
2016-11-24 Richard Sandiford <richard.sandiford@arm.com>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
* combine.c (combine_simplify_rtx): Add checks for
is_a <scalar_int_mode>.
(simplify_if_then_else): Likewise.
(make_field_assignment): Likewise.
(simplify_comparison): Likewise.
* ifcvt.c (noce_try_bitop): Likewise.
* loop-invariant.c (canonicalize_address_split): Likewise.
* simplify-rtx.c (simplify_unary_operation_1): Likewise.
diff --git a/gcc/combine.c b/gcc/combine.c
index 8883194..e1e8457 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -5825,13 +5825,14 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
if (!REG_P (temp)
&& ! (GET_CODE (temp) == SUBREG
&& REG_P (SUBREG_REG (temp)))
- && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
{
rtx temp1 = simplify_shift_const
- (NULL_RTX, ASHIFTRT, mode,
- simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
- GET_MODE_PRECISION (mode) - 1 - i),
- GET_MODE_PRECISION (mode) - 1 - i);
+ (NULL_RTX, ASHIFTRT, int_mode,
+ simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
+ GET_MODE_PRECISION (int_mode) - 1 - i),
+ GET_MODE_PRECISION (int_mode) - 1 - i);
/* If all we did was surround TEMP with the two shifts, we
haven't improved anything, so don't use it. Otherwise,
@@ -5922,12 +5923,15 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
&& !REG_P (XEXP (x, 0))
&& ! (GET_CODE (XEXP (x, 0)) == SUBREG
&& REG_P (SUBREG_REG (XEXP (x, 0))))
- && nonzero_bits (XEXP (x, 0), mode) == 1)
- return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
- simplify_shift_const (NULL_RTX, ASHIFT, mode,
- gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
- GET_MODE_PRECISION (mode) - 1),
- GET_MODE_PRECISION (mode) - 1);
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && nonzero_bits (XEXP (x, 0), int_mode) == 1)
+ return simplify_shift_const
+ (NULL_RTX, ASHIFTRT, int_mode,
+ simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
+ gen_rtx_XOR (int_mode, XEXP (x, 0),
+ const1_rtx),
+ GET_MODE_PRECISION (int_mode) - 1),
+ GET_MODE_PRECISION (int_mode) - 1);
/* If we are adding two things that have no bits in common, convert
the addition into an IOR. This will often be further simplified,
@@ -5965,11 +5969,12 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
case MINUS:
/* (minus <foo> (and <foo> (const_int -pow2))) becomes
(and <foo> (const_int pow2-1)) */
- if (GET_CODE (XEXP (x, 1)) == AND
+ if (is_a <scalar_int_mode> (mode, &int_mode)
+ && GET_CODE (XEXP (x, 1)) == AND
&& CONST_INT_P (XEXP (XEXP (x, 1), 1))
&& pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
&& rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
- return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
+ return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
-INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
break;
@@ -6000,14 +6005,16 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
case UDIV:
/* If this is a divide by a power of two, treat it as a shift if
its first operand is a shift. */
- if (CONST_INT_P (XEXP (x, 1))
+ if (is_a <scalar_int_mode> (mode, &int_mode)
+ && CONST_INT_P (XEXP (x, 1))
&& (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
&& (GET_CODE (XEXP (x, 0)) == ASHIFT
|| GET_CODE (XEXP (x, 0)) == LSHIFTRT
|| GET_CODE (XEXP (x, 0)) == ASHIFTRT
|| GET_CODE (XEXP (x, 0)) == ROTATE
|| GET_CODE (XEXP (x, 0)) == ROTATERT))
- return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
+ return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
+ XEXP (x, 0), i);
break;
case EQ: case NE:
@@ -6291,22 +6298,28 @@ simplify_if_then_else (rtx x)
std::swap (true_rtx, false_rtx);
}
- /* If we are comparing against zero and the expression being tested has
- only a single bit that might be nonzero, that is its value when it is
- not equal to zero. Similarly if it is known to be -1 or 0. */
-
- if (true_code == EQ && true_val == const0_rtx
- && pow2p_hwi (nzb = nonzero_bits (from, GET_MODE (from))))
- {
- false_code = EQ;
- false_val = gen_int_mode (nzb, GET_MODE (from));
- }
- else if (true_code == EQ && true_val == const0_rtx
- && (num_sign_bit_copies (from, GET_MODE (from))
- == GET_MODE_PRECISION (GET_MODE (from))))
+ scalar_int_mode from_mode;
+ if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
{
- false_code = EQ;
- false_val = constm1_rtx;
+ /* If we are comparing against zero and the expression being
+ tested has only a single bit that might be nonzero, that is
+ its value when it is not equal to zero. Similarly if it is
+ known to be -1 or 0. */
+ if (true_code == EQ
+ && true_val == const0_rtx
+ && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
+ {
+ false_code = EQ;
+ false_val = gen_int_mode (nzb, from_mode);
+ }
+ else if (true_code == EQ
+ && true_val == const0_rtx
+ && (num_sign_bit_copies (from, from_mode)
+ == GET_MODE_PRECISION (from_mode)))
+ {
+ false_code = EQ;
+ false_val = constm1_rtx;
+ }
}
/* Now simplify an arm if we know the value of the register in the
@@ -6556,16 +6569,19 @@ simplify_if_then_else (rtx x)
negation of a single bit, we can convert this operation to a shift. We
can actually do this more generally, but it doesn't seem worth it. */
- if (true_code == NE && XEXP (cond, 1) == const0_rtx
- && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
- && ((1 == nonzero_bits (XEXP (cond, 0), mode)
+ if (true_code == NE
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && XEXP (cond, 1) == const0_rtx
+ && false_rtx == const0_rtx
+ && CONST_INT_P (true_rtx)
+ && ((1 == nonzero_bits (XEXP (cond, 0), int_mode)
&& (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
- || ((num_sign_bit_copies (XEXP (cond, 0), mode)
- == GET_MODE_PRECISION (mode))
+ || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
+ == GET_MODE_PRECISION (int_mode))
&& (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
return
- simplify_shift_const (NULL_RTX, ASHIFT, mode,
- gen_lowpart (mode, XEXP (cond, 0)), i);
+ simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
+ gen_lowpart (int_mode, XEXP (cond, 0)), i);
/* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
non-zero bit in A is C1. */
@@ -9436,7 +9452,11 @@ make_field_assignment (rtx x)
HOST_WIDE_INT pos;
unsigned HOST_WIDE_INT len;
rtx other;
- machine_mode mode;
+
+ /* All the rules in this function are specific to scalar integers. */
+ scalar_int_mode mode;
+ if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
+ return x;
/* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
a clear of a one-bit field. We will have changed it to
@@ -9509,7 +9529,6 @@ make_field_assignment (rtx x)
/* Partial overlap. We can reduce the source AND. */
if ((and_mask & ze_mask) != and_mask)
{
- mode = GET_MODE (src);
src = gen_rtx_AND (mode, XEXP (src, 0),
gen_int_mode (and_mask & ze_mask, mode));
return gen_rtx_SET (dest, src);
@@ -9528,7 +9547,10 @@ make_field_assignment (rtx x)
assignment. The first one we are likely to encounter is an outer
narrowing SUBREG, which we can just strip for the purposes of
identifying the constant-field assignment. */
- if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src))
+ scalar_int_mode src_mode = mode;
+ if (GET_CODE (src) == SUBREG
+ && subreg_lowpart_p (src)
+ && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
src = SUBREG_REG (src);
if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
@@ -9574,10 +9596,11 @@ make_field_assignment (rtx x)
else
return x;
- pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
- if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
- || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
- || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
+ pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
+ if (pos < 0
+ || pos + len > GET_MODE_PRECISION (mode)
+ || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
+ || (c1 & nonzero_bits (other, mode)) != 0)
return x;
assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
@@ -9586,18 +9609,17 @@ make_field_assignment (rtx x)
/* The mode to use for the source is the mode of the assignment, or of
what is inside a possible STRICT_LOW_PART. */
- mode = (GET_CODE (assign) == STRICT_LOW_PART
- ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
+ machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
+ ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
/* Shift OTHER right POS places and make it the source, restricting it
to the proper length and mode. */
src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
- GET_MODE (src),
- other, pos),
+ src_mode, other, pos),
dest);
- src = force_to_mode (src, mode,
- GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
+ src = force_to_mode (src, new_mode,
+ len >= HOST_BITS_PER_WIDE_INT
? HOST_WIDE_INT_M1U
: (HOST_WIDE_INT_1U << len) - 1,
0);
@@ -11711,16 +11733,17 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
&& GET_CODE (XEXP (op1, 0)) == ASHIFT
&& GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
&& GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
- && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
- == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
+ && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
+ && (is_a <scalar_int_mode>
+ (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
+ && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
&& CONST_INT_P (XEXP (op0, 1))
&& XEXP (op0, 1) == XEXP (op1, 1)
&& XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
&& XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
&& (INTVAL (XEXP (op0, 1))
- == (GET_MODE_PRECISION (GET_MODE (op0))
- - (GET_MODE_PRECISION
- (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
+ == (GET_MODE_PRECISION (mode)
+ - GET_MODE_PRECISION (inner_mode))))
{
op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c
index b22ed41..876353d 100644
--- a/gcc/ifcvt.c
+++ b/gcc/ifcvt.c
@@ -2839,7 +2839,7 @@ noce_try_bitop (struct noce_if_info *if_info)
{
rtx cond, x, a, result;
rtx_insn *seq;
- machine_mode mode;
+ scalar_int_mode mode;
enum rtx_code code;
int bitnum;
@@ -2847,6 +2847,10 @@ noce_try_bitop (struct noce_if_info *if_info)
cond = if_info->cond;
code = GET_CODE (cond);
+ /* Check for an integer operation. */
+ if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
+ return FALSE;
+
if (!noce_simple_bbs (if_info))
return FALSE;
@@ -2869,7 +2873,6 @@ noce_try_bitop (struct noce_if_info *if_info)
|| ! rtx_equal_p (x, XEXP (cond, 0)))
return FALSE;
bitnum = INTVAL (XEXP (cond, 2));
- mode = GET_MODE (x);
if (BITS_BIG_ENDIAN)
bitnum = GET_MODE_BITSIZE (mode) - 1 - bitnum;
if (bitnum < 0 || bitnum >= HOST_BITS_PER_WIDE_INT)
diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c
index f617241..30fdf44 100644
--- a/gcc/loop-invariant.c
+++ b/gcc/loop-invariant.c
@@ -774,16 +774,16 @@ canonicalize_address_mult (rtx x)
FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
{
rtx sub = *iter;
-
- if (GET_CODE (sub) == ASHIFT
+ scalar_int_mode sub_mode;
+ if (is_a <scalar_int_mode> (GET_MODE (sub), &sub_mode)
+ && GET_CODE (sub) == ASHIFT
&& CONST_INT_P (XEXP (sub, 1))
- && INTVAL (XEXP (sub, 1)) < GET_MODE_BITSIZE (GET_MODE (sub))
+ && INTVAL (XEXP (sub, 1)) < GET_MODE_BITSIZE (sub_mode)
&& INTVAL (XEXP (sub, 1)) >= 0)
{
HOST_WIDE_INT shift = INTVAL (XEXP (sub, 1));
PUT_CODE (sub, MULT);
- XEXP (sub, 1) = gen_int_mode (HOST_WIDE_INT_1 << shift,
- GET_MODE (sub));
+ XEXP (sub, 1) = gen_int_mode (HOST_WIDE_INT_1 << shift, sub_mode);
iter.skip_subrtxes ();
}
}
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index 9f44da9..1e6dd22 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -916,7 +916,7 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
enum rtx_code reversed;
rtx temp;
- scalar_int_mode inner;
+ scalar_int_mode inner, int_mode;
switch (code)
{
@@ -975,10 +975,11 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
so we can perform the above simplification. */
if (STORE_FLAG_VALUE == -1
+ && is_a <scalar_int_mode> (mode, &int_mode)
&& GET_CODE (op) == ASHIFTRT
&& CONST_INT_P (XEXP (op, 1))
- && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
- return simplify_gen_relational (GE, mode, VOIDmode,
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
+ return simplify_gen_relational (GE, int_mode, VOIDmode,
XEXP (op, 0), const0_rtx);
@@ -1328,8 +1329,10 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
return op;
/* If operand is known to be only -1 or 0, convert ABS to NEG. */
- if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
- return gen_rtx_NEG (mode, op);
+ if (is_a <scalar_int_mode> (mode, &int_mode)
+ && (num_sign_bit_copies (op, int_mode)
+ == GET_MODE_PRECISION (int_mode)))
+ return gen_rtx_NEG (int_mode, op);
break;
@@ -1483,12 +1486,13 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
is similarly (zero_extend:M (subreg:O <X>)). */
if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
&& GET_CODE (XEXP (op, 0)) == ASHIFT
+ && is_a <scalar_int_mode> (mode, &int_mode)
&& CONST_INT_P (XEXP (op, 1))
&& XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
&& GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
{
scalar_int_mode tmode;
- gcc_assert (GET_MODE_BITSIZE (mode)
+ gcc_assert (GET_MODE_BITSIZE (int_mode)
> GET_MODE_BITSIZE (GET_MODE (op)));
if (int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
- INTVAL (XEXP (op, 1)), 1).exists (&tmode))
@@ -1498,7 +1502,7 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
if (inner)
return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
? SIGN_EXTEND : ZERO_EXTEND,
- mode, inner, tmode);
+ int_mode, inner, tmode);
}
}
@@ -1599,6 +1603,7 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
GET_MODE_PRECISION (N) - I bits. */
if (GET_CODE (op) == LSHIFTRT
&& GET_CODE (XEXP (op, 0)) == ASHIFT
+ && is_a <scalar_int_mode> (mode, &int_mode)
&& CONST_INT_P (XEXP (op, 1))
&& XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
&& GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
@@ -1610,7 +1615,8 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
rtx inner =
rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
if (inner)
- return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
+ return simplify_gen_unary (ZERO_EXTEND, int_mode,
+ inner, tmode);
}
}