This is the mail archive of the
gcc-patches@gcc.gnu.org
mailing list for the GCC project.
Re: [PATCH] Improve rotation by mode bitsize - 1 (take 2)
- From: Richard Biener <rguenther at suse dot de>
- To: Jakub Jelinek <jakub at redhat dot com>
- Cc: Jan Hubicka <hubicka at ucw dot cz>, Uros Bizjak <ubizjak at gmail dot com>, gcc-patches at gcc dot gnu dot org
- Date: Tue, 14 May 2013 10:27:27 +0200 (CEST)
- Subject: Re: [PATCH] Improve rotation by mode bitsize - 1 (take 2)
- References: <20130509184556 dot GI1377 at tucnak dot redhat dot com> <20130510134849 dot GD23785 at atrey dot karlin dot mff dot cuni dot cz> <20130510140926 dot GN1377 at tucnak dot redhat dot com> <20130510171538 dot GB7824 at kam dot mff dot cuni dot cz> <20130513164358 dot GB1377 at tucnak dot redhat dot com>
On Mon, 13 May 2013, Jakub Jelinek wrote:
> On Fri, May 10, 2013 at 07:15:38PM +0200, Jan Hubicka wrote:
> > It seems to me that it is not different from normalizing reg-10 into reg+(-10)
> > we do for years (and for good reason). It is still target preference when use
> > add and when sub to perform the arithmetic, but it makes sense to keep single
> > canonical form of the expression both in RTL and Gimple.
> >
> > For example we may want to be able to prove that
> > (rotate reg 31) == (rotatert reg 1)
> > is true or
> > (rotate reg 30) == (rotatert reg 2)
> > is also true or cross jump both variants into one instruction.
>
> Ok, this patch reverts my earlier patch and does the canonicalization, for
> now for RTL only. Bootstrapped/regtested on x86_64-linux and i686-linux, ok
> for trunk?
Ok.
Thanks,
Richard.
> 2013-05-13 Jakub Jelinek <jakub@redhat.com>
>
> * expmed.c (expand_shift_1): Canonicalize rotates by
> constant bitsize / 2 to bitsize - 1.
> * simplify-rtx.c (simplify_binary_operation_1) <case ROTATE,
> case ROTATERT>: Likewise.
>
> Revert:
> 2013-05-10 Jakub Jelinek <jakub@redhat.com>
>
> * config/i386/i386.md (rotateinv): New code attr.
> (*<rotate_insn><mode>3_1, *<rotate_insn>si3_1_zext,
> *<rotate_insn>qi3_1_slp): Emit rorl %eax instead of
> roll $31, %eax, etc.
>
> --- gcc/expmed.c.jj 2013-05-13 13:03:31.000000000 +0200
> +++ gcc/expmed.c 2013-05-13 15:22:39.456194286 +0200
> @@ -2122,6 +2122,20 @@ expand_shift_1 (enum tree_code code, enu
> op1 = SUBREG_REG (op1);
> }
>
> + /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
> + prefer left rotation, if op1 is from bitsize / 2 + 1 to
> + bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
> + amount instead. */
> + if (rotate
> + && CONST_INT_P (op1)
> + && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (mode) / 2 + left,
> + GET_MODE_BITSIZE (mode) - 1))
> + {
> + op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
> + left = !left;
> + code = left ? LROTATE_EXPR : RROTATE_EXPR;
> + }
> +
> if (op1 == const0_rtx)
> return shifted;
>
> --- gcc/simplify-rtx.c.jj 2013-05-02 12:42:25.000000000 +0200
> +++ gcc/simplify-rtx.c 2013-05-13 15:48:31.171182716 +0200
> @@ -3250,6 +3250,18 @@ simplify_binary_operation_1 (enum rtx_co
>
> case ROTATERT:
> case ROTATE:
> + /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
> + prefer left rotation, if op1 is from bitsize / 2 + 1 to
> + bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
> + amount instead. */
> + if (CONST_INT_P (trueop1)
> + && IN_RANGE (INTVAL (trueop1),
> + GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
> + GET_MODE_BITSIZE (mode) - 1))
> + return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
> + mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
> + - INTVAL (trueop1)));
> + /* FALLTHRU */
> case ASHIFTRT:
> if (trueop1 == CONST0_RTX (mode))
> return op0;
> --- gcc/config/i386/i386.md.jj 2013-05-13 09:44:51.675494325 +0200
> +++ gcc/config/i386/i386.md 2013-05-13 15:09:37.461637593 +0200
> @@ -762,9 +762,6 @@ (define_code_attr rotate_insn [(rotate "
> ;; Base name for insn mnemonic.
> (define_code_attr rotate [(rotate "rol") (rotatert "ror")])
>
> -;; Base name for insn mnemonic of rotation in the other direction.
> -(define_code_attr rotateinv [(rotate "ror") (rotatert "rol")])
> -
> ;; Mapping of abs neg operators
> (define_code_iterator absneg [abs neg])
>
> @@ -9755,15 +9752,11 @@ (define_insn "*<rotate_insn><mode>3_1"
> return "#";
>
> default:
> - if (TARGET_SHIFT1 || optimize_function_for_size_p (cfun))
> - {
> - if (operands[2] == const1_rtx)
> - return "<rotate>{<imodesuffix>}\t%0";
> - if (CONST_INT_P (operands[2])
> - && INTVAL (operands[2]) == GET_MODE_BITSIZE (<MODE>mode) - 1)
> - return "<rotateinv>{<imodesuffix>}\t%0";
> - }
> - return "<rotate>{<imodesuffix>}\t{%2, %0|%0, %2}";
> + if (operands[2] == const1_rtx
> + && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
> + return "<rotate>{<imodesuffix>}\t%0";
> + else
> + return "<rotate>{<imodesuffix>}\t{%2, %0|%0, %2}";
> }
> }
> [(set_attr "isa" "*,bmi2")
> @@ -9825,14 +9818,11 @@ (define_insn "*<rotate_insn>si3_1_zext"
> return "#";
>
> default:
> - if (TARGET_SHIFT1 || optimize_function_for_size_p (cfun))
> - {
> - if (operands[2] == const1_rtx)
> - return "<rotate>{l}\t%k0";
> - if (CONST_INT_P (operands[2]) && INTVAL (operands[2]) == 31)
> - return "<rotateinv>{l}\t%k0";
> - }
> - return "<rotate>{l}\t{%2, %k0|%k0, %2}";
> + if (operands[2] == const1_rtx
> + && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
> + return "<rotate>{l}\t%k0";
> + else
> + return "<rotate>{l}\t{%2, %k0|%k0, %2}";
> }
> }
> [(set_attr "isa" "*,bmi2")
> @@ -9879,15 +9869,11 @@ (define_insn "*<rotate_insn><mode>3_1"
> (clobber (reg:CC FLAGS_REG))]
> "ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
> {
> - if (TARGET_SHIFT1 || optimize_function_for_size_p (cfun))
> - {
> - if (operands[2] == const1_rtx)
> - return "<rotate>{<imodesuffix>}\t%0";
> - if (CONST_INT_P (operands[2])
> - && INTVAL (operands[2]) == GET_MODE_BITSIZE (<MODE>mode) - 1)
> - return "<rotateinv>{<imodesuffix>}\t%0";
> - }
> - return "<rotate>{<imodesuffix>}\t{%2, %0|%0, %2}";
> + if (operands[2] == const1_rtx
> + && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
> + return "<rotate>{<imodesuffix>}\t%0";
> + else
> + return "<rotate>{<imodesuffix>}\t{%2, %0|%0, %2}";
> }
> [(set_attr "type" "rotate")
> (set (attr "length_immediate")
> @@ -9909,14 +9895,11 @@ (define_insn "*<rotate_insn>qi3_1_slp"
> || (operands[1] == const1_rtx
> && TARGET_SHIFT1))"
> {
> - if (TARGET_SHIFT1 || optimize_function_for_size_p (cfun))
> - {
> - if (operands[2] == const1_rtx)
> - return "<rotate>{b}\t%0";
> - if (CONST_INT_P (operands[2]) && INTVAL (operands[2]) == 7)
> - return "<rotateinv>{b}\t%0";
> - }
> - return "<rotate>{b}\t{%1, %0|%0, %1}";
> + if (operands[1] == const1_rtx
> + && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
> + return "<rotate>{b}\t%0";
> + else
> + return "<rotate>{b}\t{%1, %0|%0, %1}";
> }
> [(set_attr "type" "rotate1")
> (set (attr "length_immediate")
>
>
> Jakub
>
>
--
Richard Biener <rguenther@suse.de>
SUSE / SUSE Labs
SUSE LINUX Products GmbH - Nuernberg - AG Nuernberg - HRB 16746
GF: Jeff Hawn, Jennifer Guild, Felix Imendörffer