[PATCH] Improve rotation by mode bitsize - 1

Jakub Jelinek jakub@redhat.com
Thu May 9 18:46:00 GMT 2013


Hi!

This is something I've noticed while working on the rotate recognizer
patch I've just posted.  We emit, say,
  roll %eax
instead of
  roll $1, %eax
because the former is shorter, but emit
  roll $31, %eax
instead of the equivalent, but shorter
  rorl %eax
The following patch lets us optimize even those.  Bootstrapped/regtested
on x86_64-linux and i686-linux, ok for trunk?

2013-05-09  Jakub Jelinek  <jakub@redhat.com>

	* config/i386/i386.md (rotateinv): New code attr.
	(*<rotate_insn><mode>3_1, *<rotate_insn>si3_1_zext,
	*<rotate_insn>qi3_1_slp): Emit rorl %eax instead of
	roll $31, %eax, etc.

--- gcc/config/i386/i386.md.jj	2013-05-07 10:26:46.000000000 +0200
+++ gcc/config/i386/i386.md	2013-05-09 10:18:36.603489156 +0200
@@ -761,6 +761,9 @@ (define_code_attr rotate_insn [(rotate "
 ;; Base name for insn mnemonic.
 (define_code_attr rotate [(rotate "rol") (rotatert "ror")])
 
+;; Base name for insn mnemonic of rotation in the other direction.
+(define_code_attr rotateinv [(rotate "ror") (rotatert "rol")])
+
 ;; Mapping of abs neg operators
 (define_code_iterator absneg [abs neg])
 
@@ -9733,11 +9736,15 @@ (define_insn "*<rotate_insn><mode>3_1"
       return "#";
 
     default:
-      if (operands[2] == const1_rtx
-	  && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
-	return "<rotate>{<imodesuffix>}\t%0";
-      else
-	return "<rotate>{<imodesuffix>}\t{%2, %0|%0, %2}";
+      if (TARGET_SHIFT1 || optimize_function_for_size_p (cfun))
+	{
+	  if (operands[2] == const1_rtx)
+	    return "<rotate>{<imodesuffix>}\t%0";
+	  if (CONST_INT_P (operands[2])
+	      && INTVAL (operands[2]) == GET_MODE_BITSIZE (<MODE>mode) - 1)
+	    return "<rotateinv>{<imodesuffix>}\t%0";
+	}
+      return "<rotate>{<imodesuffix>}\t{%2, %0|%0, %2}";
     }
 }
   [(set_attr "isa" "*,bmi2")
@@ -9799,11 +9806,14 @@ (define_insn "*<rotate_insn>si3_1_zext"
       return "#";
 
     default:
-      if (operands[2] == const1_rtx
-	  && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
-	return "<rotate>{l}\t%k0";
-      else
-	return "<rotate>{l}\t{%2, %k0|%k0, %2}";
+      if (TARGET_SHIFT1 || optimize_function_for_size_p (cfun))
+	{
+	  if (operands[2] == const1_rtx)
+	    return "<rotate>{l}\t%k0";
+	  if (CONST_INT_P (operands[2]) && INTVAL (operands[2]) == 31)
+	    return "<rotateinv>{l}\t%k0";
+	}
+      return "<rotate>{l}\t{%2, %k0|%k0, %2}";
     }
 }
   [(set_attr "isa" "*,bmi2")
@@ -9850,11 +9860,15 @@ (define_insn "*<rotate_insn><mode>3_1"
    (clobber (reg:CC FLAGS_REG))]
   "ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
 {
-  if (operands[2] == const1_rtx
-      && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
-    return "<rotate>{<imodesuffix>}\t%0";
-  else
-    return "<rotate>{<imodesuffix>}\t{%2, %0|%0, %2}";
+  if (TARGET_SHIFT1 || optimize_function_for_size_p (cfun))
+    {
+      if (operands[2] == const1_rtx)
+	return "<rotate>{<imodesuffix>}\t%0";
+      if (CONST_INT_P (operands[2])
+	  && INTVAL (operands[2]) == GET_MODE_BITSIZE (<MODE>mode) - 1)
+	return "<rotateinv>{<imodesuffix>}\t%0";
+    }
+  return "<rotate>{<imodesuffix>}\t{%2, %0|%0, %2}";
 }
   [(set_attr "type" "rotate")
    (set (attr "length_immediate")
@@ -9876,11 +9890,14 @@ (define_insn "*<rotate_insn>qi3_1_slp"
     || (operands[1] == const1_rtx
 	&& TARGET_SHIFT1))"
 {
-  if (operands[1] == const1_rtx
-      && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
-    return "<rotate>{b}\t%0";
-  else
-    return "<rotate>{b}\t{%1, %0|%0, %1}";
+  if (TARGET_SHIFT1 || optimize_function_for_size_p (cfun))
+    {
+      if (operands[1] == const1_rtx)
+	return "<rotate>{b}\t%0";
+      if (CONST_INT_P (operands[1]) && INTVAL (operands[1]) == 7)
+	return "<rotateinv>{b}\t%0";
+    }
+  return "<rotate>{b}\t{%1, %0|%0, %1}";
 }
   [(set_attr "type" "rotate1")
    (set (attr "length_immediate")

	Jakub



More information about the Gcc-patches mailing list