[AArch64] Fully support rotate on logical operations

Richard Earnshaw <rearnsha@arm.com>
Wed Mar 26 15:17:00 GMT 2014


This patch fixes an issue where only some rotate-by-immediate
operations are merged with logical operations during combine.  The
problem is due to canonicalization: the architecture only has a
rotate-right operation, so a rotate-left by N has to be converted into
a rotate-right by (width - N).  To avoid fighting the mid-end
canonicalization rules, the only place to do this conversion is during
final assembly output.
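
As a sanity check on the identity this relies on (illustrative only,
not part of the patch): a rotate-left by a constant N on an M-bit
register is the same as a rotate-right by (M - N), so a ROTATE with a
constant amount can always be printed as a ROR instruction.

  /* Illustrative only; rol32/ror32 are local helpers, and n must be
     in [1, 31] to avoid an undefined shift by 32.  */
  #include <assert.h>
  #include <stdint.h>

  static uint32_t rol32 (uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }
  static uint32_t ror32 (uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }

  int main (void)
  {
    assert (rol32 (0x12345678u, 13) == ror32 (0x12345678u, 32 - 13));
    return 0;
  }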

This patch adds the two patterns necessary to make this happen.  It
also corrects an oversight in the rtx_cost infrastructure: we did not
previously recognize rotate operations on logical instructions.  We now
treat a rotate in the same way as any other shift operation.
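
For example, given something like the following (an illustrative test
case, not included with this patch), the mid-end canonicalizes the
two-shift idiom to a rotate, and combine can now fuse that rotate with
the logical operation:

  /* Hypothetical example: the shift pair is recognized as a
     rotate-left of x by 13, which is then merged into the EOR.  */
  unsigned int
  f (unsigned int x, unsigned int y)
  {
    return ((x << 13) | (x >> 19)) ^ y;
  }

With the patch this should emit a single instruction, along the lines
of (exact register allocation may vary):

  eor	w0, w1, w0, ror (32 - 13)

where the assembler folds the expression to a rotate-right by 19.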

I'll apply this to trunk once stage-1 opens.

R.

	* aarch64.md (*<optab>_rol<mode>3): New pattern.
	(*<optab>_rolsi3_uxtw): Likewise.
	* aarch64.c (aarch64_strip_shift): Handle ROTATE and ROTATERT.

diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index ebd58c0..4f37c08 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -4458,9 +4458,13 @@ aarch64_strip_shift (rtx x)
 {
   rtx op = x;
 
+  /* We accept both ROTATERT and ROTATE: since the RHS must be a constant
+     we can convert both to ROR during final output.  */
   if ((GET_CODE (op) == ASHIFT
        || GET_CODE (op) == ASHIFTRT
-       || GET_CODE (op) == LSHIFTRT)
+       || GET_CODE (op) == LSHIFTRT
+       || GET_CODE (op) == ROTATERT
+       || GET_CODE (op) == ROTATE)
       && CONST_INT_P (XEXP (op, 1)))
     return XEXP (op, 0);
 
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index c86a29d..a6d6243 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -2486,7 +2486,18 @@
   [(set_attr "type" "logic_shift_imm")]
 )
 
-;; zero_extend version of above
+(define_insn "*<optab>_rol<mode>3"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+	(LOGICAL:GPI (rotate:GPI
+		      (match_operand:GPI 1 "register_operand" "r")
+		      (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))
+		     (match_operand:GPI 3 "register_operand" "r")))]
+  ""
+  "<logical>\\t%<w>0, %<w>3, %<w>1, ror (<sizen> - %2)"
+  [(set_attr "type" "logic_shift_imm")]
+)
+
+;; zero_extend versions of above
 (define_insn "*<LOGICAL:optab>_<SHIFT:optab>si3_uxtw"
   [(set (match_operand:DI 0 "register_operand" "=r")
 	(zero_extend:DI
@@ -2499,6 +2510,18 @@
   [(set_attr "type" "logic_shift_imm")]
 )
 
+(define_insn "*<optab>_rolsi3_uxtw"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(zero_extend:DI
+	 (LOGICAL:SI (rotate:SI
+		      (match_operand:SI 1 "register_operand" "r")
+		      (match_operand:QI 2 "aarch64_shift_imm_si" "n"))
+		     (match_operand:SI 3 "register_operand" "r"))))]
+  ""
+  "<logical>\\t%w0, %w3, %w1, ror (32 - %2)"
+  [(set_attr "type" "logic_shift_imm")]
+)
+
 (define_insn "one_cmpl<mode>2"
   [(set (match_operand:GPI 0 "register_operand" "=r")
 	(not:GPI (match_operand:GPI 1 "register_operand" "r")))]

