[PATCH] Fix movqi_1 (PR target/25199)

Jakub Jelinek jakub@redhat.com
Fri Dec 2 12:57:00 GMT 2005


Hi!

PR target/25199 is about movqi_1 insn using movl into a non-q class
register even when it doesn't know it is at least 32-bit aligned.
I have fixed it on gcc-4_0-branch (see attached gcc4-pr25199.patch),
then later on when trying to forward port it to trunk/4.1
I noticed Dale fixed the same problem already half a year ago
on the trunk.  But comparing the two changes I think the one that
went onto mainline was unnecessarily big hammer, movl is 1 byte
shorter than movzbl, so for -Os it is preferable to use movl
over movzbl if we know it is aligned.  So, I'm also attaching
a patch for trunk/4.1.  Additionally, I was really surprised
aligned_operand doesn't use MEM_ALIGN (I know it predates
MEM_ALIGN by ~ 1 year), so this patch makes use of it too
and only falls back to the current logic if MEM_ALIGN ()
doesn't prove sufficient alignment.

Ok for trunk/4.1/4.0?

	Jakub
-------------- next part --------------
2005-12-02  Jakub Jelinek  <jakub@redhat.com>

	PR target/25199
	* config/i386/i386.md (movqi_1): Only force imovx for alternative
	5 if operand 1 is not aligned.  Undo previous constraint change.
	* config/i386/predicates.md (aligned_operand): Use MEM_ALIGN.

testsuite/
	* gcc.target/i386/movq-2.c: New test.
	* gcc.target/i386/movq.c: Remove target i?86, instead add
	dg-require-effective-target ilp32.

--- gcc/config/i386/i386.md.jj	2005-11-28 20:38:19.000000000 +0100
+++ gcc/config/i386/i386.md	2005-12-02 13:31:31.000000000 +0100
@@ -1469,7 +1469,7 @@
 ;; partial register stall can be caused there.  Then we use movzx.
 (define_insn "*movqi_1"
   [(set (match_operand:QI 0 "nonimmediate_operand" "=q,q ,q ,r,r ,?r,m")
-	(match_operand:QI 1 "general_operand"      " q,qn,qm,q,rn,m ,qn"))]
+	(match_operand:QI 1 "general_operand"      " q,qn,qm,q,rn,qm,qn"))]
   "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
 {
   switch (get_attr_type (insn))
@@ -1485,7 +1485,8 @@
     }
 }
   [(set (attr "type")
-     (cond [(eq_attr "alternative" "5")
+     (cond [(and (eq_attr "alternative" "5")
+		 (not (match_operand:QI 1 "aligned_operand" "")))
 	      (const_string "imovx")
 	    (ne (symbol_ref "optimize_size") (const_int 0))
 	      (const_string "imov")
@@ -1495,7 +1496,7 @@
 		      (eq (symbol_ref "TARGET_QIMODE_MATH")
 			  (const_int 0))))
 	      (const_string "imov")
-	    (eq_attr "alternative" "3")
+	    (eq_attr "alternative" "3,5")
 	      (const_string "imovx")
 	    (and (ne (symbol_ref "TARGET_MOVX")
 		     (const_int 0))
--- gcc/config/i386/predicates.md.jj	2005-11-21 09:46:58.000000000 +0100
+++ gcc/config/i386/predicates.md	2005-12-02 13:22:37.000000000 +0100
@@ -687,7 +687,7 @@
   return parts.seg == SEG_DEFAULT;
 })
 
-;; Return nonzero if the rtx is known aligned.
+;; Return nonzero if the rtx is known to be at least 32 bits aligned.
 (define_predicate "aligned_operand"
   (match_operand 0 "general_operand")
 {
@@ -701,6 +701,10 @@
   /* Don't even try to do any aligned optimizations with volatiles.  */
   if (MEM_VOLATILE_P (op))
     return 0;
+
+  if (MEM_ALIGN (op) >= 32)
+    return 1;
+
   op = XEXP (op, 0);
 
   /* Pushes and pops are only valid on the stack pointer.  */
--- gcc/testsuite/gcc.target/i386/movq.c.jj	2005-10-28 22:54:59.000000000 +0200
+++ gcc/testsuite/gcc.target/i386/movq.c	2005-12-02 13:22:37.000000000 +0100
@@ -1,5 +1,7 @@
-/* { dg-do compile { target "i?86*-*-*" } }
+/* { dg-do compile }
 /* { dg-options "-Os -march=pentium4 -mtune=prescott" } */
+/* { dg-require-effective-target ilp32 } */
+
 register char foo asm("edi");
 char x;
 int bar() {
--- gcc/testsuite/gcc.target/i386/movq-2.c.jj	2005-12-02 13:22:37.000000000 +0100
+++ gcc/testsuite/gcc.target/i386/movq-2.c	2005-12-02 13:22:37.000000000 +0100
@@ -0,0 +1,26 @@
+/* PR target/25199 */
+/* { dg-do compile } */
+/* { dg-options "-Os" } */
+/* { dg-require-effective-target ilp32 } */
+
+struct S
+{
+  void *p[30];
+  unsigned char c[4];
+};
+
+unsigned char d;
+
+void
+foo (struct S *x)
+{
+  register unsigned char e __asm ("esi");
+  e = x->c[3];
+  __asm __volatile ("" : : "r" (e));
+  e = x->c[0];
+  __asm __volatile ("" : : "r" (e));
+}
+
+/* { dg-final { scan-assembler-not "movl\[ \t\]*123" } } */
+/* { dg-final { scan-assembler "movzbl\[ \t\]*123" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*120" } } */
-------------- next part --------------
2005-12-02  Jakub Jelinek  <jakub@redhat.com>

	PR target/25199
	* config/i386/i386.md (movqi_1): Only force imovx for alternative
	5 if operand 1 is not aligned.  Undo previous constraint change.
	* config/i386/predicates.md (aligned_operand): Use MEM_ALIGN.

	Backport from mainline:
	2005-06-07  Dale Johannesen  <dalej@apple.com>

	* config/i386/i386.md (movqi_1): Fix case where source
	is memory and destination EDI.

testsuite/
	* gcc.target/i386/movq-2.c: New test.
	* gcc.target/i386/movq.c: Remove target i?86, instead add
	dg-require-effective-target ilp32.

	Backport from mainline:
	2005-06-07  Dale Johannesen  <dalej@apple.com>

	* gcc.target/i386/movq.c: New.

--- gcc/config/i386/i386.md.jj	2005-11-09 22:09:48.000000000 +0100
+++ gcc/config/i386/i386.md	2005-12-02 12:45:13.000000000 +0100
@@ -1462,7 +1462,10 @@
     }
 }
   [(set (attr "type")
-     (cond [(ne (symbol_ref "optimize_size") (const_int 0))
+     (cond [(and (eq_attr "alternative" "5")
+		 (not (match_operand:QI 1 "aligned_operand" "")))
+	      (const_string "imovx")
+	    (ne (symbol_ref "optimize_size") (const_int 0))
 	      (const_string "imov")
 	    (and (eq_attr "alternative" "3")
 		 (ior (eq (symbol_ref "TARGET_PARTIAL_REG_STALL")
--- gcc/config/i386/predicates.md.jj	2005-11-04 10:12:01.000000000 +0100
+++ gcc/config/i386/predicates.md	2005-12-02 12:45:13.000000000 +0100
@@ -651,7 +651,7 @@
   return parts.seg == SEG_DEFAULT;
 })
 
-;; Return nonzero if the rtx is known aligned.
+;; Return nonzero if the rtx is known to be at least 32 bits aligned.
 (define_predicate "aligned_operand"
   (match_operand 0 "general_operand")
 {
@@ -664,6 +664,10 @@
   /* Don't even try to do any aligned optimizations with volatiles.  */
   if (MEM_VOLATILE_P (op))
     return 0;
+
+  if (MEM_ALIGN (op) >= 32)
+    return 1;
+
   op = XEXP (op, 0);
 
   /* Pushes and pops are only valid on the stack pointer.  */
--- gcc/testsuite/gcc.target/i386/movq.c.jj	2005-10-28 22:54:59.000000000 +0200
+++ gcc/testsuite/gcc.target/i386/movq.c	2005-12-02 13:07:39.000000000 +0100
@@ -0,0 +1,10 @@
+/* { dg-do compile }
+/* { dg-options "-Os -march=pentium4 -mtune=prescott" } */
+/* { dg-require-effective-target ilp32 } */
+
+register char foo asm("edi");
+char x;
+int bar() {
+  foo = x;
+}
+/* { dg-final { scan-assembler "movz" } } */
--- gcc/testsuite/gcc.target/i386/movq-2.c.jj	2005-12-02 13:09:02.000000000 +0100
+++ gcc/testsuite/gcc.target/i386/movq-2.c	2005-12-02 13:10:13.000000000 +0100
@@ -0,0 +1,26 @@
+/* PR target/25199 */
+/* { dg-do compile } */
+/* { dg-options "-Os" } */
+/* { dg-require-effective-target ilp32 } */
+
+struct S
+{
+  void *p[30];
+  unsigned char c[4];
+};
+
+unsigned char d;
+
+void
+foo (struct S *x)
+{
+  register unsigned char e __asm ("esi");
+  e = x->c[3];
+  __asm __volatile ("" : : "r" (e));
+  e = x->c[0];
+  __asm __volatile ("" : : "r" (e));
+}
+
+/* { dg-final { scan-assembler-not "movl\[ \t\]*123" } } */
+/* { dg-final { scan-assembler "movzbl\[ \t\]*123" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*120" } } */


More information about the Gcc-patches mailing list