[testsuite] Don't use multiplication in gcc.target/x86_64/abi/asm-support.S

Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
Fri Jul 9 11:18:00 GMT 2010


Uros Bizjak <ubizjak@gmail.com> writes:

> OK, but please also fix asm-support-darwin.s and avx/asm-support.S for
> consistency.

Thanks.  Here's what I've installed after testing on the 4.4 and 4.5
branches.

	Rainer


2010-07-06  Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>

	* gcc.target/x86_64/abi/asm-support.S (snapshot): Replace
	multiplication in offsets by literal values.
	* gcc.target/x86_64/abi/asm-support-darwin.s (_snapshot): Likewise.
	* gcc.target/x86_64/abi/avx/asm-support.S (snapshot): Likewise.

diff -r 294316f4899a gcc/testsuite/gcc.target/x86_64/abi/asm-support-darwin.s
--- a/gcc/testsuite/gcc.target/x86_64/abi/asm-support-darwin.s	Fri Jul 09 12:03:06 2010 +0200
+++ b/gcc/testsuite/gcc.target/x86_64/abi/asm-support-darwin.s	Fri Jul 09 12:27:33 2010 +0200
@@ -22,20 +22,20 @@
 	movq	%r15, _r15(%rip)
 	movdqu	%xmm0, _xmm_regs+0(%rip)
 	movdqu	%xmm1, _xmm_regs+16(%rip)
-	movdqu	%xmm2, _xmm_regs+16*2(%rip)
-	movdqu	%xmm3, _xmm_regs+16*3(%rip)
-	movdqu	%xmm4, _xmm_regs+16*4(%rip)
-	movdqu	%xmm5, _xmm_regs+16*5(%rip)
-	movdqu	%xmm6, _xmm_regs+16*6(%rip)
-	movdqu	%xmm7, _xmm_regs+16*7(%rip)
-	movdqu	%xmm8, _xmm_regs+16*8(%rip)
-	movdqu	%xmm9, _xmm_regs+16*9(%rip)
-	movdqu	%xmm10, _xmm_regs+16*10(%rip)
-	movdqu	%xmm11, _xmm_regs+16*11(%rip)
-	movdqu	%xmm12, _xmm_regs+16*12(%rip)
-	movdqu	%xmm13, _xmm_regs+16*13(%rip)
-	movdqu	%xmm14, _xmm_regs+16*14(%rip)
-	movdqu	%xmm15, _xmm_regs+16*15(%rip)
+	movdqu	%xmm2, _xmm_regs+32(%rip)
+	movdqu	%xmm3, _xmm_regs+48(%rip)
+	movdqu	%xmm4, _xmm_regs+64(%rip)
+	movdqu	%xmm5, _xmm_regs+80(%rip)
+	movdqu	%xmm6, _xmm_regs+96(%rip)
+	movdqu	%xmm7, _xmm_regs+112(%rip)
+	movdqu	%xmm8, _xmm_regs+128(%rip)
+	movdqu	%xmm9, _xmm_regs+144(%rip)
+	movdqu	%xmm10, _xmm_regs+160(%rip)
+	movdqu	%xmm11, _xmm_regs+176(%rip)
+	movdqu	%xmm12, _xmm_regs+192(%rip)
+	movdqu	%xmm13, _xmm_regs+208(%rip)
+	movdqu	%xmm14, _xmm_regs+224(%rip)
+	movdqu	%xmm15, _xmm_regs+240(%rip)
 	jmp	*_callthis(%rip)
 .LFE3:
 	.p2align 4,,15
diff -r 294316f4899a gcc/testsuite/gcc.target/x86_64/abi/asm-support.S
--- a/gcc/testsuite/gcc.target/x86_64/abi/asm-support.S	Fri Jul 09 12:03:06 2010 +0200
+++ b/gcc/testsuite/gcc.target/x86_64/abi/asm-support.S	Fri Jul 09 12:27:33 2010 +0200
@@ -26,20 +26,20 @@
 	movq	%r15, r15(%rip)
 	movdqu	%xmm0, xmm_regs+0(%rip)
 	movdqu	%xmm1, xmm_regs+16(%rip)
-	movdqu	%xmm2, xmm_regs+16*2(%rip)
-	movdqu	%xmm3, xmm_regs+16*3(%rip)
-	movdqu	%xmm4, xmm_regs+16*4(%rip)
-	movdqu	%xmm5, xmm_regs+16*5(%rip)
-	movdqu	%xmm6, xmm_regs+16*6(%rip)
-	movdqu	%xmm7, xmm_regs+16*7(%rip)
-	movdqu	%xmm8, xmm_regs+16*8(%rip)
-	movdqu	%xmm9, xmm_regs+16*9(%rip)
-	movdqu	%xmm10, xmm_regs+16*10(%rip)
-	movdqu	%xmm11, xmm_regs+16*11(%rip)
-	movdqu	%xmm12, xmm_regs+16*12(%rip)
-	movdqu	%xmm13, xmm_regs+16*13(%rip)
-	movdqu	%xmm14, xmm_regs+16*14(%rip)
-	movdqu	%xmm15, xmm_regs+16*15(%rip)
+	movdqu	%xmm2, xmm_regs+32(%rip)
+	movdqu	%xmm3, xmm_regs+48(%rip)
+	movdqu	%xmm4, xmm_regs+64(%rip)
+	movdqu	%xmm5, xmm_regs+80(%rip)
+	movdqu	%xmm6, xmm_regs+96(%rip)
+	movdqu	%xmm7, xmm_regs+112(%rip)
+	movdqu	%xmm8, xmm_regs+128(%rip)
+	movdqu	%xmm9, xmm_regs+144(%rip)
+	movdqu	%xmm10, xmm_regs+160(%rip)
+	movdqu	%xmm11, xmm_regs+176(%rip)
+	movdqu	%xmm12, xmm_regs+192(%rip)
+	movdqu	%xmm13, xmm_regs+208(%rip)
+	movdqu	%xmm14, xmm_regs+224(%rip)
+	movdqu	%xmm15, xmm_regs+240(%rip)
 	jmp	*callthis(%rip)
 .LFE3:
 	.size	snapshot, .-snapshot
diff -r 294316f4899a gcc/testsuite/gcc.target/x86_64/abi/avx/asm-support.S
--- a/gcc/testsuite/gcc.target/x86_64/abi/avx/asm-support.S	Fri Jul 09 12:03:06 2010 +0200
+++ b/gcc/testsuite/gcc.target/x86_64/abi/avx/asm-support.S	Fri Jul 09 12:27:33 2010 +0200
@@ -23,20 +23,20 @@
 	movq	%r15, r15(%rip)
 	vmovdqu	%ymm0, ymm_regs+0(%rip)
 	vmovdqu	%ymm1, ymm_regs+32(%rip)
-	vmovdqu	%ymm2, ymm_regs+32*2(%rip)
-	vmovdqu	%ymm3, ymm_regs+32*3(%rip)
-	vmovdqu	%ymm4, ymm_regs+32*4(%rip)
-	vmovdqu	%ymm5, ymm_regs+32*5(%rip)
-	vmovdqu	%ymm6, ymm_regs+32*6(%rip)
-	vmovdqu	%ymm7, ymm_regs+32*7(%rip)
-	vmovdqu	%ymm8, ymm_regs+32*8(%rip)
-	vmovdqu	%ymm9, ymm_regs+32*9(%rip)
-	vmovdqu	%ymm10, ymm_regs+32*10(%rip)
-	vmovdqu	%ymm11, ymm_regs+32*11(%rip)
-	vmovdqu	%ymm12, ymm_regs+32*12(%rip)
-	vmovdqu	%ymm13, ymm_regs+32*13(%rip)
-	vmovdqu	%ymm14, ymm_regs+32*14(%rip)
-	vmovdqu	%ymm15, ymm_regs+32*15(%rip)
+	vmovdqu	%ymm2, ymm_regs+64(%rip)
+	vmovdqu	%ymm3, ymm_regs+96(%rip)
+	vmovdqu	%ymm4, ymm_regs+128(%rip)
+	vmovdqu	%ymm5, ymm_regs+160(%rip)
+	vmovdqu	%ymm6, ymm_regs+192(%rip)
+	vmovdqu	%ymm7, ymm_regs+224(%rip)
+	vmovdqu	%ymm8, ymm_regs+256(%rip)
+	vmovdqu	%ymm9, ymm_regs+288(%rip)
+	vmovdqu	%ymm10, ymm_regs+320(%rip)
+	vmovdqu	%ymm11, ymm_regs+352(%rip)
+	vmovdqu	%ymm12, ymm_regs+384(%rip)
+	vmovdqu	%ymm13, ymm_regs+416(%rip)
+	vmovdqu	%ymm14, ymm_regs+448(%rip)
+	vmovdqu	%ymm15, ymm_regs+480(%rip)
 	jmp	*callthis(%rip)
 .LFE3:
 	.size	snapshot, .-snapshot
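
(A quick sanity check for such a rewrite, assuming a toolchain like GNU
binutils that accepts both spellings, is to assemble the file before and
after the change and compare the objdump -d output: since only the
spelling of the constants changed, the object code should be identical.)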


-- 
-----------------------------------------------------------------------------
Rainer Orth, Center for Biotechnology, Bielefeld University


