This is the mail archive of the
gcc-bugs@gcc.gnu.org
mailing list for the GCC project.
[Bug target/65832] Inefficient vector construction
- From: "rguenth at gcc dot gnu.org" <gcc-bugzilla at gcc dot gnu dot org>
- To: gcc-bugs at gcc dot gnu dot org
- Date: Tue, 28 Apr 2015 08:51:51 +0000
- Subject: [Bug target/65832] Inefficient vector construction
- Auto-submitted: auto-generated
- References: <bug-65832-4 at http dot gcc dot gnu dot org/bugzilla/>
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65832
--- Comment #3 from Richard Biener <rguenth at gcc dot gnu.org> ---
Another example where the vectorizer thinks vectorization is profitable:
#define N 16

/* Global arrays so the four multiplies below form a 4-wide SLP group
   the vectorizer can pick up.  */
unsigned int out[N];
unsigned int in[N] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};

/* Multiply the first four elements of IN by the alternating pattern
   { x, y, x, y } and store the products into OUT.
   noinline keeps the arguments from being constant-propagated so the
   vector constructor { x, y, x, y } is actually built at run time.
   Returns 0 (the original fell off the end of a non-void function,
   which is undefined behavior if the caller uses the value,
   C11 6.9.1p12; the unused "int i;" is also dropped).  */
__attribute__ ((noinline)) int
main1 (unsigned int x, unsigned int y)
{
  unsigned int a0, a1, a2, a3;

  a0 = in[0];
  a1 = in[1];
  a2 = in[2];
  a3 = in[3];

  out[0] = a0 * x;
  out[1] = a1 * y;
  out[2] = a2 * x;
  out[3] = a3 * y;

  return 0;
}
generates
main1:
.LFB0:
.cfi_startproc
movl %edi, -12(%rsp)
movd -12(%rsp), %xmm0
movl %esi, -12(%rsp)
movd -12(%rsp), %xmm3
movdqa in(%rip), %xmm2
punpckldq %xmm3, %xmm0
psrlq $32, %xmm2
punpcklqdq %xmm0, %xmm0
movdqa %xmm0, %xmm1
psrlq $32, %xmm0
pmuludq %xmm2, %xmm0
pshufd $8, %xmm0, %xmm0
pmuludq in(%rip), %xmm1
pshufd $8, %xmm1, %xmm1
punpckldq %xmm0, %xmm1
movaps %xmm1, out(%rip)
ret
slightly less obfuscated when we allow general-purpose-register <-> XMM moves with -mtune=intel:
main1:
.LFB0:
.cfi_startproc
movd %edi, %xmm0
movd %esi, %xmm3
movdqa in(%rip), %xmm2
punpckldq %xmm3, %xmm0
punpcklqdq %xmm0, %xmm0
psrlq $32, %xmm2
movdqa %xmm0, %xmm1
psrlq $32, %xmm0
pmuludq in(%rip), %xmm1
pmuludq %xmm2, %xmm0
pshufd $8, %xmm1, %xmm1
pshufd $8, %xmm0, %xmm0
punpckldq %xmm0, %xmm1
movdqa %xmm1, out(%rip)
ret
so for { x, y, x, y } construction we generate
movd %edi, %xmm0
movd %esi, %xmm3
punpckldq %xmm3, %xmm0
punpcklqdq %xmm0, %xmm0
it is not at all clear where the extra shifting and shuffling comes from...
This is just
vect_cst_.7_18 = {x_6(D), y_9(D), x_6(D), y_9(D)};
vect_a0_2.5_17 = MEM[(unsigned int *)&in];
vect__7.6_19 = vect_a0_2.5_17 * vect_cst_.7_18;
MEM[(unsigned int *)&out] = vect__7.6_19;