typedef float real_t; #define iterations 100000 #define LEN_1D 32000 #define LEN_2D 256 real_t a[LEN_1D]; int main() { // reductions // sum reduction real_t sum; for (int nl = 0; nl < iterations*10; nl++) { sum = (real_t)0.; for (int i = 0; i < LEN_1D; i++) { sum += a[i]; } } return sum > 4; } We produce with -O2 -march=znver2 .L2: movl $a, %eax vxorps %xmm0, %xmm0, %xmm0 .p2align 4 .p2align 3 .L3: vaddps (%rax), %ymm0, %ymm0 addq $32, %rax cmpq $a+128000, %rax jne .L3 vextractf128 $0x1, %ymm0, %xmm1 decl %edx vaddps %xmm0, %xmm1, %xmm1 vmovhlps %xmm1, %xmm1, %xmm0 vaddps %xmm1, %xmm0, %xmm0 vshufps $85, %xmm0, %xmm0, %xmm1 vaddps %xmm0, %xmm1, %xmm0 jne .L2 xorl %eax, %eax vcomiss .LC0(%rip), %xmm0 seta %al vzeroupper ret .cfi_endproc clang does: main: # @main .cfi_startproc # %bb.0: xorl %eax, %eax .p2align 4, 0x90 .LBB0_1: # =>This Loop Header: Depth=1 # Child Loop BB0_2 Depth 2 vxorps %xmm0, %xmm0, %xmm0 movq $-128000, %rcx # imm = 0xFFFE0C00 vxorps %xmm1, %xmm1, %xmm1 vxorps %xmm2, %xmm2, %xmm2 vxorps %xmm3, %xmm3, %xmm3 .p2align 4, 0x90 .LBB0_2: # Parent Loop BB0_1 Depth=1 # => This Inner Loop Header: Depth=2 vaddps a+128000(%rcx), %ymm0, %ymm0 vaddps a+128032(%rcx), %ymm1, %ymm1 vaddps a+128064(%rcx), %ymm2, %ymm2 vaddps a+128096(%rcx), %ymm3, %ymm3 subq $-128, %rcx jne .LBB0_2 # %bb.3: # in Loop: Header=BB0_1 Depth=1 incl %eax cmpl $1000000, %eax # imm = 0xF4240 jne .LBB0_1 # %bb.4: vaddps %ymm0, %ymm1, %ymm0 xorl %eax, %eax vaddps %ymm0, %ymm2, %ymm0 vaddps %ymm0, %ymm3, %ymm0 vextractf128 $1, %ymm0, %xmm1 vaddps %xmm1, %xmm0, %xmm0 vpermilpd $1, %xmm0, %xmm1 # xmm1 = xmm0[1,0] vaddps %xmm1, %xmm0, %xmm0 vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3] vaddss %xmm1, %xmm0, %xmm0 vucomiss .LCPI0_0(%rip), %xmm0 seta %al vzeroupper retq On zen3 hardware gcc version runs 2.4s, while clang's 0.8s
I think this is same case typedef float real_t; #define iterations 1000000 #define LEN_1D 32000 #define LEN_2D 256 real_t a[LEN_1D]; real_t test(real_t* A){ real_t s = (real_t)0.0; for (int i = 0; i < 4; i++) s += A[i]; return s; } int main() { // reductions // sum reduction real_t sum; for (int nl = 0; nl < 2000*iterations; nl++) { sum = (real_t)0.; sum += test(a); sum += test(&a[4]); sum += test(&a[8]); sum += test(&a[12]); sum += test(&a[16]); sum += test(&a[20]); sum += test(&a[24]); sum += test(&a[28]); } return sum>4; }
another one: // %3.1 typedef float real_t; #define iterations 100000 #define LEN_1D 32000 #define LEN_2D 256 real_t a[LEN_1D]; int main () { // reductions // product reduction real_t prod; for (int nl = 0; nl < 10*iterations; nl++) { prod = (real_t)1.; for (int i = 0; i < LEN_1D; i++) { prod *= a[i]; } } return prod > 0; }
and yet another one typedef float real_t; #define iterations 100000 #define LEN_1D 32000 #define LEN_2D 256 real_t a[LEN_1D]; int main() { // reductions // conditional sum reduction real_t sum; for (int nl = 0; nl < iterations/2; nl++) { sum = 0.; for (int i = 0; i < LEN_1D; i++) { if (a[i] > (real_t)0.) { sum += a[i]; } } } return sum > 4; }
typedef float real_t; #define iterations 100000 #define LEN_1D 32000 #define LEN_2D 256 real_t a[LEN_1D]; real_t aa[LEN_2D][LEN_2D]; int main() { // reductions // if to max with index reductio 2 dimensions // similar to S315 int xindex, yindex; real_t max, chksum; for (int nl = 0; nl < 100*(iterations/(LEN_2D)); nl++) { max = aa[(0)][0]; xindex = 0; yindex = 0; for (int i = 0; i < LEN_2D; i++) { for (int j = 0; j < LEN_2D; j++) { if (aa[i][j] > max) { max = aa[i][j]; xindex = i; yindex = j; } } } chksum = max + (real_t) xindex + (real_t) yindex; } return max + xindex+1 + yindex+1; }
typedef float real_t; #define iterations 100000 #define LEN_1D 32000 #define LEN_2D 256 real_t a[LEN_1D]; int main() { // control loops // vector sum reduction real_t sum; for (int nl = 0; nl < iterations*10; nl++) { sum = 0.; for (int i = 0; i < LEN_1D; i++) { sum += a[i]; } } return sum; }
So clang uses a larger effective VF here — it interleaves (unrolls) the vectorized loop into four independent reduction accumulators, which breaks the loop-carried add dependency chain, whereas GCC keeps a single accumulator and is latency-bound on vaddps. I think we already have another PR about this.
With znver4, current trunk and clang 15, I still see this problem (clang's code is about 60% faster) for s311, s312 and s3111. Curiously, s31111 and s3110 no longer show a regression.
Compared to aocc we also do worse on zen4: jh@alberti:~/tsvc/bin> ~/trunk-install/bin/gcc -Ofast -march=native s311.c jh@alberti:~/tsvc/bin> time ./a.out real 0m3.207s user 0m3.206s sys 0m0.000s jh@alberti:~/tsvc/bin> ~/aocc-compiler-4.0.0/bin/clang -Ofast -march=native s311.c jh@alberti:~/tsvc/bin> time ./a.out real 0m1.221s user 0m1.221s sys 0m0.000s aocc code seems similar to clangs from two years ago except for additional use of avx512. main: # @main .cfi_startproc # %bb.0: # %entry xorl %eax, %eax .p2align 4, 0x90 .LBB0_1: # %vector.ph # =>This Loop Header: Depth=1 # Child Loop BB0_2 Depth 2 vxorps %xmm0, %xmm0, %xmm0 movq $-128000, %rcx # imm = 0xFFFE0C00 vxorps %xmm1, %xmm1, %xmm1 vxorps %xmm2, %xmm2, %xmm2 vxorps %xmm3, %xmm3, %xmm3 .p2align 4, 0x90 .LBB0_2: # %vector.body # Parent Loop BB0_1 Depth=1 # => This Inner Loop Header: Depth=2 vaddps a+128000(%rcx), %zmm0, %zmm0 vaddps a+128064(%rcx), %zmm1, %zmm1 vaddps a+128128(%rcx), %zmm2, %zmm2 vaddps a+128192(%rcx), %zmm3, %zmm3 addq $256, %rcx # imm = 0x100 jne .LBB0_2 # %bb.3: # %middle.block # in Loop: Header=BB0_1 Depth=1 incl %eax cmpl $1000000, %eax # imm = 0xF4240 jne .LBB0_1 # %bb.4: # %for.cond.cleanup vaddps %zmm0, %zmm1, %zmm0 xorl %eax, %eax vaddps %zmm0, %zmm2, %zmm0 vaddps %zmm0, %zmm3, %zmm0 vextractf64x4 $1, %zmm0, %ymm1 vaddps %zmm1, %zmm0, %zmm0 vextractf128 $1, %ymm0, %xmm1 vaddps %xmm1, %xmm0, %xmm0 vpermilpd $1, %xmm0, %xmm1 # xmm1 = xmm0[1,0] vaddps %xmm1, %xmm0, %xmm0 vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3] vaddss %xmm1, %xmm0, %xmm0 vucomiss .LCPI0_0(%rip), %xmm0 seta %al vzeroupper retq