[PATCH, rs6000] Fold vector addition built-ins in GIMPLE

Bill Schmidt wschmidt@linux.vnet.ibm.com
Fri Nov 4 15:09:00 GMT 2016


Just a note that the "-std=gnu11" option in the test cases in this patch is a leftover
from a previous version of the patch; I plan to remove it.  Sorry for the
oversight!

Bill

> On Nov 1, 2016, at 9:05 PM, Bill Schmidt <wschmidt@linux.vnet.ibm.com> wrote:
> 
> Hi,
> 
> As Jakub suggested in response to my *ahem* ornate patch for overloaded
> function built-ins, a much better approach is to use the existing
> machinery for overloading and then immediately fold the specific
> functions during gimplification.  There is a target hook available for
> this purpose that we have not previously used.  This patch demonstrates
> this functionality by implementing the target hook and folding vector
> addition built-ins within it.  Future patches will fold other such
> operations, improving the optimization available for many vector
> intrinsics.
> 
> Bootstrapped and tested on powerpc64le-unknown-linux-gnu with no
> regressions.  Is this ok for trunk?
> 
> Thanks,
> Bill
> 
> 
> [gcc]
> 
> 2016-11-01  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
> 
> 	* config/rs6000/rs6000.c (gimple-ssa.h): New #include.
> 	(TARGET_GIMPLE_FOLD_BUILTIN): Define as
> 	rs6000_gimple_fold_builtin.
> 	(rs6000_gimple_fold_builtin): New function.  Add handling for
> 	early expansion of vector addition builtins.
> 
> 
> [gcc/testsuite]
> 
> 2016-11-01  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
> 
> 	* gcc.target/powerpc/fold-vec-add-1.c: New.
> 	* gcc.target/powerpc/fold-vec-add-2.c: New.
> 	* gcc.target/powerpc/fold-vec-add-3.c: New.
> 	* gcc.target/powerpc/fold-vec-add-4.c: New.
> 	* gcc.target/powerpc/fold-vec-add-5.c: New.
> 	* gcc.target/powerpc/fold-vec-add-6.c: New.
> 	* gcc.target/powerpc/fold-vec-add-7.c: New.
> 
> 
> Index: gcc/config/rs6000/rs6000.c
> ===================================================================
> --- gcc/config/rs6000/rs6000.c	(revision 241624)
> +++ gcc/config/rs6000/rs6000.c	(working copy)
> @@ -56,6 +56,7 @@
> #include "sched-int.h"
> #include "gimplify.h"
> #include "gimple-iterator.h"
> +#include "gimple-ssa.h"
> #include "gimple-walk.h"
> #include "intl.h"
> #include "params.h"
> @@ -1632,6 +1633,8 @@ static const struct attribute_spec rs6000_attribut
> 
> #undef TARGET_FOLD_BUILTIN
> #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
> +#undef TARGET_GIMPLE_FOLD_BUILTIN
> +#define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
> 
> #undef TARGET_EXPAND_BUILTIN
> #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
> @@ -16337,6 +16340,46 @@ rs6000_fold_builtin (tree fndecl, int n_args ATTRI
> #endif
> }
> 
> +/* Fold a machine-dependent built-in in GIMPLE.  (For folding into
> +   a constant, use rs6000_fold_builtin.)  */
> +
> +bool
> +rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
> +{
> +  gimple *stmt = gsi_stmt (*gsi);
> +  tree fndecl = gimple_call_fndecl (stmt);
> +  gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
> +  enum rs6000_builtins fn_code
> +    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
> +  tree arg0, arg1, lhs;
> +
> +  switch (fn_code)
> +    {
> +    /* Flavors of vec_add.  We deliberately don't expand
> +       P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
> +       TImode, resulting in much poorer code generation.  */
> +    case ALTIVEC_BUILTIN_VADDUBM:
> +    case ALTIVEC_BUILTIN_VADDUHM:
> +    case ALTIVEC_BUILTIN_VADDUWM:
> +    case P8V_BUILTIN_VADDUDM:
> +    case ALTIVEC_BUILTIN_VADDFP:
> +    case VSX_BUILTIN_XVADDDP:
> +      {
> +	arg0 = gimple_call_arg (stmt, 0);
> +	arg1 = gimple_call_arg (stmt, 1);
> +	lhs = gimple_call_lhs (stmt);
> +	gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
> +	gimple_set_location (g, gimple_location (stmt));
> +	gsi_replace (gsi, g, true);
> +	return true;
> +      }
> +    default:
> +      break;
> +    }
> +
> +  return false;
> +}
> +
> /* Expand an expression EXP that calls a built-in function,
>    with result going to TARGET if that's convenient
>    (and in mode MODE if that's convenient).
> Index: gcc/testsuite/gcc.target/powerpc/fold-vec-add-1.c
> ===================================================================
> --- gcc/testsuite/gcc.target/powerpc/fold-vec-add-1.c	(revision 0)
> +++ gcc/testsuite/gcc.target/powerpc/fold-vec-add-1.c	(working copy)
> @@ -0,0 +1,46 @@
> +/* Verify that overloaded built-ins for vec_add with char
> +   inputs produce the right results.  */
> +
> +/* { dg-do compile } */
> +/* { dg-require-effective-target powerpc_altivec_ok } */
> +/* { dg-additional-options "-std=gnu11" } */
> +
> +#include <altivec.h>
> +
> +vector signed char
> +test1 (vector bool char x, vector signed char y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector signed char
> +test2 (vector signed char x, vector bool char y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector signed char
> +test3 (vector signed char x, vector signed char y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned char
> +test4 (vector bool char x, vector unsigned char y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned char
> +test5 (vector unsigned char x, vector bool char y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned char
> +test6 (vector unsigned char x, vector unsigned char y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +/* { dg-final { scan-assembler-times "vaddubm" 6 } } */
> Index: gcc/testsuite/gcc.target/powerpc/fold-vec-add-2.c
> ===================================================================
> --- gcc/testsuite/gcc.target/powerpc/fold-vec-add-2.c	(revision 0)
> +++ gcc/testsuite/gcc.target/powerpc/fold-vec-add-2.c	(working copy)
> @@ -0,0 +1,46 @@
> +/* Verify that overloaded built-ins for vec_add with short
> +   inputs produce the right results.  */
> +
> +/* { dg-do compile } */
> +/* { dg-require-effective-target powerpc_altivec_ok } */
> +/* { dg-additional-options "-std=gnu11" } */
> +
> +#include <altivec.h>
> +
> +vector signed short
> +test1 (vector bool short x, vector signed short y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector signed short
> +test2 (vector signed short x, vector bool short y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector signed short
> +test3 (vector signed short x, vector signed short y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned short
> +test4 (vector bool short x, vector unsigned short y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned short
> +test5 (vector unsigned short x, vector bool short y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned short
> +test6 (vector unsigned short x, vector unsigned short y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +/* { dg-final { scan-assembler-times "vadduhm" 6 } } */
> Index: gcc/testsuite/gcc.target/powerpc/fold-vec-add-3.c
> ===================================================================
> --- gcc/testsuite/gcc.target/powerpc/fold-vec-add-3.c	(revision 0)
> +++ gcc/testsuite/gcc.target/powerpc/fold-vec-add-3.c	(working copy)
> @@ -0,0 +1,46 @@
> +/* Verify that overloaded built-ins for vec_add with int
> +   inputs produce the right results.  */
> +
> +/* { dg-do compile } */
> +/* { dg-require-effective-target powerpc_altivec_ok } */
> +/* { dg-additional-options "-std=gnu11" } */
> +
> +#include <altivec.h>
> +
> +vector signed int
> +test1 (vector bool int x, vector signed int y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector signed int
> +test2 (vector signed int x, vector bool int y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector signed int
> +test3 (vector signed int x, vector signed int y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned int
> +test4 (vector bool int x, vector unsigned int y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned int
> +test5 (vector unsigned int x, vector bool int y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned int
> +test6 (vector unsigned int x, vector unsigned int y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +/* { dg-final { scan-assembler-times "vadduwm" 6 } } */
> Index: gcc/testsuite/gcc.target/powerpc/fold-vec-add-4.c
> ===================================================================
> --- gcc/testsuite/gcc.target/powerpc/fold-vec-add-4.c	(revision 0)
> +++ gcc/testsuite/gcc.target/powerpc/fold-vec-add-4.c	(working copy)
> @@ -0,0 +1,46 @@
> +/* Verify that overloaded built-ins for vec_add with long long
> +   inputs produce the right results.  */
> +
> +/* { dg-do compile } */
> +/* { dg-require-effective-target powerpc_p8vector_ok } */
> +/* { dg-additional-options "-std=gnu11" } */
> +
> +#include <altivec.h>
> +
> +vector signed long long
> +test1 (vector bool long long x, vector signed long long y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector signed long long
> +test2 (vector signed long long x, vector bool long long y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector signed long long
> +test3 (vector signed long long x, vector signed long long y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned long long
> +test4 (vector bool long long x, vector unsigned long long y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned long long
> +test5 (vector unsigned long long x, vector bool long long y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned long long
> +test6 (vector unsigned long long x, vector unsigned long long y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +/* { dg-final { scan-assembler-times "vaddudm" 6 } } */
> Index: gcc/testsuite/gcc.target/powerpc/fold-vec-add-5.c
> ===================================================================
> --- gcc/testsuite/gcc.target/powerpc/fold-vec-add-5.c	(revision 0)
> +++ gcc/testsuite/gcc.target/powerpc/fold-vec-add-5.c	(working copy)
> @@ -0,0 +1,16 @@
> +/* Verify that overloaded built-ins for vec_add with float
> +   inputs produce the right results.  */
> +
> +/* { dg-do compile } */
> +/* { dg-require-effective-target powerpc_altivec_ok } */
> +/* { dg-additional-options "-std=gnu11 -mno-vsx" } */
> +
> +#include <altivec.h>
> +
> +vector float
> +test1 (vector float x, vector float y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +/* { dg-final { scan-assembler-times "vaddfp" 1 } } */
> Index: gcc/testsuite/gcc.target/powerpc/fold-vec-add-6.c
> ===================================================================
> --- gcc/testsuite/gcc.target/powerpc/fold-vec-add-6.c	(revision 0)
> +++ gcc/testsuite/gcc.target/powerpc/fold-vec-add-6.c	(working copy)
> @@ -0,0 +1,23 @@
> +/* Verify that overloaded built-ins for vec_add with float and
> +   double inputs for VSX produce the right results.  */
> +
> +/* { dg-do compile } */
> +/* { dg-require-effective-target powerpc_vsx_ok } */
> +/* { dg-additional-options "-std=gnu11" } */
> +
> +#include <altivec.h>
> +
> +vector float
> +test1 (vector float x, vector float y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector double
> +test2 (vector double x, vector double y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +/* { dg-final { scan-assembler-times "xvaddsp" 1 } } */
> +/* { dg-final { scan-assembler-times "xvadddp" 1 } } */
> Index: gcc/testsuite/gcc.target/powerpc/fold-vec-add-7.c
> ===================================================================
> --- gcc/testsuite/gcc.target/powerpc/fold-vec-add-7.c	(revision 0)
> +++ gcc/testsuite/gcc.target/powerpc/fold-vec-add-7.c	(working copy)
> @@ -0,0 +1,22 @@
> +/* Verify that overloaded built-ins for vec_add with __int128
> +   inputs produce the right results.  */
> +
> +/* { dg-do compile } */
> +/* { dg-require-effective-target powerpc_p8vector_ok } */
> +/* { dg-additional-options "-std=gnu11 -Wno-pedantic" } */
> +
> +#include "altivec.h"
> +
> +vector signed __int128
> +test1 (vector signed __int128 x, vector signed __int128 y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +vector unsigned __int128
> +test2 (vector unsigned __int128 x, vector unsigned __int128 y)
> +{
> +  return vec_add (x, y);
> +}
> +
> +/* { dg-final { scan-assembler-times "vadduqm" 2 } } */
> 



More information about the Gcc-patches mailing list