[AArch64] Make vabs<q>_f<32, 64> a tree/gimple intrinsic.
- From: James Greenhalgh <james.greenhalgh@arm.com>
- To: gcc-patches@gcc.gnu.org
- Cc: marcus.shawcroft@arm.com
- Date: Thu, 25 Apr 2013 11:31:48 +0100
- Subject: [AArch64] Make vabs<q>_f<32, 64> a tree/gimple intrinsic.
Hi,
This patch adds the infrastructure needed to implement
TARGET_FOLD_BUILTIN for AArch64 and uses it to fold calls to the
`abs` builtins to a GENERIC ABS_EXPR, which the tree/GIMPLE
optimizers can then handle like any other abs operation.
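As a minimal sketch of what this enables (the example and the
function name `redundant_abs' are illustrative, not part of the
patch): once vabs_f32 expands to __builtin_aarch64_absv2sf and that
builtin folds to ABS_EXPR, the generic folders can simplify nested
calls, something the old inline-asm implementation prevented because
asm statements are opaque to the optimizers:

  #include <arm_neon.h>

  float32x2_t
  redundant_abs (float32x2_t x)
  {
    /* abs (abs (x)) folds to abs (x), so only one fabs should
       be emitted here.  */
    return vabs_f32 (vabs_f32 (x));
  }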
Tested on aarch64-none-elf with no regressions.
Thanks,
James
---
gcc/
2013-04-22 James Greenhalgh <james.greenhalgh@arm.com>
* config/aarch64/aarch64-builtins.c
(aarch64_fold_builtin): New.
* config/aarch64/aarch64-protos.h (aarch64_fold_builtin): New.
* config/aarch64/aarch64.c (TARGET_FOLD_BUILTIN): Define.
* config/aarch64/aarch64-simd-builtins.def (abs): New.
* config/aarch64/arm_neon.h
(vabs<q>_f<32, 64>): Implement using __builtin_aarch64_abs<mode>.
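(As an aside, not part of the patch: BUILTIN_VDQF iterates its entry
over the V2SF, V4SF and V2DF modes, so the new `abs' entry generates
__builtin_aarch64_absv2sf, __builtin_aarch64_absv4sf and
__builtin_aarch64_absv2df, and with the VAR1 definition shown in the
first hunk the BUILTIN_VDQF (UNOP, abs, 2) line in the switch below
expands to:

  case AARCH64_SIMD_BUILTIN_absv2sf:
  case AARCH64_SIMD_BUILTIN_absv4sf:
  case AARCH64_SIMD_BUILTIN_absv2df:

so all three float abs builtins share the same ABS_EXPR fold.)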
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index a786945..2851e2b 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -1260,6 +1260,29 @@ aarch64_builtin_vectorized_function (tree fndecl, tree type_out, tree type_in)
#define VAR1(T, N, MAP, A) \
case AARCH64_SIMD_BUILTIN_##N##A:
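+/* Fold a call to the builtin function with decl FNDECL and arguments
+   ARGS, of which there are N_ARGS.  IGNORE is true if the value of the
+   call is ignored.  Return the folded GENERIC tree, or NULL_TREE if no
+   folding was possible.  */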
+tree
+aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
+ bool ignore ATTRIBUTE_UNUSED)
+{
+ int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree type = TREE_TYPE (TREE_TYPE (fndecl));
+
+ switch (fcode)
+ {
+ BUILTIN_VDQF (UNOP, abs, 2)
+ return fold_build1 (ABS_EXPR, type, args[0]);
+ break;
+ default:
+ break;
+ }
+
+ return NULL_TREE;
+}
+
bool
aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index bf02b99..c09b227 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -179,6 +179,7 @@ rtx aarch64_simd_gen_const_vector_dup (enum machine_mode, int);
bool aarch64_simd_mem_operand_p (rtx);
rtx aarch64_simd_vect_par_cnst_half (enum machine_mode, bool);
rtx aarch64_tls_get_addr (void);
+tree aarch64_fold_builtin (tree, int, tree *, bool);
unsigned aarch64_dbx_register_number (unsigned);
unsigned aarch64_trampoline_size (void);
void aarch64_asm_output_labelref (FILE *, const char *);
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 534e23b..6e69298 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -282,3 +282,5 @@
BUILTIN_VDQF (UNOP, frecpe, 0)
BUILTIN_VDQF (BINOP, frecps, 0)
+
+ BUILTIN_VDQF (UNOP, abs, 2)
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 22903e0..149665d 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -7874,6 +7874,9 @@ aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START aarch64_expand_builtin_va_start
+#undef TARGET_FOLD_BUILTIN
+#define TARGET_FOLD_BUILTIN aarch64_fold_builtin
+
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG aarch64_function_arg
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index e7ba000..6f5ca8e 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -4468,17 +4468,6 @@ vabds_f32 (float32_t a, float32_t b)
return result;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vabs_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("fabs %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vabs_s8 (int8x8_t a)
{
@@ -4512,28 +4501,6 @@ vabs_s32 (int32x2_t a)
return result;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vabsq_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("fabs %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vabsq_f64 (float64x2_t a)
-{
- float64x2_t result;
- __asm__ ("fabs %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vabsq_s8 (int8x16_t a)
{
@@ -19717,6 +19684,26 @@ vtbx4_p8 (poly8x8_t r, poly8x8x4_t tab, uint8x8_t idx)
/* Start of optimal implementations in approved order. */
+/* vabs */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabs_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_absv2sf (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabsq_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_absv4sf (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vabsq_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_absv2df (__a);
+}
+
/* vadd */
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))