This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.
[AArch64] [3/4 Fix vtbx1] Implement bsl intrinsics using builtins
- From: James Greenhalgh <james.greenhalgh@arm.com>
- To: gcc-patches@gcc.gnu.org
- Cc: marcus.shawcroft@arm.com
- Date: Fri, 22 Nov 2013 15:11:41 +0000
- Subject: [AArch64] [3/4 Fix vtbx1] Implement bsl intrinsics using builtins
- Authentication-results: sourceware.org; auth=none
- References: <1385133102-19231-1-git-send-email-james.greenhalgh@arm.com>
Hi,
This patch wires up the bsl intrinsics in arm_neon.h
using builtins.
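
For reference, bsl is a pure bitwise select: each result bit comes from the
second data operand where the corresponding mask bit is set, and from the
third where it is clear. A minimal scalar sketch of the semantics
(illustration only, mirroring the RTL pattern in the aarch64-simd.md hunk
below, not code from the patch):

    #include <stdint.h>

    /* Bitwise select, as the bsl pattern models it:
       result = (mask & b) | (~mask & c).  */
    uint32_t
    bsl_scalar (uint32_t mask, uint32_t b, uint32_t c)
    {
      return (mask & b) | (~mask & c);
    }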
Regression tested on aarch64-none-elf; no regressions.
OK?
Thanks,
James
---
gcc/
2013-11-22 James Greenhalgh <james.greenhalgh@arm.com>
* config/aarch64/aarch64-builtins.c
(aarch64_types_bsl_p_qualifiers): New.
(aarch64_types_bsl_s_qualifiers): Likewise.
(aarch64_types_bsl_u_qualifiers): Likewise.
(TYPES_BSL_P): Likewise.
(TYPES_BSL_S): Likewise.
(TYPES_BSL_U): Likewise.
(BUILTIN_VALLDIF): Likewise.
(BUILTIN_VDQQH): Likewise.
* config/aarch64/aarch64-simd-builtins.def (simd_bsl): New.
* config/aarch64/aarch64-simd.md
(aarch64_simd_bsl<mode>_internal): Handle more modes.
(aarch64_simd_bsl<mode>): Likewise.
* config/aarch64/arm_neon.h
(vbsl<q>_<fpsu><8,16,32,64>): Implement using builtins.
* config/aarch64/iterators.md (VALLDIF): New.
(Vbtype): Handle more modes.
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 09616cb..9f4a9a0 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -170,6 +170,22 @@ aarch64_types_load1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
#define TYPES_LOAD1 (aarch64_types_load1_qualifiers)
#define TYPES_LOADSTRUCT (aarch64_types_load1_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_bsl_p_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_poly, qualifier_unsigned,
+ qualifier_poly, qualifier_poly };
+#define TYPES_BSL_P (aarch64_types_bsl_p_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_bsl_s_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_unsigned,
+ qualifier_none, qualifier_none };
+#define TYPES_BSL_S (aarch64_types_bsl_s_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_bsl_u_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned,
+ qualifier_unsigned, qualifier_unsigned };
+#define TYPES_BSL_U (aarch64_types_bsl_u_qualifiers)
+
/* The first argument (return type) of a store should be void type,
which we represent with qualifier_void. Their first operand will be
a DImode pointer to the location to store to, so we must use
@@ -244,6 +260,9 @@ aarch64_types_store1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
#define BUILTIN_VALLDI(T, N, MAP) \
VAR11 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, \
v4si, v2di, v2sf, v4sf, v2df, di)
+#define BUILTIN_VALLDIF(T, N, MAP) \
+ VAR12 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, \
+ v4si, v2di, v2sf, v4sf, v2df, di, df)
#define BUILTIN_VB(T, N, MAP) \
VAR2 (T, N, MAP, v8qi, v16qi)
#define BUILTIN_VD(T, N, MAP) \
@@ -268,6 +287,8 @@ aarch64_types_store1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
VAR6 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si)
#define BUILTIN_VDQV(T, N, MAP) \
VAR5 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v4si)
+#define BUILTIN_VDQQH(T, N, MAP) \
+ VAR4 (T, N, MAP, v8qi, v16qi, v4hi, v8hi)
#define BUILTIN_VDQ_BHSI(T, N, MAP) \
VAR6 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si)
#define BUILTIN_VDQ_I(T, N, MAP) \
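
For illustration (an inference, not part of the patch): a qualifier array
lists the return type followed by the argument types, and that sequence is
mangled into each builtin's name as a suffix, s for qualifier_none, u for
qualifier_unsigned, p for qualifier_poly. The BSL_S instance for V2SF thus
gets this effective prototype, matching the name used in the arm_neon.h
hunk further down:

    /* Sketch: BSL_S = { none, unsigned, none, none } applied to V2SF.
       The "suss" suffix encodes the return/argument qualifiers in order.  */
    float32x2_t
    __builtin_aarch64_simd_bslv2sf_suss (uint32x2_t, float32x2_t, float32x2_t);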
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index c18b150..1dc3c1f 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -362,3 +362,8 @@
/* Implemented by fma<mode>4. */
BUILTIN_VDQF (TERNOP, fma, 4)
+ /* Implemented by aarch64_simd_bsl<mode>. */
+ BUILTIN_VDQQH (BSL_P, simd_bsl, 0)
+ BUILTIN_VSDQ_I_DI (BSL_U, simd_bsl, 0)
+ BUILTIN_VALLDIF (BSL_S, simd_bsl, 0)
+
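
As a reading aid (an assumption about the VARn helpers, which are not shown
in this hunk): each entry in this file expands to one builtin per mode
listed in its BUILTIN_* macro, so the BSL_S line alone registers twelve
functions:

    /* Illustrative fan-out of BUILTIN_VALLDIF (BSL_S, simd_bsl, 0),
       one builtin per mode in the VAR12 list:
         __builtin_aarch64_simd_bslv8qi_suss
         __builtin_aarch64_simd_bslv16qi_suss
         ...
         __builtin_aarch64_simd_bsldi_suss
         __builtin_aarch64_simd_bsldf_suss  */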
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index b9ebdf54431fcdaac6161a774bfe4d38fb52a44b..cf871d1e689348de565104129adfdb5f1fb78eec 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1643,15 +1643,15 @@ (define_insn "reduc_<maxmin_uns>_v4sf"
;; bif op0, op1, mask
(define_insn "aarch64_simd_bsl<mode>_internal"
- [(set (match_operand:VALL 0 "register_operand" "=w,w,w")
- (ior:VALL
- (and:VALL
+ [(set (match_operand:VALLDIF 0 "register_operand" "=w,w,w")
+ (ior:VALLDIF
+ (and:VALLDIF
(match_operand:<V_cmp_result> 1 "register_operand" " 0,w,w")
- (match_operand:VALL 2 "register_operand" " w,w,0"))
- (and:VALL
+ (match_operand:VALLDIF 2 "register_operand" " w,w,0"))
+ (and:VALLDIF
(not:<V_cmp_result>
(match_dup:<V_cmp_result> 1))
- (match_operand:VALL 3 "register_operand" " w,0,w"))
+ (match_operand:VALLDIF 3 "register_operand" " w,0,w"))
))]
"TARGET_SIMD"
"@
@@ -1662,10 +1662,10 @@ (define_insn "aarch64_simd_bsl<mode>_int
)
(define_expand "aarch64_simd_bsl<mode>"
- [(match_operand:VALL 0 "register_operand")
+ [(match_operand:VALLDIF 0 "register_operand")
(match_operand:<V_cmp_result> 1 "register_operand")
- (match_operand:VALL 2 "register_operand")
- (match_operand:VALL 3 "register_operand")]
+ (match_operand:VALLDIF 2 "register_operand")
+ (match_operand:VALLDIF 3 "register_operand")]
"TARGET_SIMD"
{
/* We can't alias operands together if they have different modes. */
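
For context on that comment (inferred from the pattern above; the rest of
the expander body lies outside this hunk): the mask operand has the integer
comparison-result mode, which for the float variants differs from the data
mode:

    /* Operand modes for the V2SF instance of the pattern:
         operands 0, 2, 3 : V2SF (float32x2_t data)
         operand 1        : V2SI (uint32x2_t mask, <V_cmp_result> of V2SF)
       so a mask operand and a data operand cannot be tied directly.  */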
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 26d96c1..11f8037 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -4839,259 +4839,6 @@ vaddlvq_u32 (uint32x4_t a)
return result;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vbsl_f32 (uint32x2_t a, float32x2_t b, float32x2_t c)
-{
- float32x2_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vbsl_p8 (uint8x8_t a, poly8x8_t b, poly8x8_t c)
-{
- poly8x8_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vbsl_p16 (uint16x4_t a, poly16x4_t b, poly16x4_t c)
-{
- poly16x4_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vbsl_s8 (uint8x8_t a, int8x8_t b, int8x8_t c)
-{
- int8x8_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vbsl_s16 (uint16x4_t a, int16x4_t b, int16x4_t c)
-{
- int16x4_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vbsl_s32 (uint32x2_t a, int32x2_t b, int32x2_t c)
-{
- int32x2_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vbsl_s64 (uint64x1_t a, int64x1_t b, int64x1_t c)
-{
- int64x1_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vbsl_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
-{
- uint8x8_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vbsl_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
-{
- uint16x4_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vbsl_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
-{
- uint32x2_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vbsl_u64 (uint64x1_t a, uint64x1_t b, uint64x1_t c)
-{
- uint64x1_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vbslq_f32 (uint32x4_t a, float32x4_t b, float32x4_t c)
-{
- float32x4_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vbslq_f64 (uint64x2_t a, float64x2_t b, float64x2_t c)
-{
- float64x2_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vbslq_p8 (uint8x16_t a, poly8x16_t b, poly8x16_t c)
-{
- poly8x16_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vbslq_p16 (uint16x8_t a, poly16x8_t b, poly16x8_t c)
-{
- poly16x8_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vbslq_s8 (uint8x16_t a, int8x16_t b, int8x16_t c)
-{
- int8x16_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vbslq_s16 (uint16x8_t a, int16x8_t b, int16x8_t c)
-{
- int16x8_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vbslq_s32 (uint32x4_t a, int32x4_t b, int32x4_t c)
-{
- int32x4_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vbslq_s64 (uint64x2_t a, int64x2_t b, int64x2_t c)
-{
- int64x2_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vbslq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
-{
- uint8x16_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vbslq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
-{
- uint16x8_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vbslq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
-{
- uint32x4_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vbslq_u64 (uint64x2_t a, uint64x2_t b, uint64x2_t c)
-{
- uint64x2_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vcls_s8 (int8x8_t a)
{
@@ -15779,6 +15526,146 @@ vaddvq_f64 (float64x2_t __a)
return vgetq_lane_f64 (t, 0);
}
+/* vbsl */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vbsl_f32 (uint32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2sf_suss (__a, __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vbsl_p8 (uint8x8_t __a, poly8x8_t __b, poly8x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8qi_pupp (__a, __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4hi_pupp (__a, __b, __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbsl_s8 (uint8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8qi_suss (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbsl_s16 (uint16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4hi_suss (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbsl_s32 (uint32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2si_suss (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbsl_s64 (uint64x1_t __a, int64x1_t __b, int64x1_t __c)
+{
+ return __builtin_aarch64_simd_bsldi_suss (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbsl_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8qi_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbsl_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4hi_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbsl_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2si_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbsl_u64 (uint64x1_t __a, uint64x1_t __b, uint64x1_t __c)
+{
+ return __builtin_aarch64_simd_bsldi_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vbslq_f32 (uint32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4sf_suss (__a, __b, __c);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vbslq_f64 (uint64x2_t __a, float64x2_t __b, float64x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2df_suss (__a, __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vbslq_p8 (uint8x16_t __a, poly8x16_t __b, poly8x16_t __c)
+{
+ return __builtin_aarch64_simd_bslv16qi_pupp (__a, __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vbslq_p16 (uint16x8_t __a, poly16x8_t __b, poly16x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8hi_pupp (__a, __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return __builtin_aarch64_simd_bslv16qi_suss (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8hi_suss (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4si_suss (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbslq_s64 (uint64x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2di_suss (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbslq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return __builtin_aarch64_simd_bslv16qi_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbslq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8hi_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbslq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4si_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbslq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2di_uuuu (__a, __b, __c);
+}
+
/* vcage */
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
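
A hypothetical usage example for the rewritten intrinsics (not part of the
patch); with optimization enabled this should compile to a single bsl, bit
or bif instruction:

    #include <arm_neon.h>

    /* Select lanes of x where mask bits are 1, lanes of y where they are 0.  */
    float32x2_t
    select_f32 (uint32x2_t mask, float32x2_t x, float32x2_t y)
    {
      return vbsl_f32 (mask, x, y);
    }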
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 50bdac9b6a8ed305f76ece1b448847212b991a24..f00d414d61b961c025a0e7e1cca681cb865083cc 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -107,6 +107,10 @@ (define_mode_iterator VALL [V8QI V16QI V
;; All vector modes and DI.
(define_mode_iterator VALLDI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI V2SF V4SF V2DF DI])
+;; All vector modes and DI and DF.
+(define_mode_iterator VALLDIF [V8QI V16QI V4HI V8HI V2SI V4SI
+ V2DI V2SF V4SF V2DF DI DF])
+
;; Vector modes for Integer reduction across lanes.
(define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI])
@@ -360,7 +364,8 @@ (define_mode_attr Vbtype [(V8QI "8b") (
(V4HI "8b") (V8HI "16b")
(V2SI "8b") (V4SI "16b")
(V2DI "16b") (V2SF "8b")
- (V4SF "16b") (V2DF "16b")])
+ (V4SF "16b") (V2DF "16b")
+ (DI "8b") (DF "8b")])
;; Define element mode for each vector mode.
(define_mode_attr VEL [(V8QI "QI") (V16QI "QI")
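
One closing illustration: Vbtype supplies the register-arrangement suffix
used in the instruction's output template, so the new DI and DF entries
keep the 64-bit scalar variants on the same 8-byte form the removed inline
asm used:

    /* Expected output for a DI-mode bsl (assumption, matching the removed
       "bsl %0.8b, %2.8b, %3.8b" asm in arm_neon.h):
         bsl  v0.8b, v1.8b, v2.8b  */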