This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[PATCH, i386]: Committed: Further cleanups to ?mmintrin.h and mm3dnow.h


Hello!

A couple of intrinsics with immediate arguments were missed during macro->function conversions in ?mmintrin.h. Also, __attribute__((__always_inline__)) has been added throughout mm3dnow.h.

Testsuite was enhanced (gcc.target/i386/sse-12.c) to check for C89 pedantic errors through all MMX/SSE intrinsics in one go. A couple of warnings were uncovered in mm3dnow.h. This patch also fixes these.

Patch was bootstrapped on x86_64-pc-linux-gnu, regression tested for all default languages.

2007-05-20 Uros Bizjak <ubizjak@gmail.com>

       * config/i386/tmmintrin.h (_mm_alignr_epi8): Implement as always
       inlined function, not as a macro.
       (_mm_alignr_pi8): Ditto.
       * config/i386/ammintrin.h (_mm_extracti_si64): Ditto.
       (_mm_inserti_si64): Ditto.
       * config/i386/emmintrin.h (_mm_shuffle_pd): Ditto.
       (_mm_extract_epi16): Ditto.
       (_mm_insert_epi16): Ditto.
       (_mm_shufflehi_epi16): Ditto.
       (_mm_shufflelo_epi16): Ditto.
       (_mm_shuffle_epi32): Ditto.
       * config/i386/xmmintrin.h (_mm_set_ss): Use 0.0f for float constant.
       * config/i386/mm3dnow.h: Add __attribute__((__always_inline__)) to
       all functions.
       (_m_from_float): Add __extension__ to conversion.  Use 0.0f for
       float constant.
       (_m_to_float): Use C89 compatible assignment.

testsuite/ChangeLog:

2007-05-20 Uros Bizjak <ubizjak@gmail.com>

       * gcc.target/i386/sse-vect-types.c: Revert 'Use "-msse"
       instead of "-msse2".'
       * gcc.target/i386/sse-12.c: Use "-march=k8 -m3dnow -mssse3 -msse4a"
       instead of "-msse3".  Include only ammintrin.h, tmmintrin.h
       and mm3dnow.h.
       * gcc.target/i386/sse-13.c (__builtin_ia32_extrqi): Redefine
       to test with immediate operand.
       (__builtin_ia32_insertqi): Ditto.
       (__builtin_ia32_palignr128): Ditto.
       (__builtin_ia32_palignr): Ditto.
       (__builtin_ia32_pshufhw): Ditto.
       (__builtin_ia32_pshuflw): Ditto.
       (__builtin_ia32_pshufd): Ditto.
       (__builtin_ia32_vec_set_v8hi): Ditto.
       (__builtin_ia32_vec_ext_v8hi): Ditto.
       (__builtin_ia32_shufpd): Ditto.
       * gcc.target/i386/sse-14.c: Same changes as sse-13.c.

Uros.
Index: config/i386/tmmintrin.h
===================================================================
--- config/i386/tmmintrin.h	(revision 124867)
+++ config/i386/tmmintrin.h	(working copy)
@@ -181,11 +181,16 @@
   return (__m64) __builtin_ia32_psignd ((__v2si)__X, (__v2si)__Y);
 }
 
-#define _mm_alignr_epi8(__X, __Y, __N) \
-  ((__m128i)__builtin_ia32_palignr128 ((__v2di) __X, (__v2di) __Y, (__N) * 8))
+static __inline __m128i __attribute__((__always_inline__))
+_mm_alignr_epi8(__m128i __X, __m128i __Y, const int __N)
+{
+  return (__m128i)__builtin_ia32_palignr128 ((__v2di)__X, (__v2di)__Y, __N * 8);}
 
-#define _mm_alignr_pi8(__X, __Y, __N) \
-  ((__m64)__builtin_ia32_palignr ((long long) (__X), (long long) (__Y), (__N) * 8))
+static __inline __m64 __attribute__((__always_inline__))
+_mm_alignr_pi8(__m64 __X, __m64 __Y, const int __N)
+{
+  return (__m64)__builtin_ia32_palignr ((long long)__X, (long long)__Y, __N * 8);
+}
 
 static __inline __m128i __attribute__((__always_inline__))
 _mm_abs_epi8 (__m128i __X)
Index: config/i386/mm3dnow.h
===================================================================
--- config/i386/mm3dnow.h	(revision 124867)
+++ config/i386/mm3dnow.h	(working copy)
@@ -37,178 +37,179 @@
 /* Internal data types for implementing the intrinsics.  */
 typedef float __v2sf __attribute__ ((__vector_size__ (8)));
 
-static __inline void
+static __inline void __attribute__((__always_inline__))
 _m_femms (void)
 {
   __builtin_ia32_femms();
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pavgusb (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pavgusb ((__v8qi)__A, (__v8qi)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pf2id (__m64 __A)
 {
   return (__m64)__builtin_ia32_pf2id ((__v2sf)__A);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfacc (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfacc ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfadd (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfadd ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfcmpeq (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfcmpeq ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfcmpge (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfcmpge ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfcmpgt (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfcmpgt ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfmax (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfmax ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfmin (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfmin ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfmul (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfmul ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfrcp (__m64 __A)
 {
   return (__m64)__builtin_ia32_pfrcp ((__v2sf)__A);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfrcpit1 (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfrcpit1 ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfrcpit2 (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfrcpit2 ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfrsqrt (__m64 __A)
 {
   return (__m64)__builtin_ia32_pfrsqrt ((__v2sf)__A);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfrsqit1 (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfrsqit1 ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfsub (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfsub ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfsubr (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfsubr ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pi2fd (__m64 __A)
 {
   return (__m64)__builtin_ia32_pi2fd ((__v2si)__A);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pmulhrw (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pmulhrw ((__v4hi)__A, (__v4hi)__B);
 }
 
-static __inline void
+static __inline void __attribute__((__always_inline__))
 _m_prefetch (void *__P)
 {
   __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
 }
 
-static __inline void
+static __inline void __attribute__((__always_inline__))
 _m_prefetchw (void *__P)
 {
   __builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_from_float (float __A)
 {
-  return (__m64)(__v2sf){ __A, 0 };
+  return __extension__ (__m64)(__v2sf){ __A, 0.0f };
 }
 
-static __inline float
+static __inline float __attribute__((__always_inline__))
 _m_to_float (__m64 __A)
 {
-  union { __v2sf v; float a[2]; } __tmp = { (__v2sf)__A };
+  union { __v2sf v; float a[2]; } __tmp;
+  __tmp.v = (__v2sf)__A;
   return __tmp.a[0];
 }
 
 #ifdef __3dNOW_A__
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pf2iw (__m64 __A)
 {
   return (__m64)__builtin_ia32_pf2iw ((__v2sf)__A);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfnacc (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfnacc ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pfpnacc (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pfpnacc ((__v2sf)__A, (__v2sf)__B);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pi2fw (__m64 __A)
 {
   return (__m64)__builtin_ia32_pi2fw ((__v2si)__A);
 }
 
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
 _m_pswapd (__m64 __A)
 {
   return (__m64)__builtin_ia32_pswapdsf ((__v2sf)__A);
Index: config/i386/xmmintrin.h
===================================================================
--- config/i386/xmmintrin.h	(revision 124867)
+++ config/i386/xmmintrin.h	(working copy)
@@ -855,7 +855,7 @@
 static __inline __m128 __attribute__((__always_inline__))
 _mm_set_ss (float __F)
 {
-  return __extension__ (__m128)(__v4sf){ __F, 0, 0, 0 };
+  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
 }
 
 /* Create a vector with all four elements equal to F.  */
Index: config/i386/ammintrin.h
===================================================================
--- config/i386/ammintrin.h	(revision 124867)
+++ config/i386/ammintrin.h	(working copy)
@@ -55,8 +55,11 @@
   return (__m128i) __builtin_ia32_extrq ((__v2di) __X, (__v16qi) __Y);
 }
 
-#define _mm_extracti_si64(X, I, L) \
-((__m128i) __builtin_ia32_extrqi ((__v2di)(X), I, L))
+static __inline __m128i __attribute__((__always_inline__))
+_mm_extracti_si64 (__m128i __X, unsigned const int __I, unsigned const int __L)
+{
+  return (__m128i) __builtin_ia32_extrqi ((__v2di) __X, __I, __L);
+}
 
 static __inline __m128i __attribute__((__always_inline__))
 _mm_insert_si64 (__m128i __X,__m128i __Y)
@@ -64,8 +67,11 @@
   return (__m128i) __builtin_ia32_insertq ((__v2di)__X, (__v2di)__Y);
 }
 
-#define _mm_inserti_si64(X, Y, I, L) \
-((__m128i) __builtin_ia32_insertqi ((__v2di)(X), (__v2di)(Y), I, L))
+static __inline __m128i __attribute__((__always_inline__))
+_mm_inserti_si64(__m128i __X, __m128i __Y, unsigned const int __I, unsigned const int __L)
+{
+  return (__m128i) __builtin_ia32_insertqi ((__v2di)__X, (__v2di)__Y, __I, __L);
+}
 
 
 #endif /* __SSE4A__ */
Index: config/i386/emmintrin.h
===================================================================
--- config/i386/emmintrin.h	(revision 124867)
+++ config/i386/emmintrin.h	(working copy)
@@ -57,7 +57,7 @@
 static __inline __m128d __attribute__((__always_inline__))
 _mm_set_sd (double __F)
 {
-  return __extension__ (__m128d){ __F, 0 };
+  return __extension__ (__m128d){ __F, 0.0 };
 }
 
 /* Create a vector with both elements equal to F.  */
@@ -880,7 +880,11 @@
   return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B);
 }
 
-#define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C)))
+static __inline __m128d __attribute__((__always_inline__))
+_mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask)
+{
+  return (__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, __mask);
+}
 
 static __inline __m128d __attribute__((__always_inline__))
 _mm_unpackhi_pd (__m128d __A, __m128d __B)
@@ -1137,13 +1141,13 @@
 static __inline __m128i __attribute__((__always_inline__))
 _mm_srli_si128 (__m128i __A, const int __B)
 {
-  return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B * 8));
+  return (__m128i)__builtin_ia32_psrldqi128 (__A, __B * 8);
 }
 
 static __inline __m128i __attribute__((__always_inline__))
 _mm_slli_si128 (__m128i __A, const int __B)
 {
-  return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B * 8));
+  return (__m128i)__builtin_ia32_pslldqi128 (__A, __B * 8);
 }
 
 static __inline __m128i __attribute__((__always_inline__))
@@ -1290,7 +1294,6 @@
   return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B);
 }
 
-#if 0
 static __inline int __attribute__((__always_inline__))
 _mm_extract_epi16 (__m128i const __A, int const __N)
 {
@@ -1302,12 +1305,6 @@
 {
   return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N);
 }
-#else
-#define _mm_extract_epi16(A, N) \
-  ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(A), (N)))
-#define _mm_insert_epi16(A, D, N) \
-  ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N)))
-#endif
 
 static __inline __m128i __attribute__((__always_inline__))
 _mm_max_epi16 (__m128i __A, __m128i __B)
@@ -1345,10 +1342,24 @@
   return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-#define _mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __B))
-#define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B))
-#define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B))
+static __inline __m128i __attribute__((__always_inline__))
+_mm_shufflehi_epi16 (__m128i __A, const int __mask)
+{
+  return (__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __mask);
+}
 
+static __inline __m128i __attribute__((__always_inline__))
+_mm_shufflelo_epi16 (__m128i __A, const int __mask)
+{
+  return (__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __mask);
+}
+
+static __inline __m128i __attribute__((__always_inline__))
+_mm_shuffle_epi32 (__m128i __A, const int __mask)
+{
+  return (__m128i)__builtin_ia32_pshufd ((__v4si)__A, __mask);
+}
+
 static __inline void __attribute__((__always_inline__))
 _mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
 {
Index: config/soft-fp/negdf2.c
===================================================================
--- config/soft-fp/negdf2.c	(revision 124867)
+++ config/soft-fp/negdf2.c	(working copy)
@@ -1,6 +1,6 @@
 /* Software floating-point emulation.
    Return -a
-   Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+   Copyright (C) 1997,1999,2006,2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Richard Henderson (rth@cygnus.com) and
 		  Jakub Jelinek (jj@ultra.linux.cz).
@@ -42,7 +42,6 @@
   FP_NEG_D(R, A);
   FP_PACK_D(r, R);
   FP_CLEAR_EXCEPTIONS;
-  FP_HANDLE_EXCEPTIONS;
 
   return r;
 }
Index: config/soft-fp/negsf2.c
===================================================================
--- config/soft-fp/negsf2.c	(revision 124867)
+++ config/soft-fp/negsf2.c	(working copy)
@@ -1,6 +1,6 @@
 /* Software floating-point emulation.
    Return -a
-   Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+   Copyright (C) 1997,1999,2006,2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Richard Henderson (rth@cygnus.com) and
 		  Jakub Jelinek (jj@ultra.linux.cz).
@@ -42,7 +42,6 @@
   FP_NEG_S(R, A);
   FP_PACK_S(r, R);
   FP_CLEAR_EXCEPTIONS;
-  FP_HANDLE_EXCEPTIONS;
 
   return r;
 }
Index: config/soft-fp/negtf2.c
===================================================================
--- config/soft-fp/negtf2.c	(revision 124867)
+++ config/soft-fp/negtf2.c	(working copy)
@@ -1,6 +1,6 @@
 /* Software floating-point emulation.
    Return -a
-   Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+   Copyright (C) 1997,1999,2006,2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Richard Henderson (rth@cygnus.com) and
 		  Jakub Jelinek (jj@ultra.linux.cz).
@@ -42,7 +42,6 @@
   FP_NEG_Q(R, A);
   FP_PACK_Q(r, R);
   FP_CLEAR_EXCEPTIONS;
-  FP_HANDLE_EXCEPTIONS;
 
   return r;
 }
Index: testsuite/gcc.target/i386/sse-14.c
===================================================================
--- testsuite/gcc.target/i386/sse-14.c	(revision 124867)
+++ testsuite/gcc.target/i386/sse-14.c	(working copy)
@@ -11,6 +11,14 @@
 
 /* Following intrinsics require immediate arguments. */
 
+/* ammintrin.h */
+#define __builtin_ia32_extrqi(X, I, L)  __builtin_ia32_extrqi(X, 1, 1)
+#define __builtin_ia32_insertqi(X, Y, I, L) __builtin_ia32_insertqi(X, Y, 1, 1)
+
+/* tmmintrin.h */
+#define __builtin_ia32_palignr128(X, Y, N) __builtin_ia32_palignr128(X, Y, 8)
+#define __builtin_ia32_palignr(X, Y, N) __builtin_ia32_palignr(X, Y, 8)
+
 /* emmintrin.h */
 #define __builtin_ia32_psllwi128(A, B) __builtin_ia32_psllwi128(A, 1)
 #define __builtin_ia32_psrlqi128(A, B) __builtin_ia32_psrlqi128(A, 1)
@@ -22,6 +30,13 @@
 #define __builtin_ia32_psradi128(A, B) __builtin_ia32_psradi128(A, 1)
 #define __builtin_ia32_psllqi128(A, B) __builtin_ia32_psllqi128(A, 1)
 #define __builtin_ia32_pslldi128(A, B) __builtin_ia32_pslldi128(A, 1)
+#define __builtin_ia32_pshufhw(A, N) __builtin_ia32_pshufhw(A, 0)
+#define __builtin_ia32_pshuflw(A, N) __builtin_ia32_pshuflw(A, 0)
+#define __builtin_ia32_pshufd(A, N) __builtin_ia32_pshufd(A, 0)
+#define __builtin_ia32_vec_set_v8hi(A, D, N) \
+  __builtin_ia32_vec_set_v8hi(A, D, 0)
+#define __builtin_ia32_vec_ext_v8hi(A, N) __builtin_ia32_vec_ext_v8hi(A, 0)
+#define __builtin_ia32_shufpd(A, B, N) __builtin_ia32_shufpd(A, B, 0)
 
 /* xmmintrin.h */
 #define __builtin_prefetch(P, A, I) __builtin_prefetch(P, A, _MM_HINT_NTA)
@@ -29,7 +44,7 @@
 #define __builtin_ia32_vec_set_v4hi(A, D, N) \
   __builtin_ia32_vec_set_v4hi(A, D, 0)
 #define __builtin_ia32_vec_ext_v4hi(A, N) __builtin_ia32_vec_ext_v4hi(A, 0)
-#define __builtin_ia32_shufps(A, B, C) __builtin_ia32_shufps(A, B, 0)
+#define __builtin_ia32_shufps(A, B, N) __builtin_ia32_shufps(A, B, 0)
 
 #include <ammintrin.h>
 #include <tmmintrin.h>
Index: testsuite/gcc.target/i386/sse-vect-types.c
===================================================================
--- testsuite/gcc.target/i386/sse-vect-types.c	(revision 124868)
+++ testsuite/gcc.target/i386/sse-vect-types.c	(working copy)
@@ -1,5 +1,5 @@
 /* { dg-do compile { target i?86-*-* x86_64-*-* } } */
-/* { dg-options "-O0 -msse" } */
+/* { dg-options "-O0 -msse2" } */
 
 #include <xmmintrin.h>
 
Index: testsuite/gcc.target/i386/sse-12.c
===================================================================
--- testsuite/gcc.target/i386/sse-12.c	(revision 124867)
+++ testsuite/gcc.target/i386/sse-12.c	(working copy)
@@ -1,12 +1,10 @@
-/* Test that {,x,e,p}mmintrin.h and mm_malloc.h are
+/* Test that {,x,e,p,t,a}mmintrin.h, mm3dnow.h and mm_malloc.h are
    usable with -std=c89 -pedantic-errors.  */
 /* { dg-do compile { target i?86-*-* x86_64-*-* } } */
-/* { dg-options "-std=c89 -pedantic-errors -msse3" } */
+/* { dg-options "-std=c89 -pedantic-errors -march=k8 -m3dnow -mssse3 -msse4a" } */
 
-#include <mmintrin.h>
-#include <xmmintrin.h>
-#include <emmintrin.h>
-#include <pmmintrin.h>
-#include <mm_malloc.h>
+#include <ammintrin.h>
+#include <tmmintrin.h>
+#include <mm3dnow.h>
 
 int dummy;
Index: testsuite/gcc.target/i386/sse-13.c
===================================================================
--- testsuite/gcc.target/i386/sse-13.c	(revision 124867)
+++ testsuite/gcc.target/i386/sse-13.c	(working copy)
@@ -11,6 +11,14 @@
 
 /* Following intrinsics require immediate arguments. */
 
+/* ammintrin.h */
+#define __builtin_ia32_extrqi(X, I, L)  __builtin_ia32_extrqi(X, 1, 1)
+#define __builtin_ia32_insertqi(X, Y, I, L) __builtin_ia32_insertqi(X, Y, 1, 1)
+
+/* tmmintrin.h */
+#define __builtin_ia32_palignr128(X, Y, N) __builtin_ia32_palignr128(X, Y, 8)
+#define __builtin_ia32_palignr(X, Y, N) __builtin_ia32_palignr(X, Y, 8)
+
 /* emmintrin.h */
 #define __builtin_ia32_psllwi128(A, B) __builtin_ia32_psllwi128(A, 1)
 #define __builtin_ia32_psrlqi128(A, B) __builtin_ia32_psrlqi128(A, 1)
@@ -22,6 +30,13 @@
 #define __builtin_ia32_psradi128(A, B) __builtin_ia32_psradi128(A, 1)
 #define __builtin_ia32_psllqi128(A, B) __builtin_ia32_psllqi128(A, 1)
 #define __builtin_ia32_pslldi128(A, B) __builtin_ia32_pslldi128(A, 1)
+#define __builtin_ia32_pshufhw(A, N) __builtin_ia32_pshufhw(A, 0)
+#define __builtin_ia32_pshuflw(A, N) __builtin_ia32_pshuflw(A, 0)
+#define __builtin_ia32_pshufd(A, N) __builtin_ia32_pshufd(A, 0)
+#define __builtin_ia32_vec_set_v8hi(A, D, N) \
+  __builtin_ia32_vec_set_v8hi(A, D, 0)
+#define __builtin_ia32_vec_ext_v8hi(A, N) __builtin_ia32_vec_ext_v8hi(A, 0)
+#define __builtin_ia32_shufpd(A, B, N) __builtin_ia32_shufpd(A, B, 0)
 
 /* xmmintrin.h */
 #define __builtin_prefetch(P, A, I) __builtin_prefetch(P, A, _MM_HINT_NTA)
@@ -29,7 +44,7 @@
 #define __builtin_ia32_vec_set_v4hi(A, D, N) \
   __builtin_ia32_vec_set_v4hi(A, D, 0)
 #define __builtin_ia32_vec_ext_v4hi(A, N) __builtin_ia32_vec_ext_v4hi(A, 0)
-#define __builtin_ia32_shufps(A, B, C) __builtin_ia32_shufps(A, B, 0)
+#define __builtin_ia32_shufps(A, B, N) __builtin_ia32_shufps(A, B, 0)
 
 #include <ammintrin.h>
 #include <tmmintrin.h>

Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]