2003-06-05  H.J. Lu  <hjl@gnu.org>

	* config.gcc (extra_headers): Add emmintrin.h for i[34567]86-*-*
	and x86_64-*-*.
	* config/i386/mmintrin.h: Update version and add alternate
	intrinsic names.
	* config/i386/xmmintrin.h: Likewise.
	* config/i386/xmmintrin.h: Include <emmintrin.h>.  Move SSE2
	intrinsics to ...
	* config/i386/emmintrin.h: Here.  New file.

--- gcc/config.gcc.sse2	2003-06-11 19:17:58.000000000 -0700
+++ gcc/config.gcc	2003-06-11 19:20:02.000000000 -0700
@@ -298,11 +298,11 @@ xscale-*-*)
 	;;
 i[34567]86-*-*)
 	cpu_type=i386
-	extra_headers="mmintrin.h xmmintrin.h"
+	extra_headers="mmintrin.h xmmintrin.h emmintrin.h"
 	;;
 x86_64-*-*)
 	cpu_type=i386
-	extra_headers="mmintrin.h xmmintrin.h"
+	extra_headers="mmintrin.h xmmintrin.h emmintrin.h"
 	;;
 ia64-*-*)
 	extra_headers="ia64intrin.h mmintrin.h"
--- gcc/config/i386/emmintrin.h.sse2	2003-06-11 19:17:58.000000000 -0700
+++ gcc/config/i386/emmintrin.h	2003-06-11 19:17:58.000000000 -0700
@@ -0,0 +1,1499 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+
+   This file is part of GNU CC.
+
+   GNU CC is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+
+   GNU CC is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GNU CC; see the file COPYING.  If not, write to
+   the Free Software Foundation, 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+/* As a special exception, if you include this header file into source
+   files compiled by GCC, this header file does not by itself cause
+   the resulting executable to be covered by the GNU General Public
+   License.
This exception does not however invalidate any other
+   reasons why the executable file might be covered by the GNU General
+   Public License.  */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 8.0.  */
+
+#ifndef _EMMINTRIN_H_INCLUDED
+#define _EMMINTRIN_H_INCLUDED
+
+#ifdef __SSE2__
+#include <xmmintrin.h>
+
+/* SSE2 */
+typedef int __v2df __attribute__ ((mode (V2DF)));
+typedef int __v2di __attribute__ ((mode (V2DI)));
+typedef int __v4si __attribute__ ((mode (V4SI)));
+typedef int __v8hi __attribute__ ((mode (V8HI)));
+typedef int __v16qi __attribute__ ((mode (V16QI)));
+
+/* Create a selector for use with the SHUFPD instruction.  */
+#define _MM_SHUFFLE2(fp1,fp0) \
+ (((fp1) << 1) | (fp0))
+
+#define __m128i __v2di
+#define __m128d __v2df
+
+/* Create a vector with element 0 as *P and the rest zero.  */
+static __inline __m128d
+_mm_load_sd (double const *__P)
+{
+  return (__m128d) __builtin_ia32_loadsd (__P);
+}
+
+/* Create a vector with all two elements equal to *P.  */
+static __inline __m128d
+_mm_load1_pd (double const *__P)
+{
+  __v2df __tmp = __builtin_ia32_loadsd (__P);
+  return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,0));
+}
+
+static __inline __m128d
+_mm_load_pd1 (double const *__P)
+{
+  return _mm_load1_pd (__P);
+}
+
+/* Load two DPFP values from P.  The address must be 16-byte aligned.  */
+static __inline __m128d
+_mm_load_pd (double const *__P)
+{
+  return (__m128d) __builtin_ia32_loadapd (__P);
+}
+
+/* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
+static __inline __m128d
+_mm_loadu_pd (double const *__P)
+{
+  return (__m128d) __builtin_ia32_loadupd (__P);
+}
+
+/* Load two DPFP values in reverse order.  The address must be aligned.
*/
+static __inline __m128d
+_mm_loadr_pd (double const *__P)
+{
+  __v2df __tmp = __builtin_ia32_loadapd (__P);
+  return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1));
+}
+
+/* Create a vector with element 0 as F and the rest zero.  */
+static __inline __m128d
+_mm_set_sd (double __F)
+{
+  return (__m128d) __builtin_ia32_loadsd (&__F);
+}
+
+/* Create a vector with all two elements equal to F.  */
+static __inline __m128d
+_mm_set1_pd (double __F)
+{
+  __v2df __tmp = __builtin_ia32_loadsd (&__F);
+  return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,0));
+}
+
+static __inline __m128d
+_mm_set_pd1 (double __F)
+{
+  return _mm_set1_pd (__F);
+}
+
+/* Create the vector [Z Y].  */
+static __inline __m128d
+_mm_set_pd (double __Z, double __Y)
+{
+  union {
+    double __a[2];
+    __m128d __v;
+  } __u;
+
+  __u.__a[0] = __Y;
+  __u.__a[1] = __Z;
+
+  return __u.__v;
+}
+
+/* Create the vector [Y Z].  */
+static __inline __m128d
+_mm_setr_pd (double __Z, double __Y)
+{
+  return _mm_set_pd (__Y, __Z);
+}
+
+/* Create a vector of zeros.  */
+static __inline __m128d
+_mm_setzero_pd (void)
+{
+  return (__m128d) __builtin_ia32_setzeropd ();
+}
+
+/* Stores the lower DPFP value.  */
+static __inline void
+_mm_store_sd (double *__P, __m128d __A)
+{
+  __builtin_ia32_storesd (__P, (__v2df)__A);
+}
+
+/* Store the lower DPFP value across two words.  */
+static __inline void
+_mm_store1_pd (double *__P, __m128d __A)
+{
+  __v2df __va = (__v2df)__A;
+  __v2df __tmp = __builtin_ia32_shufpd (__va, __va, _MM_SHUFFLE2 (0,0));
+  __builtin_ia32_storeapd (__P, __tmp);
+}
+
+static __inline void
+_mm_store_pd1 (double *__P, __m128d __A)
+{
+  _mm_store1_pd (__P, __A);
+}
+
+/* Store two DPFP values.  The address must be 16-byte aligned.  */
+static __inline void
+_mm_store_pd (double *__P, __m128d __A)
+{
+  __builtin_ia32_storeapd (__P, (__v2df)__A);
+}
+
+/* Store two DPFP values.  The address need not be 16-byte aligned.
*/
+static __inline void
+_mm_storeu_pd (double *__P, __m128d __A)
+{
+  __builtin_ia32_storeupd (__P, (__v2df)__A);
+}
+
+/* Store two DPFP values in reverse order.  The address must be aligned.  */
+static __inline void
+_mm_storer_pd (double *__P, __m128d __A)
+{
+  __v2df __va = (__v2df)__A;
+  __v2df __tmp = __builtin_ia32_shufpd (__va, __va, _MM_SHUFFLE2 (0,1));
+  __builtin_ia32_storeapd (__P, __tmp);
+}
+
+/* Sets the low DPFP value of A from the low value of B.  */
+static __inline __m128d
+_mm_move_sd (__m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
+}
+
+
+static __inline __m128d
+_mm_add_pd (__m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_addpd ((__v2df)__A, (__v2df)__B);
+}
+
+static __inline __m128d
+_mm_add_sd (__m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B);
+}
+
+static __inline __m128d
+_mm_sub_pd (__m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_subpd ((__v2df)__A, (__v2df)__B);
+}
+
+static __inline __m128d
+_mm_sub_sd (__m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B);
+}
+
+static __inline __m128d
+_mm_mul_pd (__m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_mulpd ((__v2df)__A, (__v2df)__B);
+}
+
+static __inline __m128d
+_mm_mul_sd (__m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B);
+}
+
+static __inline __m128d
+_mm_div_pd (__m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_divpd ((__v2df)__A, (__v2df)__B);
+}
+
+static __inline __m128d
+_mm_div_sd (__m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B);
+}
+
+static __inline __m128d
+_mm_sqrt_pd (__m128d __A)
+{
+  return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A);
+}
+
+/* Return pair {sqrt (B[0]), A[1]}.
*/ +static __inline __m128d +_mm_sqrt_sd (__m128d __A, __m128d __B) +{ + __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B); + return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp); +} + +static __inline __m128d +_mm_min_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_min_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_max_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_max_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_and_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_andnot_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_or_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_xor_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpeq_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmplt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmple_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpgt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpge_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpgepd 
((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpneq_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpnlt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpnle_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpngt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpnge_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpord_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpunord_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpeq_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmplt_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmple_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpgt_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpltsd ((__v2df) __B, + (__v2df) + __A)); +} + +static __inline __m128d +_mm_cmpge_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmplesd ((__v2df) __B, + (__v2df) + __A)); +} + +static __inline __m128d +_mm_cmpneq_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpneqsd 
((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpnlt_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpnle_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpngt_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpnltsd ((__v2df) __B, + (__v2df) + __A)); +} + +static __inline __m128d +_mm_cmpnge_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpnlesd ((__v2df) __B, + (__v2df) + __A)); +} + +static __inline __m128d +_mm_cmpord_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_cmpunord_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B); +} + +static __inline int +_mm_comieq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B); +} + +static __inline int +_mm_comilt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B); +} + +static __inline int +_mm_comile_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B); +} + +static __inline int +_mm_comigt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B); +} + +static __inline int +_mm_comige_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B); +} + +static __inline int +_mm_comineq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B); +} + +static __inline int +_mm_ucomieq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B); +} + +static __inline int +_mm_ucomilt_sd (__m128d __A, 
__m128d __B) +{ + return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B); +} + +static __inline int +_mm_ucomile_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B); +} + +static __inline int +_mm_ucomigt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B); +} + +static __inline int +_mm_ucomige_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B); +} + +static __inline int +_mm_ucomineq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B); +} + +/* Create a vector with element 0 as *P and the rest zero. */ + +static __inline __m128i +_mm_load_si128 (__m128i const *__P) +{ + return (__m128i) __builtin_ia32_loaddqa ((char const *)__P); +} + +static __inline __m128i +_mm_loadu_si128 (__m128i const *__P) +{ + return (__m128i) __builtin_ia32_loaddqu ((char const *)__P); +} + +static __inline __m128i +_mm_loadl_epi64 (__m128i const *__P) +{ + return (__m128i) __builtin_ia32_movq2dq (*(unsigned long long *)__P); +} + +static __inline void +_mm_store_si128 (__m128i *__P, __m128i __B) +{ + __builtin_ia32_storedqa ((char *)__P, (__v16qi)__B); +} + +static __inline void +_mm_storeu_si128 (__m128i *__P, __m128i __B) +{ + __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B); +} + +static __inline void +_mm_storel_epi64 (__m128i *__P, __m128i __B) +{ + *(long long *)__P = __builtin_ia32_movdq2q ((__v2di)__B); +} + +static __inline __m64 +_mm_movepi64_pi64 (__m128i __B) +{ + return (__m64) __builtin_ia32_movdq2q ((__v2di)__B); +} + +static __inline __m128i +_mm_move_epi64 (__m128i __A) +{ + return (__m128i) __builtin_ia32_movq ((__v2di)__A); +} + +/* Create a vector of zeros. 
*/ +static __inline __m128i +_mm_setzero_si128 (void) +{ + return (__m128i) __builtin_ia32_setzero128 (); +} + +static __inline __m128i +_mm_set_epi64 (__m64 __A, __m64 __B) +{ + __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A); + __v2di __tmp2 = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__B); + return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp2, __tmp); +} + +/* Create the vector [Z Y X W]. */ +static __inline __m128i +_mm_set_epi32 (int __Z, int __Y, int __X, int __W) +{ + union { + int __a[4]; + __m128i __v; + } __u; + + __u.__a[0] = __W; + __u.__a[1] = __X; + __u.__a[2] = __Y; + __u.__a[3] = __Z; + + return __u.__v; +} + +#ifdef __x86_64__ +/* Create the vector [Z Y]. */ +static __inline __m128i +_mm_set_epi64x (long long __Z, long long __Y) +{ + union { + long __a[2]; + __m128i __v; + } __u; + + __u.__a[0] = __Y; + __u.__a[1] = __Z; + + return __u.__v; +} +#endif + +/* Create the vector [S T U V Z Y X W]. */ +static __inline __m128i +_mm_set_epi16 (short __Z, short __Y, short __X, short __W, + short __V, short __U, short __T, short __S) +{ + union { + short __a[8]; + __m128i __v; + } __u; + + __u.__a[0] = __S; + __u.__a[1] = __T; + __u.__a[2] = __U; + __u.__a[3] = __V; + __u.__a[4] = __W; + __u.__a[5] = __X; + __u.__a[6] = __Y; + __u.__a[7] = __Z; + + return __u.__v; +} + +/* Create the vector [S T U V Z Y X W]. 
*/ +static __inline __m128i +_mm_set_epi8 (char __Z, char __Y, char __X, char __W, + char __V, char __U, char __T, char __S, + char __Z1, char __Y1, char __X1, char __W1, + char __V1, char __U1, char __T1, char __S1) +{ + union { + char __a[16]; + __m128i __v; + } __u; + + __u.__a[0] = __S1; + __u.__a[1] = __T1; + __u.__a[2] = __U1; + __u.__a[3] = __V1; + __u.__a[4] = __W1; + __u.__a[5] = __X1; + __u.__a[6] = __Y1; + __u.__a[7] = __Z1; + __u.__a[8] = __S; + __u.__a[9] = __T; + __u.__a[10] = __U; + __u.__a[11] = __V; + __u.__a[12] = __W; + __u.__a[13] = __X; + __u.__a[14] = __Y; + __u.__a[15] = __Z; + + return __u.__v; +} + +static __inline __m128i +_mm_set1_epi64 (__m64 __A) +{ + __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A); + return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp, __tmp); +} + +static __inline __m128i +_mm_set1_epi32 (int __A) +{ + __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__A); + return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0)); +} + +#ifdef __x86_64__ +static __inline __m128i +_mm_set1_epi64x (long long __A) +{ + __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A); + return (__m128i) __builtin_ia32_shufpd ((__v2df)__tmp, (__v2df)__tmp, _MM_SHUFFLE2 (0,0)); +} +#endif + +static __inline __m128i +_mm_set1_epi16 (short __A) +{ + int __Acopy = (unsigned short)__A; + __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__Acopy); + __tmp = (__v4si)__builtin_ia32_punpcklwd128 ((__v8hi)__tmp, (__v8hi)__tmp); + return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0)); +} + +static __inline __m128i +_mm_set1_epi8 (char __A) +{ + int __Acopy = (unsigned char)__A; + __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__Acopy); + __tmp = (__v4si)__builtin_ia32_punpcklbw128 ((__v16qi)__tmp, (__v16qi)__tmp); + __tmp = (__v4si)__builtin_ia32_punpcklbw128 ((__v16qi)__tmp, (__v16qi)__tmp); + return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0)); +} + 
+static __inline __m128i +_mm_setr_epi64 (__m64 __A, __m64 __B) +{ + __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A); + __v2di __tmp2 = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__B); + return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp, __tmp2); +} + +/* Create the vector [Z Y X W]. */ +static __inline __m128i +_mm_setr_epi32 (int __W, int __X, int __Y, int __Z) +{ + union { + int __a[4]; + __m128i __v; + } __u; + + __u.__a[0] = __W; + __u.__a[1] = __X; + __u.__a[2] = __Y; + __u.__a[3] = __Z; + + return __u.__v; +} +/* Create the vector [S T U V Z Y X W]. */ +static __inline __m128i +_mm_setr_epi16 (short __S, short __T, short __U, short __V, + short __W, short __X, short __Y, short __Z) +{ + union { + short __a[8]; + __m128i __v; + } __u; + + __u.__a[0] = __S; + __u.__a[1] = __T; + __u.__a[2] = __U; + __u.__a[3] = __V; + __u.__a[4] = __W; + __u.__a[5] = __X; + __u.__a[6] = __Y; + __u.__a[7] = __Z; + + return __u.__v; +} + +/* Create the vector [S T U V Z Y X W]. 
*/ +static __inline __m128i +_mm_setr_epi8 (char __S1, char __T1, char __U1, char __V1, + char __W1, char __X1, char __Y1, char __Z1, + char __S, char __T, char __U, char __V, + char __W, char __X, char __Y, char __Z) +{ + union { + char __a[16]; + __m128i __v; + } __u; + + __u.__a[0] = __S1; + __u.__a[1] = __T1; + __u.__a[2] = __U1; + __u.__a[3] = __V1; + __u.__a[4] = __W1; + __u.__a[5] = __X1; + __u.__a[6] = __Y1; + __u.__a[7] = __Z1; + __u.__a[8] = __S; + __u.__a[9] = __T; + __u.__a[10] = __U; + __u.__a[11] = __V; + __u.__a[12] = __W; + __u.__a[13] = __X; + __u.__a[14] = __Y; + __u.__a[15] = __Z; + + return __u.__v; +} + +static __inline __m128d +_mm_cvtepi32_pd (__m128i __A) +{ + return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A); +} + +static __inline __m128 +_mm_cvtepi32_ps (__m128i __A) +{ + return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A); +} + +static __inline __m128i +_mm_cvtpd_epi32 (__m128d __A) +{ + return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A); +} + +static __inline __m64 +_mm_cvtpd_pi32 (__m128d __A) +{ + return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A); +} + +static __inline __m128 +_mm_cvtpd_ps (__m128d __A) +{ + return (__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A); +} + +static __inline __m128i +_mm_cvttpd_epi32 (__m128d __A) +{ + return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A); +} + +static __inline __m64 +_mm_cvttpd_pi32 (__m128d __A) +{ + return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A); +} + +static __inline __m128d +_mm_cvtpi32_pd (__m64 __A) +{ + return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A); +} + +static __inline __m128i +_mm_cvtps_epi32 (__m128 __A) +{ + return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A); +} + +static __inline __m128i +_mm_cvttps_epi32 (__m128 __A) +{ + return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A); +} + +static __inline __m128d +_mm_cvtps_pd (__m128 __A) +{ + return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A); +} + +static __inline int +_mm_cvtsd_si32 (__m128d 
__A) +{ + return __builtin_ia32_cvtsd2si ((__v2df) __A); +} + +#ifdef __x86_64__ +static __inline long long +_mm_cvtsd_si64x (__m128d __A) +{ + return __builtin_ia32_cvtsd2si64 ((__v2df) __A); +} +#endif + +static __inline int +_mm_cvttsd_si32 (__m128d __A) +{ + return __builtin_ia32_cvttsd2si ((__v2df) __A); +} + +#ifdef __x86_64__ +static __inline long long +_mm_cvttsd_si64x (__m128d __A) +{ + return __builtin_ia32_cvttsd2si64 ((__v2df) __A); +} +#endif + +static __inline __m128 +_mm_cvtsd_ss (__m128 __A, __m128d __B) +{ + return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B); +} + +static __inline __m128d +_mm_cvtsi32_sd (__m128d __A, int __B) +{ + return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B); +} + +#ifdef __x86_64__ +static __inline __m128d +_mm_cvtsi64x_sd (__m128d __A, long long __B) +{ + return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B); +} +#endif + +static __inline __m128d +_mm_cvtss_sd (__m128d __A, __m128 __B) +{ + return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B); +} + +#define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C))) + +static __inline __m128d +_mm_unpackhi_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_unpacklo_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_loadh_pd (__m128d __A, double const *__B) +{ + return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, (__v2si *)__B); +} + +static __inline void +_mm_storeh_pd (double *__A, __m128d __B) +{ + __builtin_ia32_storehpd ((__v2si *)__A, (__v2df)__B); +} + +static __inline __m128d +_mm_loadl_pd (__m128d __A, double const *__B) +{ + return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, (__v2si *)__B); +} + +static __inline void +_mm_storel_pd (double *__A, __m128d __B) +{ + __builtin_ia32_storelpd ((__v2si *)__A, 
(__v2df)__B); +} + +static __inline int +_mm_movemask_pd (__m128d __A) +{ + return __builtin_ia32_movmskpd ((__v2df)__A); +} + +static __inline __m128i +_mm_packs_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_packs_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B); +} + +static __inline __m128i +_mm_packus_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_unpackhi_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline __m128i +_mm_unpackhi_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_unpackhi_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B); +} + +static __inline __m128i +_mm_unpackhi_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_unpacklo_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline __m128i +_mm_unpacklo_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_unpacklo_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B); +} + +static __inline __m128i +_mm_unpacklo_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_add_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddb128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline __m128i +_mm_add_epi16 
(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_add_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddd128 ((__v4si)__A, (__v4si)__B); +} + +static __inline __m128i +_mm_add_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddq128 ((__v2di)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_adds_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline __m128i +_mm_adds_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_adds_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline __m128i +_mm_adds_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_sub_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubb128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline __m128i +_mm_sub_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_sub_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubd128 ((__v4si)__A, (__v4si)__B); +} + +static __inline __m128i +_mm_sub_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubq128 ((__v2di)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_subs_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline __m128i +_mm_subs_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_subs_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, 
(__v16qi)__B); +} + +static __inline __m128i +_mm_subs_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_madd_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_mulhi_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_mullo_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmullw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m64 +_mm_mul_su32 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B); +} + +static __inline __m128i +_mm_mul_epu32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B); +} + +static __inline __m128i +_mm_sll_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllw128 ((__v8hi)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_sll_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pslld128 ((__v4si)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_sll_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllq128 ((__v2di)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_sra_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psraw128 ((__v8hi)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_sra_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrad128 ((__v4si)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_srl_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrlw128 ((__v8hi)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_srl_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrld128 ((__v4si)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_srl_epi64 (__m128i __A, __m128i __B) +{ + return 
(__m128i)__builtin_ia32_psrlq128 ((__v2di)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_slli_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B); +} + +static __inline __m128i +_mm_slli_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B); +} + +static __inline __m128i +_mm_slli_epi64 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B); +} + +static __inline __m128i +_mm_srai_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B); +} + +static __inline __m128i +_mm_srai_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B); +} + +#if 0 +static __m128i __attribute__((__always_inline__)) +_mm_srli_si128 (__m128i __A, const int __B) +{ + return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B)) +} + +static __m128i __attribute__((__always_inline__)) +_mm_srli_si128 (__m128i __A, const int __B) +{ + return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B)) +} +#endif +#define _mm_srli_si128(__A, __B) ((__m128i)__builtin_ia32_psrldqi128 (__A, __B)) +#define _mm_slli_si128(__A, __B) ((__m128i)__builtin_ia32_pslldqi128 (__A, __B)) + +static __inline __m128i +_mm_srli_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B); +} + +static __inline __m128i +_mm_srli_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B); +} + +static __inline __m128i +_mm_srli_epi64 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B); +} + +static __inline __m128i +_mm_and_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pand128 ((__v2di)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_andnot_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_or_si128 (__m128i __A, __m128i 
__B) +{ + return (__m128i)__builtin_ia32_por128 ((__v2di)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_xor_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pxor128 ((__v2di)__A, (__v2di)__B); +} + +static __inline __m128i +_mm_cmpeq_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpeqb128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline __m128i +_mm_cmpeq_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpeqw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_cmpeq_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpeqd128 ((__v4si)__A, (__v4si)__B); +} + +static __inline __m128i +_mm_cmplt_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__B, (__v16qi)__A); +} + +static __inline __m128i +_mm_cmplt_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__B, (__v8hi)__A); +} + +static __inline __m128i +_mm_cmplt_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__B, (__v4si)__A); +} + +static __inline __m128i +_mm_cmpgt_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline __m128i +_mm_cmpgt_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_cmpgt_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B); +} + +#define _mm_extract_epi16(__A, __B) __builtin_ia32_pextrw128 ((__v8hi)__A, __B) + +#define _mm_insert_epi16(__A, __B, __C) ((__m128i)__builtin_ia32_pinsrw128 ((__v8hi)__A, __B, __C)) + +static __inline __m128i +_mm_max_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_max_epu8 (__m128i __A, __m128i __B) +{ + return 
(__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline __m128i +_mm_min_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pminsw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_min_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline int +_mm_movemask_epi8 (__m128i __A) +{ + return __builtin_ia32_pmovmskb128 ((__v16qi)__A); +} + +static __inline __m128i +_mm_mulhi_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B); +} + +#define _mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __B)) +#define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B)) +#define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B)) + +static __inline void +_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C) +{ + __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C); +} + +static __inline __m128i +_mm_avg_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline __m128i +_mm_avg_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B); +} + +static __inline __m128i +_mm_sad_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B); +} + +static __inline void +_mm_stream_si32 (int *__A, int __B) +{ + __builtin_ia32_movnti (__A, __B); +} + +static __inline void +_mm_stream_si128 (__m128i *__A, __m128i __B) +{ + __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B); +} + +static __inline void +_mm_stream_pd (double *__A, __m128d __B) +{ + __builtin_ia32_movntpd (__A, (__v2df)__B); +} + +static __inline __m128i +_mm_movpi64_epi64 (__m64 __A) +{ + return (__m128i)__builtin_ia32_movq2dq ((unsigned long long)__A); +} + +static __inline void 
+_mm_clflush (void const *__A) +{ + __builtin_ia32_clflush (__A); +} + +static __inline void +_mm_lfence (void) +{ + __builtin_ia32_lfence (); +} + +static __inline void +_mm_mfence (void) +{ + __builtin_ia32_mfence (); +} + +static __inline __m128i +_mm_cvtsi32_si128 (int __A) +{ + return (__m128i) __builtin_ia32_loadd (&__A); +} + +#ifdef __x86_64__ +static __inline __m128i +_mm_cvtsi64x_si128 (long long __A) +{ + return (__m128i) __builtin_ia32_movq2dq (__A); +} +#endif + +static __inline int +_mm_cvtsi128_si32 (__m128i __A) +{ + int __tmp; + __builtin_ia32_stored (&__tmp, (__v4si)__A); + return __tmp; +} + +#ifdef __x86_64__ +static __inline long long +_mm_cvtsi128_si64x (__m128i __A) +{ + return __builtin_ia32_movdq2q ((__v2di)__A); +} +#endif + +#endif /* __SSE2__ */ + +#endif /* _EMMINTRIN_H_INCLUDED */ --- gcc/config/i386/mmintrin.h.sse2 2003-06-11 13:32:37.000000000 -0700 +++ gcc/config/i386/mmintrin.h 2003-06-11 19:17:59.000000000 -0700 @@ -25,7 +25,7 @@ Public License. */ /* Implemented from the specification included in the Intel C++ Compiler - User Guide and Reference, version 5.0. */ + User Guide and Reference, version 8.0. */ #ifndef _MMINTRIN_H_INCLUDED #define _MMINTRIN_H_INCLUDED @@ -48,6 +48,12 @@ _mm_empty (void) __builtin_ia32_emms (); } +static __inline void +_m_empty (void) +{ + _mm_empty (); +} + /* Convert I to a __m64 object. The integer is zero-extended to 64-bits. */ static __inline __m64 _mm_cvtsi32_si64 (int __i) @@ -56,6 +62,12 @@ _mm_cvtsi32_si64 (int __i) return (__m64) __tmp; } +static __inline __m64 +_m_from_int (int __i) +{ + return _mm_cvtsi32_si64 (__i); +} + #ifdef __x86_64__ /* Convert I to a __m64 object. */ static __inline __m64 @@ -80,6 +92,12 @@ _mm_cvtsi64_si32 (__m64 __i) return __tmp; } +static __inline int +_m_to_int (__m64 __i) +{ + return _mm_cvtsi64_si32 (__i); +} + #ifdef __x86_64__ /* Convert the lower 32 bits of the __m64 object into an integer. 
*/ static __inline long long @@ -98,6 +116,12 @@ _mm_packs_pi16 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_packsswb ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_packsswb (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pi16 (__m1, __m2); +} + /* Pack the two 32-bit values from M1 in to the lower two 16-bit values of the result, and the two 32-bit values from M2 into the upper two 16-bit values of the result, all with signed saturation. */ @@ -107,6 +131,12 @@ _mm_packs_pi32 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_packssdw ((__v2si)__m1, (__v2si)__m2); } +static __inline __m64 +_m_packssdw (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pi32 (__m1, __m2); +} + /* Pack the four 16-bit values from M1 into the lower four 8-bit values of the result, and the four 16-bit values from M2 into the upper four 8-bit values of the result, all with unsigned saturation. */ @@ -116,6 +146,12 @@ _mm_packs_pu16 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_packuswb ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_packuswb (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pu16 (__m1, __m2); +} + /* Interleave the four 8-bit values from the high half of M1 with the four 8-bit values from the high half of M2. */ static __inline __m64 @@ -124,6 +160,12 @@ _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2 return (__m64) __builtin_ia32_punpckhbw ((__v8qi)__m1, (__v8qi)__m2); } +static __inline __m64 +_m_punpckhbw (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi8 (__m1, __m2); +} + /* Interleave the two 16-bit values from the high half of M1 with the two 16-bit values from the high half of M2. 
*/ static __inline __m64 @@ -132,6 +174,12 @@ _mm_unpackhi_pi16 (__m64 __m1, __m64 __m return (__m64) __builtin_ia32_punpckhwd ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_punpckhwd (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi16 (__m1, __m2); +} + /* Interleave the 32-bit value from the high half of M1 with the 32-bit value from the high half of M2. */ static __inline __m64 @@ -140,6 +188,12 @@ _mm_unpackhi_pi32 (__m64 __m1, __m64 __m return (__m64) __builtin_ia32_punpckhdq ((__v2si)__m1, (__v2si)__m2); } +static __inline __m64 +_m_punpckhdq (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi32 (__m1, __m2); +} + /* Interleave the four 8-bit values from the low half of M1 with the four 8-bit values from the low half of M2. */ static __inline __m64 @@ -148,6 +202,12 @@ _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2 return (__m64) __builtin_ia32_punpcklbw ((__v8qi)__m1, (__v8qi)__m2); } +static __inline __m64 +_m_punpcklbw (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi8 (__m1, __m2); +} + /* Interleave the two 16-bit values from the low half of M1 with the two 16-bit values from the low half of M2. */ static __inline __m64 @@ -156,6 +216,12 @@ _mm_unpacklo_pi16 (__m64 __m1, __m64 __m return (__m64) __builtin_ia32_punpcklwd ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_punpcklwd (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi16 (__m1, __m2); +} + /* Interleave the 32-bit value from the low half of M1 with the 32-bit value from the low half of M2. */ static __inline __m64 @@ -164,6 +230,12 @@ _mm_unpacklo_pi32 (__m64 __m1, __m64 __m return (__m64) __builtin_ia32_punpckldq ((__v2si)__m1, (__v2si)__m2); } +static __inline __m64 +_m_punpckldq (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi32 (__m1, __m2); +} + /* Add the 8-bit values in M1 to the 8-bit values in M2. 
*/ static __inline __m64 _mm_add_pi8 (__m64 __m1, __m64 __m2) @@ -171,6 +243,12 @@ _mm_add_pi8 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2); } +static __inline __m64 +_m_paddb (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi8 (__m1, __m2); +} + /* Add the 16-bit values in M1 to the 16-bit values in M2. */ static __inline __m64 _mm_add_pi16 (__m64 __m1, __m64 __m2) @@ -178,6 +256,12 @@ _mm_add_pi16 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_paddw ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_paddw (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi16 (__m1, __m2); +} + /* Add the 32-bit values in M1 to the 32-bit values in M2. */ static __inline __m64 _mm_add_pi32 (__m64 __m1, __m64 __m2) @@ -185,6 +269,12 @@ _mm_add_pi32 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_paddd ((__v2si)__m1, (__v2si)__m2); } +static __inline __m64 +_m_paddd (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi32 (__m1, __m2); +} + /* Add the 64-bit values in M1 to the 64-bit values in M2. */ static __inline __m64 _mm_add_si64 (__m64 __m1, __m64 __m2) @@ -200,6 +290,12 @@ _mm_adds_pi8 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_paddsb ((__v8qi)__m1, (__v8qi)__m2); } +static __inline __m64 +_m_paddsb (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pi8 (__m1, __m2); +} + /* Add the 16-bit values in M1 to the 16-bit values in M2 using signed saturated arithmetic. */ static __inline __m64 @@ -208,6 +304,12 @@ _mm_adds_pi16 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_paddsw ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_paddsw (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pi16 (__m1, __m2); +} + /* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned saturated arithmetic. 
*/ static __inline __m64 @@ -216,6 +318,12 @@ _mm_adds_pu8 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_paddusb ((__v8qi)__m1, (__v8qi)__m2); } +static __inline __m64 +_m_paddusb (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pu8 (__m1, __m2); +} + /* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned saturated arithmetic. */ static __inline __m64 @@ -224,6 +332,12 @@ _mm_adds_pu16 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_paddusw ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_paddusw (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pu16 (__m1, __m2); +} + /* Subtract the 8-bit values in M2 from the 8-bit values in M1. */ static __inline __m64 _mm_sub_pi8 (__m64 __m1, __m64 __m2) @@ -231,6 +345,12 @@ _mm_sub_pi8 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_psubb ((__v8qi)__m1, (__v8qi)__m2); } +static __inline __m64 +_m_psubb (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi8 (__m1, __m2); +} + /* Subtract the 16-bit values in M2 from the 16-bit values in M1. */ static __inline __m64 _mm_sub_pi16 (__m64 __m1, __m64 __m2) @@ -238,6 +358,12 @@ _mm_sub_pi16 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_psubw ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_psubw (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi16 (__m1, __m2); +} + /* Subtract the 32-bit values in M2 from the 32-bit values in M1. */ static __inline __m64 _mm_sub_pi32 (__m64 __m1, __m64 __m2) @@ -245,6 +371,12 @@ _mm_sub_pi32 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_psubd ((__v2si)__m1, (__v2si)__m2); } +static __inline __m64 +_m_psubd (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi32 (__m1, __m2); +} + /* Add the 64-bit values in M1 to the 64-bit values in M2. 
*/ static __inline __m64 _mm_sub_si64 (__m64 __m1, __m64 __m2) @@ -260,6 +392,12 @@ _mm_subs_pi8 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_psubsb ((__v8qi)__m1, (__v8qi)__m2); } +static __inline __m64 +_m_psubsb (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pi8 (__m1, __m2); +} + /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using signed saturating arithmetic. */ static __inline __m64 @@ -268,6 +406,12 @@ _mm_subs_pi16 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_psubsw ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_psubsw (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pi16 (__m1, __m2); +} + /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using unsigned saturating arithmetic. */ static __inline __m64 @@ -276,6 +420,12 @@ _mm_subs_pu8 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_psubusb ((__v8qi)__m1, (__v8qi)__m2); } +static __inline __m64 +_m_psubusb (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pu8 (__m1, __m2); +} + /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using unsigned saturating arithmetic. */ static __inline __m64 @@ -284,6 +434,12 @@ _mm_subs_pu16 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_psubusw (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pu16 (__m1, __m2); +} + /* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing four 32-bit intermediate results, which are then summed by pairs to produce two 32-bit results. */ @@ -293,6 +449,12 @@ _mm_madd_pi16 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_pmaddwd (__m64 __m1, __m64 __m2) +{ + return _mm_madd_pi16 (__m1, __m2); +} + /* Multiply four signed 16-bit values in M1 by four signed 16-bit values in M2 and produce the high 16 bits of the 32-bit results. 
*/ static __inline __m64 @@ -301,6 +463,12 @@ _mm_mulhi_pi16 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_pmulhw (__m64 __m1, __m64 __m2) +{ + return _mm_mulhi_pi16 (__m1, __m2); +} + /* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce the low 16 bits of the results. */ static __inline __m64 @@ -309,6 +477,12 @@ _mm_mullo_pi16 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_pmullw (__m64 __m1, __m64 __m2) +{ + return _mm_mullo_pi16 (__m1, __m2); +} + /* Shift four 16-bit values in M left by COUNT. */ static __inline __m64 _mm_sll_pi16 (__m64 __m, __m64 __count) @@ -317,11 +491,23 @@ _mm_sll_pi16 (__m64 __m, __m64 __count) } static __inline __m64 +_m_psllw (__m64 __m, __m64 __count) +{ + return _mm_sll_pi16 (__m, __count); +} + +static __inline __m64 _mm_slli_pi16 (__m64 __m, int __count) { return (__m64) __builtin_ia32_psllw ((__v4hi)__m, __count); } +static __inline __m64 +_m_psllwi (__m64 __m, int __count) +{ + return _mm_slli_pi16 (__m, __count); +} + /* Shift two 32-bit values in M left by COUNT. */ static __inline __m64 _mm_sll_pi32 (__m64 __m, __m64 __count) @@ -330,11 +516,23 @@ _mm_sll_pi32 (__m64 __m, __m64 __count) } static __inline __m64 +_m_pslld (__m64 __m, __m64 __count) +{ + return _mm_sll_pi32 (__m, __count); +} + +static __inline __m64 _mm_slli_pi32 (__m64 __m, int __count) { return (__m64) __builtin_ia32_pslld ((__v2si)__m, __count); } +static __inline __m64 +_m_pslldi (__m64 __m, int __count) +{ + return _mm_slli_pi32 (__m, __count); +} + /* Shift the 64-bit value in M left by COUNT. 
*/ static __inline __m64 _mm_sll_si64 (__m64 __m, __m64 __count) @@ -343,11 +541,23 @@ _mm_sll_si64 (__m64 __m, __m64 __count) } static __inline __m64 +_m_psllq (__m64 __m, __m64 __count) +{ + return _mm_sll_si64 (__m, __count); +} + +static __inline __m64 _mm_slli_si64 (__m64 __m, int __count) { return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count); } +static __inline __m64 +_m_psllqi (__m64 __m, int __count) +{ + return _mm_slli_si64 (__m, __count); +} + /* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */ static __inline __m64 _mm_sra_pi16 (__m64 __m, __m64 __count) @@ -356,11 +566,23 @@ _mm_sra_pi16 (__m64 __m, __m64 __count) } static __inline __m64 +_m_psraw (__m64 __m, __m64 __count) +{ + return _mm_sra_pi16 (__m, __count); +} + +static __inline __m64 _mm_srai_pi16 (__m64 __m, int __count) { return (__m64) __builtin_ia32_psraw ((__v4hi)__m, __count); } +static __inline __m64 +_m_psrawi (__m64 __m, int __count) +{ + return _mm_srai_pi16 (__m, __count); +} + /* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */ static __inline __m64 _mm_sra_pi32 (__m64 __m, __m64 __count) @@ -369,11 +591,23 @@ _mm_sra_pi32 (__m64 __m, __m64 __count) } static __inline __m64 +_m_psrad (__m64 __m, __m64 __count) +{ + return _mm_sra_pi32 (__m, __count); +} + +static __inline __m64 _mm_srai_pi32 (__m64 __m, int __count) { return (__m64) __builtin_ia32_psrad ((__v2si)__m, __count); } +static __inline __m64 +_m_psradi (__m64 __m, int __count) +{ + return _mm_srai_pi32 (__m, __count); +} + /* Shift four 16-bit values in M right by COUNT; shift in zeros. 
*/ static __inline __m64 _mm_srl_pi16 (__m64 __m, __m64 __count) @@ -382,11 +616,23 @@ _mm_srl_pi16 (__m64 __m, __m64 __count) } static __inline __m64 +_m_psrlw (__m64 __m, __m64 __count) +{ + return _mm_srl_pi16 (__m, __count); +} + +static __inline __m64 _mm_srli_pi16 (__m64 __m, int __count) { return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, __count); } +static __inline __m64 +_m_psrlwi (__m64 __m, int __count) +{ + return _mm_srli_pi16 (__m, __count); +} + /* Shift two 32-bit values in M right by COUNT; shift in zeros. */ static __inline __m64 _mm_srl_pi32 (__m64 __m, __m64 __count) @@ -395,11 +641,23 @@ _mm_srl_pi32 (__m64 __m, __m64 __count) } static __inline __m64 +_m_psrld (__m64 __m, __m64 __count) +{ + return _mm_srl_pi32 (__m, __count); +} + +static __inline __m64 _mm_srli_pi32 (__m64 __m, int __count) { return (__m64) __builtin_ia32_psrld ((__v2si)__m, __count); } +static __inline __m64 +_m_psrldi (__m64 __m, int __count) +{ + return _mm_srli_pi32 (__m, __count); +} + /* Shift the 64-bit value in M left by COUNT; shift in zeros. */ static __inline __m64 _mm_srl_si64 (__m64 __m, __m64 __count) @@ -408,11 +666,23 @@ _mm_srl_si64 (__m64 __m, __m64 __count) } static __inline __m64 +_m_psrlq (__m64 __m, __m64 __count) +{ + return _mm_srl_si64 (__m, __count); +} + +static __inline __m64 _mm_srli_si64 (__m64 __m, int __count) { return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count); } +static __inline __m64 +_m_psrlqi (__m64 __m, int __count) +{ + return _mm_srli_si64 (__m, __count); +} + /* Bit-wise AND the 64-bit values in M1 and M2. */ static __inline __m64 _mm_and_si64 (__m64 __m1, __m64 __m2) @@ -420,6 +690,12 @@ _mm_and_si64 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_pand ((long long)__m1, (long long)__m2); } +static __inline __m64 +_m_pand (__m64 __m1, __m64 __m2) +{ + return _mm_and_si64 (__m1, __m2); +} + /* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the 64-bit value in M2. 
*/ static __inline __m64 @@ -428,6 +704,12 @@ _mm_andnot_si64 (__m64 __m1, __m64 __m2) return (__m64) __builtin_ia32_pandn ((long long)__m1, (long long)__m2); } +static __inline __m64 +_m_pandn (__m64 __m1, __m64 __m2) +{ + return _mm_andnot_si64 (__m1, __m2); +} + /* Bit-wise inclusive OR the 64-bit values in M1 and M2. */ static __inline __m64 _mm_or_si64 (__m64 __m1, __m64 __m2) @@ -435,6 +717,12 @@ _mm_or_si64 (__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_por ((long long)__m1, (long long)__m2); } +static __inline __m64 +_m_por (__m64 __m1, __m64 __m2) +{ + return _mm_or_si64 (__m1, __m2); +} + /* Bit-wise exclusive OR the 64-bit values in M1 and M2. */ static __inline __m64 _mm_xor_si64 (__m64 __m1, __m64 __m2) @@ -442,6 +730,12 @@ _mm_xor_si64 (__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_pxor ((long long)__m1, (long long)__m2); } +static __inline __m64 +_m_pxor (__m64 __m1, __m64 __m2) +{ + return _mm_xor_si64 (__m1, __m2); +} + /* Compare eight 8-bit values. The result of the comparison is 0xFF if the test is true and zero if false. */ static __inline __m64 @@ -451,11 +745,23 @@ _mm_cmpeq_pi8 (__m64 __m1, __m64 __m2) } static __inline __m64 +_m_pcmpeqb (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi8 (__m1, __m2); +} + +static __inline __m64 _mm_cmpgt_pi8 (__m64 __m1, __m64 __m2) { return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2); } +static __inline __m64 +_m_pcmpgtb (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi8 (__m1, __m2); +} + /* Compare four 16-bit values. The result of the comparison is 0xFFFF if the test is true and zero if false. 
*/ static __inline __m64 @@ -465,11 +771,23 @@ _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2) } static __inline __m64 +_m_pcmpeqw (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi16 (__m1, __m2); +} + +static __inline __m64 _mm_cmpgt_pi16 (__m64 __m1, __m64 __m2) { return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2); } +static __inline __m64 +_m_pcmpgtw (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi16 (__m1, __m2); +} + /* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if the test is true and zero if false. */ static __inline __m64 @@ -479,11 +797,23 @@ _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2) } static __inline __m64 +_m_pcmpeqd (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi32 (__m1, __m2); +} + +static __inline __m64 _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2) { return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2); } +static __inline __m64 +_m_pcmpgtd (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi32 (__m1, __m2); +} + /* Creates a 64-bit zero. */ static __inline __m64 _mm_setzero_si64 (void) --- gcc/config/i386/xmmintrin.h.sse2 2003-06-11 13:32:39.000000000 -0700 +++ gcc/config/i386/xmmintrin.h 2003-06-11 19:21:45.000000000 -0700 @@ -1,4 +1,4 @@ -/* Copyright (C) 2002 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003 Free Software Foundation, Inc. This file is part of GNU CC. @@ -25,7 +25,7 @@ Public License. */ /* Implemented from the specification included in the Intel C++ Compiler - User Guide and Reference, version 5.0. */ + User Guide and Reference, version 8.0. */ #ifndef _XMMINTRIN_H_INCLUDED #define _XMMINTRIN_H_INCLUDED @@ -475,6 +475,12 @@ _mm_cvtss_si32 (__m128 __A) return __builtin_ia32_cvtss2si ((__v4sf) __A); } +static __inline int +_mm_cvt_ss2si (__m128 __A) +{ + return _mm_cvtss_si32 (__A); +} + #ifdef __x86_64__ /* Convert the lower SPFP value to a 32-bit integer according to the current rounding mode. 
*/ @@ -493,6 +499,12 @@ _mm_cvtps_pi32 (__m128 __A) return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A); } +static __inline __m64 +_mm_cvt_ps2pi (__m128 __A) +{ + return _mm_cvtps_pi32 (__A); +} + /* Truncate the lower SPFP value to a 32-bit integer. */ static __inline int _mm_cvttss_si32 (__m128 __A) @@ -500,6 +512,12 @@ _mm_cvttss_si32 (__m128 __A) return __builtin_ia32_cvttss2si ((__v4sf) __A); } +static __inline int +_mm_cvtt_ss2si (__m128 __A) +{ + return _mm_cvttss_si32 (__A); +} + #ifdef __x86_64__ /* Truncate the lower SPFP value to a 32-bit integer. */ static __inline long long @@ -517,6 +535,12 @@ _mm_cvttps_pi32 (__m128 __A) return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A); } +static __inline __m64 +_mm_cvtt_ps2pi (__m128 __A) +{ + return _mm_cvttps_pi32 (__A); +} + /* Convert B to a SPFP value and insert it as element zero in A. */ static __inline __m128 _mm_cvtsi32_ss (__m128 __A, int __B) @@ -524,6 +548,12 @@ _mm_cvtsi32_ss (__m128 __A, int __B) return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B); } +static __inline __m128 +_mm_cvt_si2ss (__m128 __A, int __B) +{ + return _mm_cvtsi32_ss (__A, __B); +} + #ifdef __x86_64__ /* Convert B to a SPFP value and insert it as element zero in A. */ static __inline __m128 @@ -541,6 +571,12 @@ _mm_cvtpi32_ps (__m128 __A, __m64 __B) return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B); } +static __inline __m128 +_mm_cvt_pi2ps (__m128 __A, __m64 __B) +{ + return _mm_cvtpi32_ps (__A, __B); +} + /* Convert the four signed 16-bit values in A to SPFP form. 
*/ static __inline __m128 _mm_cvtpi16_ps (__m64 __A) @@ -942,9 +978,16 @@ _mm_extract_pi16 (__m64 __A, int __N) { return __builtin_ia32_pextrw ((__v4hi)__A, __N); } + +static __inline int +_m_pextrw (__m64 __A, int __N) +{ + return _mm_extract_pi16 (__A, __N); +} #else #define _mm_extract_pi16(A, N) \ __builtin_ia32_pextrw ((__v4hi)(A), (N)) +#define _m_pextrw(A, N) _mm_extract_pi16((A), (N)) #endif /* Inserts word D into one of four words of A. The selector N must be @@ -955,9 +998,16 @@ _mm_insert_pi16 (__m64 __A, int __D, int { return (__m64)__builtin_ia32_pinsrw ((__v4hi)__A, __D, __N); } + +static __inline __m64 +_m_pinsrw (__m64 __A, int __D, int __N) +{ + return _mm_insert_pi16 (__A, __D, __N); +} #else #define _mm_insert_pi16(A, D, N) \ ((__m64) __builtin_ia32_pinsrw ((__v4hi)(A), (D), (N))) +#define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N)) #endif /* Compute the element-wise maximum of signed 16-bit values. */ @@ -967,6 +1017,12 @@ _mm_max_pi16 (__m64 __A, __m64 __B) return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B); } +static __inline __m64 +_m_pmaxsw (__m64 __A, __m64 __B) +{ + return _mm_max_pi16 (__A, __B); +} + /* Compute the element-wise maximum of unsigned 8-bit values. */ static __inline __m64 _mm_max_pu8 (__m64 __A, __m64 __B) @@ -974,6 +1030,12 @@ _mm_max_pu8 (__m64 __A, __m64 __B) return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B); } +static __inline __m64 +_m_pmaxub (__m64 __A, __m64 __B) +{ + return _mm_max_pu8 (__A, __B); +} + /* Compute the element-wise minimum of signed 16-bit values. */ static __inline __m64 _mm_min_pi16 (__m64 __A, __m64 __B) @@ -981,6 +1043,12 @@ _mm_min_pi16 (__m64 __A, __m64 __B) return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B); } +static __inline __m64 +_m_pminsw (__m64 __A, __m64 __B) +{ + return _mm_min_pi16 (__A, __B); +} + /* Compute the element-wise minimum of unsigned 8-bit values. 
*/ static __inline __m64 _mm_min_pu8 (__m64 __A, __m64 __B) @@ -988,6 +1056,12 @@ _mm_min_pu8 (__m64 __A, __m64 __B) return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B); } +static __inline __m64 +_m_pminub (__m64 __A, __m64 __B) +{ + return _mm_min_pu8 (__A, __B); +} + /* Create an 8-bit mask of the signs of 8-bit values. */ static __inline int _mm_movemask_pi8 (__m64 __A) @@ -995,6 +1069,12 @@ _mm_movemask_pi8 (__m64 __A) return __builtin_ia32_pmovmskb ((__v8qi)__A); } +static __inline int +_m_pmovmskb (__m64 __A) +{ + return _mm_movemask_pi8 (__A); +} + /* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values in B and produce the high 16 bits of the 32-bit results. */ static __inline __m64 @@ -1003,6 +1083,12 @@ _mm_mulhi_pu16 (__m64 __A, __m64 __B) return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B); } +static __inline __m64 +_m_pmulhuw (__m64 __A, __m64 __B) +{ + return _mm_mulhi_pu16 (__A, __B); +} + /* Return a combination of the four 16-bit values in A. The selector must be an immediate. */ #if 0 @@ -1011,9 +1097,16 @@ _mm_shuffle_pi16 (__m64 __A, int __N) { return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N); } + +static __inline __m64 +_m_pshufw (__m64 __A, int __N) +{ + return _mm_shuffle_pi16 (__A, __N); +} #else #define _mm_shuffle_pi16(A, N) \ ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N))) +#define _m_pshufw(A, N) _mm_shuffle_pi16 ((A), (N)) #endif /* Conditionally store byte elements of A into P. The high bit of each @@ -1025,6 +1118,12 @@ _mm_maskmove_si64 (__m64 __A, __m64 __N, __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P); } +static __inline void +_m_maskmovq (__m64 __A, __m64 __N, char *__P) +{ + _mm_maskmove_si64 (__A, __N, __P); +} + /* Compute the rounded averages of the unsigned 8-bit values in A and B. 
*/ static __inline __m64 _mm_avg_pu8 (__m64 __A, __m64 __B) @@ -1032,6 +1131,12 @@ _mm_avg_pu8 (__m64 __A, __m64 __B) return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B); } +static __inline __m64 +_m_pavgb (__m64 __A, __m64 __B) +{ + return _mm_avg_pu8 (__A, __B); +} + /* Compute the rounded averages of the unsigned 16-bit values in A and B. */ static __inline __m64 _mm_avg_pu16 (__m64 __A, __m64 __B) @@ -1039,6 +1144,12 @@ _mm_avg_pu16 (__m64 __A, __m64 __B) return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B); } +static __inline __m64 +_m_pavgw (__m64 __A, __m64 __B) +{ + return _mm_avg_pu16 (__A, __B); +} + /* Compute the sum of the absolute differences of the unsigned 8-bit values in A and B. Return the value in the lower 16-bit word; the upper words are cleared. */ @@ -1048,6 +1159,12 @@ _mm_sad_pu8 (__m64 __A, __m64 __B) return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B); } +static __inline __m64 +_m_psadbw (__m64 __A, __m64 __B) +{ + return _mm_sad_pu8 (__A, __B); +} + /* Loads one cache line from address P to a location "closer" to the processor. The selector I specifies the type of prefetch operation. */ #if 0 @@ -1106,1469 +1223,8 @@ do { \ (row3) = __builtin_ia32_shufps (__t2, __t3, 0xDD); \ } while (0) -#ifdef __SSE2__ -/* SSE2 */ -typedef int __v2df __attribute__ ((mode (V2DF))); -typedef int __v2di __attribute__ ((mode (V2DI))); -typedef int __v4si __attribute__ ((mode (V4SI))); -typedef int __v8hi __attribute__ ((mode (V8HI))); -typedef int __v16qi __attribute__ ((mode (V16QI))); - -/* Create a selector for use with the SHUFPD instruction. */ -#define _MM_SHUFFLE2(fp1,fp0) \ - (((fp1) << 1) | (fp0)) - -#define __m128i __v2di -#define __m128d __v2df - -/* Create a vector with element 0 as *P and the rest zero. */ -static __inline __m128d -_mm_load_sd (double const *__P) -{ - return (__m128d) __builtin_ia32_loadsd (__P); -} - -/* Create a vector with all two elements equal to *P. 
*/ -static __inline __m128d -_mm_load1_pd (double const *__P) -{ - __v2df __tmp = __builtin_ia32_loadsd (__P); - return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,0)); -} - -static __inline __m128d -_mm_load_pd1 (double const *__P) -{ - return _mm_load1_pd (__P); -} - -/* Load two DPFP values from P. The addresd must be 16-byte aligned. */ -static __inline __m128d -_mm_load_pd (double const *__P) -{ - return (__m128d) __builtin_ia32_loadapd (__P); -} - -/* Load two DPFP values from P. The addresd need not be 16-byte aligned. */ -static __inline __m128d -_mm_loadu_pd (double const *__P) -{ - return (__m128d) __builtin_ia32_loadupd (__P); -} - -/* Load two DPFP values in reverse order. The addresd must be aligned. */ -static __inline __m128d -_mm_loadr_pd (double const *__P) -{ - __v2df __tmp = __builtin_ia32_loadapd (__P); - return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1)); -} - -/* Create a vector with element 0 as F and the rest zero. */ -static __inline __m128d -_mm_set_sd (double __F) -{ - return (__m128d) __builtin_ia32_loadsd (&__F); -} - -/* Create a vector with all two elements equal to F. */ -static __inline __m128d -_mm_set1_pd (double __F) -{ - __v2df __tmp = __builtin_ia32_loadsd (&__F); - return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,0)); -} - -static __inline __m128d -_mm_set_pd1 (double __F) -{ - return _mm_set1_pd (__F); -} - -/* Create the vector [Z Y]. */ -static __inline __m128d -_mm_set_pd (double __Z, double __Y) -{ - union { - double __a[2]; - __m128d __v; - } __u; - - __u.__a[0] = __Y; - __u.__a[1] = __Z; - - return __u.__v; -} - -/* Create the vector [Y Z]. */ -static __inline __m128d -_mm_setr_pd (double __Z, double __Y) -{ - return _mm_set_pd (__Y, __Z); -} - -/* Create a vector of zeros. */ -static __inline __m128d -_mm_setzero_pd (void) -{ - return (__m128d) __builtin_ia32_setzeropd (); -} - -/* Stores the lower DPFP value. 
*/ -static __inline void -_mm_store_sd (double *__P, __m128d __A) -{ - __builtin_ia32_storesd (__P, (__v2df)__A); -} - -/* Store the lower DPFP value acrosd two words. */ -static __inline void -_mm_store1_pd (double *__P, __m128d __A) -{ - __v2df __va = (__v2df)__A; - __v2df __tmp = __builtin_ia32_shufpd (__va, __va, _MM_SHUFFLE2 (0,0)); - __builtin_ia32_storeapd (__P, __tmp); -} - -static __inline void -_mm_store_pd1 (double *__P, __m128d __A) -{ - _mm_store1_pd (__P, __A); -} - -/* Store two DPFP values. The addresd must be 16-byte aligned. */ -static __inline void -_mm_store_pd (double *__P, __m128d __A) -{ - __builtin_ia32_storeapd (__P, (__v2df)__A); -} - -/* Store two DPFP values. The addresd need not be 16-byte aligned. */ -static __inline void -_mm_storeu_pd (double *__P, __m128d __A) -{ - __builtin_ia32_storeupd (__P, (__v2df)__A); -} - -/* Store two DPFP values in reverse order. The addresd must be aligned. */ -static __inline void -_mm_storer_pd (double *__P, __m128d __A) -{ - __v2df __va = (__v2df)__A; - __v2df __tmp = __builtin_ia32_shufpd (__va, __va, _MM_SHUFFLE2 (0,1)); - __builtin_ia32_storeapd (__P, __tmp); -} - -/* Sets the low DPFP value of A from the low value of B. 
*/ -static __inline __m128d -_mm_move_sd (__m128d __A, __m128d __B) -{ - return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B); -} - - -static __inline __m128d -_mm_add_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_addpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_add_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_sub_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_subpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_sub_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_mul_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_mulpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_mul_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_div_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_divpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_div_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_sqrt_pd (__m128d __A) -{ - return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A); -} - -/* Return pair {sqrt (A[0]), B[1]}. 
*/ -static __inline __m128d -_mm_sqrt_sd (__m128d __A, __m128d __B) -{ - __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B); - return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp); -} - -static __inline __m128d -_mm_min_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_min_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_max_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_max_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_and_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_andnot_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_or_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_xor_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpeq_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmplt_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmple_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpgt_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpge_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpgepd 
((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpneq_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpnlt_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpnle_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpngt_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpnge_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpord_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpunord_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpeq_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmplt_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmple_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpgt_sd (__m128d __A, __m128d __B) -{ - return (__m128d) __builtin_ia32_movsd ((__v2df) __A, - (__v2df) - __builtin_ia32_cmpltsd ((__v2df) __B, - (__v2df) - __A)); -} - -static __inline __m128d -_mm_cmpge_sd (__m128d __A, __m128d __B) -{ - return (__m128d) __builtin_ia32_movsd ((__v2df) __A, - (__v2df) - __builtin_ia32_cmplesd ((__v2df) __B, - (__v2df) - __A)); -} - -static __inline __m128d -_mm_cmpneq_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpneqsd 
((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpnlt_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpnle_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpngt_sd (__m128d __A, __m128d __B) -{ - return (__m128d) __builtin_ia32_movsd ((__v2df) __A, - (__v2df) - __builtin_ia32_cmpnltsd ((__v2df) __B, - (__v2df) - __A)); -} - -static __inline __m128d -_mm_cmpnge_sd (__m128d __A, __m128d __B) -{ - return (__m128d) __builtin_ia32_movsd ((__v2df) __A, - (__v2df) - __builtin_ia32_cmpnlesd ((__v2df) __B, - (__v2df) - __A)); -} - -static __inline __m128d -_mm_cmpord_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_cmpunord_sd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B); -} - -static __inline int -_mm_comieq_sd (__m128d __A, __m128d __B) -{ - return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B); -} - -static __inline int -_mm_comilt_sd (__m128d __A, __m128d __B) -{ - return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B); -} - -static __inline int -_mm_comile_sd (__m128d __A, __m128d __B) -{ - return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B); -} - -static __inline int -_mm_comigt_sd (__m128d __A, __m128d __B) -{ - return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B); -} - -static __inline int -_mm_comige_sd (__m128d __A, __m128d __B) -{ - return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B); -} - -static __inline int -_mm_comineq_sd (__m128d __A, __m128d __B) -{ - return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B); -} - -static __inline int -_mm_ucomieq_sd (__m128d __A, __m128d __B) -{ - return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B); -} - -static __inline int -_mm_ucomilt_sd (__m128d __A, 
__m128d __B) -{ - return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B); -} - -static __inline int -_mm_ucomile_sd (__m128d __A, __m128d __B) -{ - return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B); -} - -static __inline int -_mm_ucomigt_sd (__m128d __A, __m128d __B) -{ - return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B); -} - -static __inline int -_mm_ucomige_sd (__m128d __A, __m128d __B) -{ - return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B); -} - -static __inline int -_mm_ucomineq_sd (__m128d __A, __m128d __B) -{ - return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B); -} - -/* Create a vector with element 0 as *P and the rest zero. */ - -static __inline __m128i -_mm_load_si128 (__m128i const *__P) -{ - return (__m128i) __builtin_ia32_loaddqa ((char const *)__P); -} - -static __inline __m128i -_mm_loadu_si128 (__m128i const *__P) -{ - return (__m128i) __builtin_ia32_loaddqu ((char const *)__P); -} - -static __inline __m128i -_mm_loadl_epi64 (__m128i const *__P) -{ - return (__m128i) __builtin_ia32_movq2dq (*(unsigned long long *)__P); -} - -static __inline void -_mm_store_si128 (__m128i *__P, __m128i __B) -{ - __builtin_ia32_storedqa ((char *)__P, (__v16qi)__B); -} - -static __inline void -_mm_storeu_si128 (__m128i *__P, __m128i __B) -{ - __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B); -} - -static __inline void -_mm_storel_epi64 (__m128i *__P, __m128i __B) -{ - *(long long *)__P = __builtin_ia32_movdq2q ((__v2di)__B); -} - -static __inline __m64 -_mm_movepi64_pi64 (__m128i __B) -{ - return (__m64) __builtin_ia32_movdq2q ((__v2di)__B); -} - -static __inline __m128i -_mm_move_epi64 (__m128i __A) -{ - return (__m128i) __builtin_ia32_movq ((__v2di)__A); -} - -/* Create a vector of zeros. 
*/ -static __inline __m128i -_mm_setzero_si128 (void) -{ - return (__m128i) __builtin_ia32_setzero128 (); -} - -static __inline __m128i -_mm_set_epi64 (__m64 __A, __m64 __B) -{ - __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A); - __v2di __tmp2 = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__B); - return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp2, __tmp); -} - -/* Create the vector [Z Y X W]. */ -static __inline __m128i -_mm_set_epi32 (int __Z, int __Y, int __X, int __W) -{ - union { - int __a[4]; - __m128i __v; - } __u; - - __u.__a[0] = __W; - __u.__a[1] = __X; - __u.__a[2] = __Y; - __u.__a[3] = __Z; - - return __u.__v; -} - -#ifdef __x86_64__ -/* Create the vector [Z Y]. */ -static __inline __m128i -_mm_set_epi64x (long long __Z, long long __Y) -{ - union { - long __a[2]; - __m128i __v; - } __u; - - __u.__a[0] = __Y; - __u.__a[1] = __Z; - - return __u.__v; -} -#endif - -/* Create the vector [S T U V Z Y X W]. */ -static __inline __m128i -_mm_set_epi16 (short __Z, short __Y, short __X, short __W, - short __V, short __U, short __T, short __S) -{ - union { - short __a[8]; - __m128i __v; - } __u; - - __u.__a[0] = __S; - __u.__a[1] = __T; - __u.__a[2] = __U; - __u.__a[3] = __V; - __u.__a[4] = __W; - __u.__a[5] = __X; - __u.__a[6] = __Y; - __u.__a[7] = __Z; - - return __u.__v; -} - -/* Create the vector [S T U V Z Y X W]. 
*/ -static __inline __m128i -_mm_set_epi8 (char __Z, char __Y, char __X, char __W, - char __V, char __U, char __T, char __S, - char __Z1, char __Y1, char __X1, char __W1, - char __V1, char __U1, char __T1, char __S1) -{ - union { - char __a[16]; - __m128i __v; - } __u; - - __u.__a[0] = __S1; - __u.__a[1] = __T1; - __u.__a[2] = __U1; - __u.__a[3] = __V1; - __u.__a[4] = __W1; - __u.__a[5] = __X1; - __u.__a[6] = __Y1; - __u.__a[7] = __Z1; - __u.__a[8] = __S; - __u.__a[9] = __T; - __u.__a[10] = __U; - __u.__a[11] = __V; - __u.__a[12] = __W; - __u.__a[13] = __X; - __u.__a[14] = __Y; - __u.__a[15] = __Z; - - return __u.__v; -} - -static __inline __m128i -_mm_set1_epi64 (__m64 __A) -{ - __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A); - return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp, __tmp); -} - -static __inline __m128i -_mm_set1_epi32 (int __A) -{ - __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__A); - return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0)); -} - -#ifdef __x86_64__ -static __inline __m128i -_mm_set1_epi64x (long long __A) -{ - __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A); - return (__m128i) __builtin_ia32_shufpd ((__v2df)__tmp, (__v2df)__tmp, _MM_SHUFFLE2 (0,0)); -} -#endif - -static __inline __m128i -_mm_set1_epi16 (short __A) -{ - int __Acopy = (unsigned short)__A; - __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__Acopy); - __tmp = (__v4si)__builtin_ia32_punpcklwd128 ((__v8hi)__tmp, (__v8hi)__tmp); - return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0)); -} - -static __inline __m128i -_mm_set1_epi8 (char __A) -{ - int __Acopy = (unsigned char)__A; - __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__Acopy); - __tmp = (__v4si)__builtin_ia32_punpcklbw128 ((__v16qi)__tmp, (__v16qi)__tmp); - __tmp = (__v4si)__builtin_ia32_punpcklbw128 ((__v16qi)__tmp, (__v16qi)__tmp); - return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0)); -} - 
-static __inline __m128i -_mm_setr_epi64 (__m64 __A, __m64 __B) -{ - __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A); - __v2di __tmp2 = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__B); - return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp, __tmp2); -} - -/* Create the vector [Z Y X W]. */ -static __inline __m128i -_mm_setr_epi32 (int __W, int __X, int __Y, int __Z) -{ - union { - int __a[4]; - __m128i __v; - } __u; - - __u.__a[0] = __W; - __u.__a[1] = __X; - __u.__a[2] = __Y; - __u.__a[3] = __Z; - - return __u.__v; -} -/* Create the vector [S T U V Z Y X W]. */ -static __inline __m128i -_mm_setr_epi16 (short __S, short __T, short __U, short __V, - short __W, short __X, short __Y, short __Z) -{ - union { - short __a[8]; - __m128i __v; - } __u; - - __u.__a[0] = __S; - __u.__a[1] = __T; - __u.__a[2] = __U; - __u.__a[3] = __V; - __u.__a[4] = __W; - __u.__a[5] = __X; - __u.__a[6] = __Y; - __u.__a[7] = __Z; - - return __u.__v; -} - -/* Create the vector [S T U V Z Y X W]. 
*/ -static __inline __m128i -_mm_setr_epi8 (char __S1, char __T1, char __U1, char __V1, - char __W1, char __X1, char __Y1, char __Z1, - char __S, char __T, char __U, char __V, - char __W, char __X, char __Y, char __Z) -{ - union { - char __a[16]; - __m128i __v; - } __u; - - __u.__a[0] = __S1; - __u.__a[1] = __T1; - __u.__a[2] = __U1; - __u.__a[3] = __V1; - __u.__a[4] = __W1; - __u.__a[5] = __X1; - __u.__a[6] = __Y1; - __u.__a[7] = __Z1; - __u.__a[8] = __S; - __u.__a[9] = __T; - __u.__a[10] = __U; - __u.__a[11] = __V; - __u.__a[12] = __W; - __u.__a[13] = __X; - __u.__a[14] = __Y; - __u.__a[15] = __Z; - - return __u.__v; -} - -static __inline __m128d -_mm_cvtepi32_pd (__m128i __A) -{ - return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A); -} - -static __inline __m128 -_mm_cvtepi32_ps (__m128i __A) -{ - return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A); -} - -static __inline __m128i -_mm_cvtpd_epi32 (__m128d __A) -{ - return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A); -} - -static __inline __m64 -_mm_cvtpd_pi32 (__m128d __A) -{ - return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A); -} - -static __inline __m128 -_mm_cvtpd_ps (__m128d __A) -{ - return (__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A); -} - -static __inline __m128i -_mm_cvttpd_epi32 (__m128d __A) -{ - return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A); -} - -static __inline __m64 -_mm_cvttpd_pi32 (__m128d __A) -{ - return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A); -} - -static __inline __m128d -_mm_cvtpi32_pd (__m64 __A) -{ - return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A); -} - -static __inline __m128i -_mm_cvtps_epi32 (__m128 __A) -{ - return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A); -} - -static __inline __m128i -_mm_cvttps_epi32 (__m128 __A) -{ - return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A); -} - -static __inline __m128d -_mm_cvtps_pd (__m128 __A) -{ - return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A); -} - -static __inline int -_mm_cvtsd_si32 (__m128d 
__A) -{ - return __builtin_ia32_cvtsd2si ((__v2df) __A); -} - -#ifdef __x86_64__ -static __inline long long -_mm_cvtsd_si64x (__m128d __A) -{ - return __builtin_ia32_cvtsd2si64 ((__v2df) __A); -} -#endif - -static __inline int -_mm_cvttsd_si32 (__m128d __A) -{ - return __builtin_ia32_cvttsd2si ((__v2df) __A); -} - -#ifdef __x86_64__ -static __inline long long -_mm_cvttsd_si64x (__m128d __A) -{ - return __builtin_ia32_cvttsd2si64 ((__v2df) __A); -} -#endif - -static __inline __m128 -_mm_cvtsd_ss (__m128 __A, __m128d __B) -{ - return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B); -} - -static __inline __m128d -_mm_cvtsi32_sd (__m128d __A, int __B) -{ - return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B); -} - -#ifdef __x86_64__ -static __inline __m128d -_mm_cvtsi64x_sd (__m128d __A, long long __B) -{ - return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B); -} -#endif - -static __inline __m128d -_mm_cvtss_sd (__m128d __A, __m128 __B) -{ - return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B); -} - -#define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C))) - -static __inline __m128d -_mm_unpackhi_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_unpacklo_pd (__m128d __A, __m128d __B) -{ - return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_loadh_pd (__m128d __A, double const *__B) -{ - return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, (__v2si *)__B); -} - -static __inline void -_mm_storeh_pd (double *__A, __m128d __B) -{ - __builtin_ia32_storehpd ((__v2si *)__A, (__v2df)__B); -} - -static __inline __m128d -_mm_loadl_pd (__m128d __A, double const *__B) -{ - return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, (__v2si *)__B); -} - -static __inline void -_mm_storel_pd (double *__A, __m128d __B) -{ - __builtin_ia32_storelpd ((__v2si *)__A, 
(__v2df)__B); -} - -static __inline int -_mm_movemask_pd (__m128d __A) -{ - return __builtin_ia32_movmskpd ((__v2df)__A); -} - -static __inline __m128i -_mm_packs_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_packs_epi32 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B); -} - -static __inline __m128i -_mm_packus_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_unpackhi_epi8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline __m128i -_mm_unpackhi_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_unpackhi_epi32 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B); -} - -static __inline __m128i -_mm_unpackhi_epi64 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_unpacklo_epi8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline __m128i -_mm_unpacklo_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_unpacklo_epi32 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B); -} - -static __inline __m128i -_mm_unpacklo_epi64 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_add_epi8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_paddb128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline __m128i -_mm_add_epi16 
(__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_paddw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_add_epi32 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_paddd128 ((__v4si)__A, (__v4si)__B); -} - -static __inline __m128i -_mm_add_epi64 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_paddq128 ((__v2di)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_adds_epi8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline __m128i -_mm_adds_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_adds_epu8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline __m128i -_mm_adds_epu16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_sub_epi8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psubb128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline __m128i -_mm_sub_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psubw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_sub_epi32 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psubd128 ((__v4si)__A, (__v4si)__B); -} - -static __inline __m128i -_mm_sub_epi64 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psubq128 ((__v2di)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_subs_epi8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline __m128i -_mm_subs_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_subs_epu8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, 
(__v16qi)__B); -} - -static __inline __m128i -_mm_subs_epu16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_madd_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_mulhi_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_mullo_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pmullw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m64 -_mm_mul_su32 (__m64 __A, __m64 __B) -{ - return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B); -} - -static __inline __m128i -_mm_mul_epu32 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B); -} - -static __inline __m128i -_mm_sll_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psllw128 ((__v8hi)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_sll_epi32 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pslld128 ((__v4si)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_sll_epi64 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psllq128 ((__v2di)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_sra_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psraw128 ((__v8hi)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_sra_epi32 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psrad128 ((__v4si)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_srl_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psrlw128 ((__v8hi)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_srl_epi32 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psrld128 ((__v4si)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_srl_epi64 (__m128i __A, __m128i __B) -{ - return 
(__m128i)__builtin_ia32_psrlq128 ((__v2di)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_slli_epi16 (__m128i __A, int __B) -{ - return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B); -} - -static __inline __m128i -_mm_slli_epi32 (__m128i __A, int __B) -{ - return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B); -} - -static __inline __m128i -_mm_slli_epi64 (__m128i __A, int __B) -{ - return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B); -} - -static __inline __m128i -_mm_srai_epi16 (__m128i __A, int __B) -{ - return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B); -} - -static __inline __m128i -_mm_srai_epi32 (__m128i __A, int __B) -{ - return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B); -} - -#if 0 -static __m128i __attribute__((__always_inline__)) -_mm_srli_si128 (__m128i __A, const int __B) -{ - return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B)) -} - -static __m128i __attribute__((__always_inline__)) -_mm_srli_si128 (__m128i __A, const int __B) -{ - return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B)) -} -#endif -#define _mm_srli_si128(__A, __B) ((__m128i)__builtin_ia32_psrldqi128 (__A, __B)) -#define _mm_slli_si128(__A, __B) ((__m128i)__builtin_ia32_pslldqi128 (__A, __B)) - -static __inline __m128i -_mm_srli_epi16 (__m128i __A, int __B) -{ - return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B); -} - -static __inline __m128i -_mm_srli_epi32 (__m128i __A, int __B) -{ - return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B); -} - -static __inline __m128i -_mm_srli_epi64 (__m128i __A, int __B) -{ - return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B); -} - -static __inline __m128i -_mm_and_si128 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pand128 ((__v2di)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_andnot_si128 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_or_si128 (__m128i __A, __m128i 
__B) -{ - return (__m128i)__builtin_ia32_por128 ((__v2di)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_xor_si128 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pxor128 ((__v2di)__A, (__v2di)__B); -} - -static __inline __m128i -_mm_cmpeq_epi8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pcmpeqb128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline __m128i -_mm_cmpeq_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pcmpeqw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_cmpeq_epi32 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pcmpeqd128 ((__v4si)__A, (__v4si)__B); -} - -static __inline __m128i -_mm_cmplt_epi8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__B, (__v16qi)__A); -} - -static __inline __m128i -_mm_cmplt_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__B, (__v8hi)__A); -} - -static __inline __m128i -_mm_cmplt_epi32 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__B, (__v4si)__A); -} - -static __inline __m128i -_mm_cmpgt_epi8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline __m128i -_mm_cmpgt_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_cmpgt_epi32 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B); -} - -#define _mm_extract_epi16(__A, __B) __builtin_ia32_pextrw128 ((__v8hi)__A, __B) - -#define _mm_insert_epi16(__A, __B, __C) ((__m128i)__builtin_ia32_pinsrw128 ((__v8hi)__A, __B, __C)) - -static __inline __m128i -_mm_max_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_max_epu8 (__m128i __A, __m128i __B) -{ - return 
(__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline __m128i -_mm_min_epi16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pminsw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_min_epu8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline int -_mm_movemask_epi8 (__m128i __A) -{ - return __builtin_ia32_pmovmskb128 ((__v16qi)__A); -} - -static __inline __m128i -_mm_mulhi_epu16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B); -} - -#define _mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __B)) -#define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B)) -#define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B)) - -static __inline void -_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C) -{ - __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C); -} - -static __inline __m128i -_mm_avg_epu8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline __m128i -_mm_avg_epu16 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B); -} - -static __inline __m128i -_mm_sad_epu8 (__m128i __A, __m128i __B) -{ - return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B); -} - -static __inline void -_mm_stream_si32 (int *__A, int __B) -{ - __builtin_ia32_movnti (__A, __B); -} - -static __inline void -_mm_stream_si128 (__m128i *__A, __m128i __B) -{ - __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B); -} - -static __inline void -_mm_stream_pd (double *__A, __m128d __B) -{ - __builtin_ia32_movntpd (__A, (__v2df)__B); -} - -static __inline __m128i -_mm_movpi64_epi64 (__m64 __A) -{ - return (__m128i)__builtin_ia32_movq2dq ((unsigned long long)__A); -} - -static __inline void 
-_mm_clflush (void const *__A) -{ - return __builtin_ia32_clflush (__A); -} - -static __inline void -_mm_lfence (void) -{ - __builtin_ia32_lfence (); -} - -static __inline void -_mm_mfence (void) -{ - __builtin_ia32_mfence (); -} - -static __inline __m128i -_mm_cvtsi32_si128 (int __A) -{ - return (__m128i) __builtin_ia32_loadd (&__A); -} - -#ifdef __x86_64__ -static __inline __m128i -_mm_cvtsi64x_si128 (long long __A) -{ - return (__m128i) __builtin_ia32_movq2dq (__A); -} -#endif - -static __inline int -_mm_cvtsi128_si32 (__m128i __A) -{ - int __tmp; - __builtin_ia32_stored (&__tmp, (__v4si)__A); - return __tmp; -} - -#ifdef __x86_64__ -static __inline long long -_mm_cvtsi128_si64x (__m128i __A) -{ - return __builtin_ia32_movdq2q ((__v2di)__A); -} -#endif - -#endif /* __SSE2__ */ +/* For backward source compatibility. */ +#include <emmintrin.h> #endif /* __SSE__ */ #endif /* _XMMINTRIN_H_INCLUDED */