SDK review for 10.4u

Mike Stump mrs@apple.com
Fri Dec 22 23:51:00 GMT 2006


Here are the diffs in the generated headers for the 10.4u SDK from my
recent SDK work.  I've audited them, and they look fine to me.  The
'Only in' lines are due to the fact that on x86 we run fixincludes on
the X11 headers, and they are now converted to use __i386__ instead
of i386, as the latter isn't always defined.
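
For illustration, the kind of guard fixincludes rewrites looks roughly
like this (a hypothetical fragment, not the exact header text; the
byte-order macros are just placeholders):

    /* Before: 'i386' is a traditional predefined macro that isn't
       defined in strict standards modes such as -ansi, so this test
       can silently fail.  */
    #ifdef i386
    # define X_BYTE_ORDER X_LITTLE_ENDIAN
    #endif

    /* After: '__i386__' is predefined whenever the compiler targets
       32-bit x86, regardless of standards mode.  */
    #ifdef __i386__
    # define X_BYTE_ORDER X_LITTLE_ENDIAN
    #endif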

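The emmintrin.h changes are mechanical: the __nodebug__ attribute is
hoisted into a local macro so that each intrinsic only has to spell
__always_inline__.  A minimal sketch of how the expansion works ('f'
here is a made-up function, and the real header presumably #undefs
the macro again at the end):

    /* After this, __attribute__((__always_inline__)) expands to
       __attribute__((__always_inline__, __nodebug__)), because a
       self-referential object-like macro expands exactly once.  */
    #define __always_inline__ __always_inline__, __nodebug__

    static __inline int __attribute__((__always_inline__))
    f (int __x)		/* effectively always_inline plus nodebug */
    {
      return __x;
    }
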
Look ok?

diff -U3 -r /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/X11/Xos.h apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/X11/Xos.h
--- /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/X11/Xos.h	2006-07-24 19:12:26.000000000 -0700
+++ apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/X11/Xos.h	2006-12-22 15:30:54.000000000 -0800
@@ -2,7 +2,7 @@
 
     It has been auto-edited by fixincludes from:
 
-	"/usr/include/X11/Xos.h"
+	"/Developer/SDKs/MacOSX10.4u.sdk/usr/include/X11/Xos.h"
 
     This had to be done to correct non-standard usages in the
     original, manufacturer supplied header file.  */
diff -U3 -r /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/X11/Xos_r.h apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/X11/Xos_r.h
--- /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/X11/Xos_r.h	2006-07-24 19:12:26.000000000 -0700
+++ apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/X11/Xos_r.h	2006-12-22 15:30:54.000000000 -0800
@@ -2,7 +2,7 @@
 
     It has been auto-edited by fixincludes from:
 
-	"/usr/include/X11/Xos_r.h"
+	"/Developer/SDKs/MacOSX10.4u.sdk/usr/include/X11/Xos_r.h"
 
     This had to be done to correct non-standard usages in the
     original, manufacturer supplied header file.  */
diff -U3 -r /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/X11/Xosdefs.h apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/X11/Xosdefs.h
--- /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/X11/Xosdefs.h	2006-07-24 19:12:26.000000000 -0700
+++ apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/X11/Xosdefs.h	2006-12-22 15:30:54.000000000 -0800
@@ -2,7 +2,7 @@
 
     It has been auto-edited by fixincludes from:
 
-	"/usr/include/X11/Xosdefs.h"
+	"/Developer/SDKs/MacOSX10.4u.sdk/usr/include/X11/Xosdefs.h"
 
     This had to be done to correct non-standard usages in the
     original, manufacturer supplied header file.  */
diff -U3 -r /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/arpa/nameser8_compat.h apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/arpa/nameser8_compat.h
--- /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/arpa/nameser8_compat.h	2006-07-24 19:12:21.000000000 -0700
+++ apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/arpa/nameser8_compat.h	2006-12-22 15:30:51.000000000 -0800
@@ -2,7 +2,7 @@
 
     It has been auto-edited by fixincludes from:
 
-	"/usr/include/arpa/nameser8_compat.h"
+	"/Developer/SDKs/MacOSX10.4u.sdk/usr/include/arpa/nameser8_compat.h"
 
     This had to be done to correct non-standard usages in the
     original, manufacturer supplied header file.  */
diff -U3 -r /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/emmintrin.h apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/emmintrin.h
--- /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/emmintrin.h	2006-07-24 19:12:28.000000000 -0700
+++ apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/emmintrin.h	2006-12-22 15:30:54.000000000 -0800
@@ -48,91 +48,95 @@
 #define _MM_SHUFFLE2(fp1,fp0) \
  (((fp1) << 1) | (fp0))
 
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
 /* APPLE LOCAL begin radar 4152603 */
 /* Create a vector with element 0 as F and the rest zero.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_set_sd (double __F)
 {
   return __extension__ (__m128d){ __F, 0 };
 }
 
 /* Create a vector with both elements equal to F.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_set1_pd (double __F)
 {
   return __extension__ (__m128d){ __F, __F };
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_set_pd1 (double __F)
 {
   return _mm_set1_pd (__F);
 }
 
 /* Create a vector with the lower value X and upper value W.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_set_pd (double __W, double __X)
 {
   return __extension__ (__m128d){ __X, __W };
 }
 
 /* Create a vector with the lower value W and upper value X.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_setr_pd (double __W, double __X)
 {
   return __extension__ (__m128d){ __W, __X };
 }
 
 /* Create a vector of zeros.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_setzero_pd (void)
 {
   return __extension__ (__m128d){ 0.0, 0.0 };
 }
 
 /* Sets the low DPFP value of A from the low value of B.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_move_sd (__m128d __A, __m128d __B)
 {
   return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
 }
 
 /* Load two DPFP values from P.  The address must be 16-byte aligned.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_load_pd (double const *__P)
 {
   return *(__m128d *)__P;
 }
 
 /* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_loadu_pd (double const *__P)
 {
   return __builtin_ia32_loadupd (__P);
 }
 
 /* Create a vector with all two elements equal to *P.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_load1_pd (double const *__P)
 {
   return _mm_set1_pd (*__P);
 }
 
 /* Create a vector with element 0 as *P and the rest zero.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_load_sd (double const *__P)
 {
   return _mm_set_sd (*__P);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_load_pd1 (double const *__P)
 {
   return _mm_load1_pd (__P);
 }
 
 /* Load two DPFP values in reverse order.  The address must be aligned.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_loadr_pd (double const *__P)
 {
   __m128d __tmp = _mm_load_pd (__P);
@@ -140,40 +144,40 @@
 }
 
 /* Store two DPFP values.  The address must be 16-byte aligned.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_store_pd (double *__P, __m128d __A)
 {
   *(__m128d *)__P = __A;
 }
 
 /* Store two DPFP values.  The address need not be 16-byte aligned.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_storeu_pd (double *__P, __m128d __A)
 {
   __builtin_ia32_storeupd (__P, __A);
 }
 
 /* Stores the lower DPFP value.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_store_sd (double *__P, __m128d __A)
 {
   *__P = __builtin_ia32_vec_ext_v2df (__A, 0);
 }
 
-static __inline double __attribute__((__always_inline__, __nodebug__))
+static __inline double __attribute__((__always_inline__))
 _mm_cvtsd_f64 (__m128d __A)
 {
   return __builtin_ia32_vec_ext_v2df (__A, 0);
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_storel_pd (double *__P, __m128d __A)
 {
   _mm_store_sd (__P, __A);
 }
 
 /* Stores the upper DPFP value.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_storeh_pd (double *__P, __m128d __A)
 {
   *__P = __builtin_ia32_vec_ext_v2df (__A, 1);
@@ -181,26 +185,26 @@
 
 /* Store the lower DPFP value across two words.
    The address must be 16-byte aligned.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_store1_pd (double *__P, __m128d __A)
 {
   _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,0)));
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_store_pd1 (double *__P, __m128d __A)
 {
   _mm_store1_pd (__P, __A);
 }
 
 /* Store two DPFP values in reverse order.  The address must be aligned.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_storer_pd (double *__P, __m128d __A)
 {
   _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,1)));
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_cvtsi128_si32 (__m128i __A)
 {
   return __builtin_ia32_vec_ext_v4si ((__v4si)__A, 0);
@@ -208,221 +212,221 @@
 
 #ifdef __x86_64__
 /* Intel intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _mm_cvtsi128_si64 (__m128i __A)
 {
   return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0);
 }
 
 /* Microsoft intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _mm_cvtsi128_si64x (__m128i __A)
 {
   return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0);
 }
 #endif
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_add_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_addpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_add_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_sub_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_subpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_sub_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_mul_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_mulpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_mul_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_div_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_divpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_div_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_sqrt_pd (__m128d __A)
 {
   return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A);
 }
 
 /* Return pair {sqrt (A[0), B[1]}.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_sqrt_sd (__m128d __A, __m128d __B)
 {
   __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
   return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_min_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_min_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_max_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_max_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_and_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_andnot_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_or_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_xor_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpeq_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmplt_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmple_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpgt_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpge_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpgepd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpneq_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpnlt_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpnle_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpngt_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpnge_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpord_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpunord_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpeq_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmplt_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmple_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpgt_sd (__m128d __A, __m128d __B)
 {
   return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
@@ -432,7 +436,7 @@
 								 __A));
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpge_sd (__m128d __A, __m128d __B)
 {
   return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
@@ -442,25 +446,25 @@
 								 __A));
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpneq_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpneqsd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpnlt_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpnle_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpngt_sd (__m128d __A, __m128d __B)
 {
   return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
@@ -470,7 +474,7 @@
 								  __A));
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpnge_sd (__m128d __A, __m128d __B)
 {
   return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
@@ -480,85 +484,85 @@
 								  __A));
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpord_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cmpunord_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_comieq_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_comilt_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_comile_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_comigt_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_comige_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_comineq_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_ucomieq_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_ucomilt_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_ucomile_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_ucomigt_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_ucomige_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_ucomineq_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B);
@@ -566,25 +570,25 @@
 
 /* Create a vector of Qi, where i is the element number.  */
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_set_epi64x (long long __q1, long long __q0)
 {
   return __extension__ (__m128i)(__v2di){ __q0, __q1 };
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_set_epi64 (__m64 __q1,  __m64 __q0)
 {
   return _mm_set_epi64x ((long long)__q1, (long long)__q0);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
 {
   return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
 	       short __q3, short __q2, short __q1, short __q0)
 {
@@ -592,7 +596,7 @@
     __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
 	      char __q11, char __q10, char __q09, char __q08,
 	      char __q07, char __q06, char __q05, char __q04,
@@ -611,26 +615,26 @@
 /* Create a vector of Qi, where i is the element number.
    The parameter order is reversed from the _mm_set_epi* functions.  */
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_setr_epi64 (__m64 __q0, __m64 __q1)
 {
   return _mm_set_epi64 (__q1, __q0);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
 {
   return _mm_set_epi32 (__q3, __q2, __q1, __q0);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
 	        short __q4, short __q5, short __q6, short __q7)
 {
   return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
 	       char __q04, char __q05, char __q06, char __q07,
 	       char __q08, char __q09, char __q10, char __q11,
@@ -642,60 +646,60 @@
 
 /* Create a vector with element 0 as *P and the rest zero.  */
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_load_si128 (__m128i const *__P)
 {
   return *__P;
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_loadu_si128 (__m128i const *__P)
 {
   return (__m128i) __builtin_ia32_loaddqu ((char const *)__P);
 }
 
 /* APPLE LOCAL begin 4099020 */
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_loadl_epi64 (__m128i const *__P)
 {
   return  (__m128i)__builtin_ia32_loadlv4si ((__v2si *)__P);
 }
 /* APPLE LOCAL end 4099020 */
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_store_si128 (__m128i *__P, __m128i __B)
 {
   *__P = __B;
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_storeu_si128 (__m128i *__P, __m128i __B)
 {
   __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B);
 }
 
 /* APPLE LOCAL begin 4099020 */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_storel_epi64 (__m128i *__P, __m128i __B)
 {
   __builtin_ia32_storelv4si ((__v2si *)__P, __B);
 }
 /* APPLE LOCAL end 4099020 */
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_movepi64_pi64 (__m128i __B)
 {
   return (__m64) __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_movpi64_epi64 (__m64 __A)
 {
   return _mm_set_epi64 ((__m64)0LL, __A);
 }
 
 /* APPLE LOCAL begin 4099020 */
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_move_epi64 (__m128i __A)
 {
   return (__m128i)__builtin_ia32_movqv4si ((__v4si)__A) ;
@@ -703,79 +707,79 @@
 /* APPLE LOCAL end 4099020 */
 
 /* Create a vector of zeros.  */
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_setzero_si128 (void)
 {
   return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 };
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cvtepi32_pd (__m128i __A)
 {
   return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtepi32_ps (__m128i __A)
 {
   return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cvtpd_epi32 (__m128d __A)
 {
   return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cvtpd_pi32 (__m128d __A)
 {
   return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtpd_ps (__m128d __A)
 {
   return (__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cvttpd_epi32 (__m128d __A)
 {
   return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cvttpd_pi32 (__m128d __A)
 {
   return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cvtpi32_pd (__m64 __A)
 {
   return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cvtps_epi32 (__m128 __A)
 {
   return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cvttps_epi32 (__m128 __A)
 {
   return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cvtps_pd (__m128 __A)
 {
   return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_cvtsd_si32 (__m128d __A)
 {
   return __builtin_ia32_cvtsd2si ((__v2df) __A);
@@ -783,21 +787,21 @@
 
 #ifdef __x86_64__
 /* Intel intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _mm_cvtsd_si64 (__m128d __A)
 {
   return __builtin_ia32_cvtsd2si64 ((__v2df) __A);
 }
 
 /* Microsoft intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _mm_cvtsd_si64x (__m128d __A)
 {
   return __builtin_ia32_cvtsd2si64 ((__v2df) __A);
 }
 #endif
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_cvttsd_si32 (__m128d __A)
 {
   return __builtin_ia32_cvttsd2si ((__v2df) __A);
@@ -805,27 +809,27 @@
 
 #ifdef __x86_64__
 /* Intel intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _mm_cvttsd_si64 (__m128d __A)
 {
   return __builtin_ia32_cvttsd2si64 ((__v2df) __A);
 }
 
 /* Microsoft intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _mm_cvttsd_si64x (__m128d __A)
 {
   return __builtin_ia32_cvttsd2si64 ((__v2df) __A);
 }
 #endif
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtsd_ss (__m128 __A, __m128d __B)
 {
   return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cvtsi32_sd (__m128d __A, int __B)
 {
   return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B);
@@ -833,21 +837,21 @@
 
 #ifdef __x86_64__
 /* Intel intrinsic.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cvtsi64_sd (__m128d __A, long long __B)
 {
   return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
 }
 
 /* Microsoft intrinsic.  */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cvtsi64x_sd (__m128d __A, long long __B)
 {
   return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
 }
 #endif
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_cvtss_sd (__m128d __A, __m128 __B)
 {
   return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B);
@@ -855,266 +859,266 @@
 
 #define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C)))
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_unpackhi_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_unpacklo_pd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_loadh_pd (__m128d __A, double const *__B)
 {
   return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, __B);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_loadl_pd (__m128d __A, double const *__B)
 {
   return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, __B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_movemask_pd (__m128d __A)
 {
   return __builtin_ia32_movmskpd ((__v2df)__A);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_packs_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_packs_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_packus_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_unpackhi_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_unpackhi_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_unpackhi_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_unpackhi_epi64 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_unpacklo_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_unpacklo_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_unpacklo_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_unpacklo_epi64 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_add_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_paddb128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_add_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_paddw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_add_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_paddd128 ((__v4si)__A, (__v4si)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_add_epi64 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_paddq128 ((__v2di)__A, (__v2di)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_adds_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_adds_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_adds_epu8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_adds_epu16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_sub_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_psubb128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_sub_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_psubw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_sub_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_psubd128 ((__v4si)__A, (__v4si)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_sub_epi64 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_psubq128 ((__v2di)__A, (__v2di)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_subs_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_subs_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_subs_epu8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_subs_epu16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_madd_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_mulhi_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_mullo_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pmullw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_mul_su32 (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_mul_epu32 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_slli_epi16 (__m128i __A, int __B)
 {
   return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_slli_epi32 (__m128i __A, int __B)
 {
   return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_slli_epi64 (__m128i __A, int __B)
 {
   return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_srai_epi16 (__m128i __A, int __B)
 {
   return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_srai_epi32 (__m128i __A, int __B)
 {
   return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B);
 }
 
 #if 0
-static __m128i __attribute__((__always_inline__, __nodebug__))
+static __m128i __attribute__((__always_inline__))
 _mm_srli_si128 (__m128i __A, const int __B)
 {
   return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B))
 }
 
-static __m128i __attribute__((__always_inline__, __nodebug__))
+static __m128i __attribute__((__always_inline__))
 _mm_srli_si128 (__m128i __A, const int __B)
 {
   return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B))
@@ -1126,158 +1130,158 @@
   ((__m128i)__builtin_ia32_pslldqi128 (__A, (__B) * 8))
 #endif
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_srli_epi16 (__m128i __A, int __B)
 {
   return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_srli_epi32 (__m128i __A, int __B)
 {
   return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_srli_epi64 (__m128i __A, int __B)
 {
   return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_sll_epi16 (__m128i __A, __m128i __B)
 {
   return _mm_slli_epi16 (__A, _mm_cvtsi128_si32 (__B));
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_sll_epi32 (__m128i __A, __m128i __B)
 {
   return _mm_slli_epi32 (__A, _mm_cvtsi128_si32 (__B));
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_sll_epi64 (__m128i __A, __m128i __B)
 {
   return _mm_slli_epi64 (__A, _mm_cvtsi128_si32 (__B));
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_sra_epi16 (__m128i __A, __m128i __B)
 {
   return _mm_srai_epi16 (__A, _mm_cvtsi128_si32 (__B));
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_sra_epi32 (__m128i __A, __m128i __B)
 {
   return _mm_srai_epi32 (__A, _mm_cvtsi128_si32 (__B));
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_srl_epi16 (__m128i __A, __m128i __B)
 {
   return _mm_srli_epi16 (__A, _mm_cvtsi128_si32 (__B));
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_srl_epi32 (__m128i __A, __m128i __B)
 {
   return _mm_srli_epi32 (__A, _mm_cvtsi128_si32 (__B));
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_srl_epi64 (__m128i __A, __m128i __B)
 {
   return _mm_srli_epi64 (__A, _mm_cvtsi128_si32 (__B));
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_and_si128 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pand128 ((__v2di)__A, (__v2di)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_andnot_si128 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_or_si128 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_por128 ((__v2di)__A, (__v2di)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_xor_si128 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pxor128 ((__v2di)__A, (__v2di)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cmpeq_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pcmpeqb128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cmpeq_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pcmpeqw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cmpeq_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pcmpeqd128 ((__v4si)__A, (__v4si)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cmplt_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__B, (__v16qi)__A);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cmplt_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__B, (__v8hi)__A);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cmplt_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__B, (__v4si)__A);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cmpgt_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cmpgt_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cmpgt_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B);
 }
 
 #if 0
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_extract_epi16 (__m128i const __A, int const __N)
 {
   return __builtin_ia32_vec_ext_v8hi ((__v8hi)__A, __N);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
 {
   return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N);
@@ -1289,37 +1293,37 @@
   ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N)))
 #endif
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_max_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_max_epu8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_min_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pminsw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_min_epu8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_movemask_epi8 (__m128i __A)
 {
   return __builtin_ia32_pmovmskb128 ((__v16qi)__A);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_mulhi_epu16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B);
@@ -1329,67 +1333,67 @@
 #define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B))
 #define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B))
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
 {
   __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_avg_epu8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_avg_epu16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_sad_epu8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B);
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_stream_si32 (int *__A, int __B)
 {
   __builtin_ia32_movnti (__A, __B);
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_stream_si128 (__m128i *__A, __m128i __B)
 {
   __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B);
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_stream_pd (double *__A, __m128d __B)
 {
   __builtin_ia32_movntpd (__A, (__v2df)__B);
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_clflush (void const *__A)
 {
   __builtin_ia32_clflush (__A);
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_lfence (void)
 {
   __builtin_ia32_lfence ();
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_mfence (void)
 {
   __builtin_ia32_mfence ();
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cvtsi32_si128 (int __A)
 {
   return _mm_set_epi32 (0, 0, 0, __A);
@@ -1397,14 +1401,14 @@
 
 #ifdef __x86_64__
 /* Intel intrinsic.  */
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cvtsi64_si128 (long long __A)
 {
   return _mm_set_epi64x (0, __A);
 }
 
 /* Microsoft intrinsic.  */
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_cvtsi64x_si128 (long long __A)
 {
   return _mm_set_epi64x (0, __A);
@@ -1413,71 +1417,75 @@
 
 /* Casts between various SP, DP, INT vector types.  Note that these do no
    conversion of values, they just change the type.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_castpd_ps(__m128d __A)
 {
   return (__m128) __A;
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_castpd_si128(__m128d __A)
 {
   return (__m128i) __A;
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_castps_pd(__m128 __A)
 {
   return (__m128d) __A;
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_castps_si128(__m128 __A)
 {
   return (__m128i) __A;
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_castsi128_ps(__m128i __A)
 {
   return (__m128) __A;
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_castsi128_pd(__m128i __A)
 {
   return (__m128d) __A;
 }
 /* APPLE LOCAL end radar 4152603 */
 
-/* APPLE LOCAL begin 4220129 */
+/* APPLE LOCAL begin 4220129, 4286110 */
 /* Set all of the elements of the vector to A.  */
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_set1_epi64x (long long __A)
 {
   return _mm_set_epi64x (__A, __A);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_set1_epi64 (__m64 __A)
 {
   return _mm_set_epi64 (__A, __A);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_set1_epi32 (int __A)
 {
   return _mm_set_epi32 (__A, __A, __A, __A);
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_set1_epi16 (short __A)
 {
-  return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A);
+  __m128i temp, temp2, temp3; 
+  temp = _mm_cvtsi32_si128((int)__A);
+  temp2 = _mm_unpacklo_epi16(temp, temp);
+  temp3 = _mm_shuffle_epi32(temp2, 0);
+  return temp3;
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_set1_epi8 (char __A)
 {
     __m128i temp, temp2, temp3, temp4;
@@ -1487,7 +1495,12 @@
     temp4 = _mm_shuffle_epi32 (temp3, 0);
     return temp4;
 }
-/* APPLE LOCAL end 4220129 */
+/* APPLE LOCAL end 4220129, 4286110 */
+
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#undef __always_inline__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
 #endif /* __SSE2__  */
 
 #endif /* _EMMINTRIN_H_INCLUDED */
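
FYI, the _mm_set1_epi16 change (4286110) above is the same splat idiom
_mm_set1_epi8 already uses: get the value into element 0, widen it to a
full 32-bit lane with an unpack, then broadcast that lane with a shuffle,
presumably so codegen is a couple of shuffles rather than eight element
inserts.  A quick equivalence check (my own sketch, not part of the
patch; assumes an SSE2 target, e.g. -msse2):

/* Illustration only, not part of the patch.  */
#include <emmintrin.h>
#include <stdio.h>
#include <string.h>

static __m128i
splat16 (short a)
{
  __m128i t = _mm_cvtsi32_si128 ((int) a);  /* a in the low 16 bits */
  t = _mm_unpacklo_epi16 (t, t);            /* low 32-bit lane = a:a */
  return _mm_shuffle_epi32 (t, 0);          /* broadcast that lane */
}

int
main (void)
{
  __m128i x = splat16 (0x1234);
  __m128i y = _mm_set_epi16 (0x1234, 0x1234, 0x1234, 0x1234,
                             0x1234, 0x1234, 0x1234, 0x1234);
  printf (memcmp (&x, &y, sizeof x) == 0 ? "equal\n" : "differ\n");
  return 0;
}
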
diff -U3 -r /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/mmintrin.h apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/mmintrin.h
--- /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/mmintrin.h	2006-07-24 19:12:28.000000000 -0700
+++ apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/mmintrin.h	2006-12-22 15:30:54.000000000 -0800
@@ -43,28 +43,32 @@
 typedef short __v4hi __attribute__ ((__vector_size__ (8)));
 typedef char __v8qi __attribute__ ((__vector_size__ (8)));
 
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
 /* Empty the multimedia state.  */
 /* APPLE LOCAL begin radar 4152603 */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_empty (void)
 {
   __builtin_ia32_emms ();
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _m_empty (void)
 {
   _mm_empty ();
 }
 
 /* Convert I to a __m64 object.  The integer is zero-extended to 64-bits.  */
-static __inline __m64  __attribute__((__always_inline__, __nodebug__))
+static __inline __m64  __attribute__((__always_inline__))
 _mm_cvtsi32_si64 (int __i)
 {
   return (__m64) __builtin_ia32_vec_init_v2si (__i, 0);
 }
 
-static __inline __m64  __attribute__((__always_inline__, __nodebug__))
+static __inline __m64  __attribute__((__always_inline__))
 _m_from_int (int __i)
 {
   return _mm_cvtsi32_si64 (__i);
@@ -74,26 +78,26 @@
 /* Convert I to a __m64 object.  */
 
 /* Intel intrinsic.  */
-static __inline __m64  __attribute__((__always_inline__, __nodebug__))
+static __inline __m64  __attribute__((__always_inline__))
 _m_from_int64 (long long __i)
 {
   return (__m64) __i;
 }
 
-static __inline __m64  __attribute__((__always_inline__, __nodebug__))
+static __inline __m64  __attribute__((__always_inline__))
 _mm_cvtsi64_m64 (long long __i)
 {
   return (__m64) __i;
 }
 
 /* Microsoft intrinsic.  */
-static __inline __m64  __attribute__((__always_inline__, __nodebug__))
+static __inline __m64  __attribute__((__always_inline__))
 _mm_cvtsi64x_si64 (long long __i)
 {
   return (__m64) __i;
 }
 
-static __inline __m64  __attribute__((__always_inline__, __nodebug__))
+static __inline __m64  __attribute__((__always_inline__))
 _mm_set_pi64x (long long __i)
 {
   return (__m64) __i;
@@ -101,13 +105,13 @@
 #endif
 
 /* Convert the lower 32 bits of the __m64 object into an integer.  */
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_cvtsi64_si32 (__m64 __i)
 {
   return __builtin_ia32_vec_ext_v2si ((__v2si)__i, 0);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _m_to_int (__m64 __i)
 {
   return _mm_cvtsi64_si32 (__i);
@@ -117,20 +121,20 @@
 /* Convert the __m64 object to a 64bit integer.  */
 
 /* Intel intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _m_to_int64 (__m64 __i)
 {
   return (long long)__i;
 }
 
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _mm_cvtm64_si64 (__m64 __i)
 {
   return (long long)__i;
 }
 
 /* Microsoft intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _mm_cvtsi64_si64x (__m64 __i)
 {
   return (long long)__i;
@@ -140,13 +144,13 @@
 /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
    the result, and the four 16-bit values from M2 into the upper four 8-bit
    values of the result, all with signed saturation.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_packs_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_packsswb ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_packsswb (__m64 __m1, __m64 __m2)
 {
   return _mm_packs_pi16 (__m1, __m2);
@@ -155,13 +159,13 @@
 /* Pack the two 32-bit values from M1 into the lower two 16-bit values of
    the result, and the two 32-bit values from M2 into the upper two 16-bit
    values of the result, all with signed saturation.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_packs_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_packssdw ((__v2si)__m1, (__v2si)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_packssdw (__m64 __m1, __m64 __m2)
 {
   return _mm_packs_pi32 (__m1, __m2);
@@ -170,13 +174,13 @@
 /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
    the result, and the four 16-bit values from M2 into the upper four 8-bit
    values of the result, all with unsigned saturation.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_packs_pu16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_packuswb ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_packuswb (__m64 __m1, __m64 __m2)
 {
   return _mm_packs_pu16 (__m1, __m2);
@@ -184,13 +188,13 @@
 
 /* Interleave the four 8-bit values from the high half of M1 with the four
    8-bit values from the high half of M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_punpckhbw ((__v8qi)__m1, (__v8qi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_punpckhbw (__m64 __m1, __m64 __m2)
 {
   return _mm_unpackhi_pi8 (__m1, __m2);
@@ -198,13 +202,13 @@
 
 /* Interleave the two 16-bit values from the high half of M1 with the two
    16-bit values from the high half of M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_punpckhwd ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_punpckhwd (__m64 __m1, __m64 __m2)
 {
   return _mm_unpackhi_pi16 (__m1, __m2);
@@ -212,13 +216,13 @@
 
 /* Interleave the 32-bit value from the high half of M1 with the 32-bit
    value from the high half of M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_punpckhdq ((__v2si)__m1, (__v2si)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_punpckhdq (__m64 __m1, __m64 __m2)
 {
   return _mm_unpackhi_pi32 (__m1, __m2);
@@ -226,13 +230,13 @@
 
 /* Interleave the four 8-bit values from the low half of M1 with the four
    8-bit values from the low half of M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_punpcklbw ((__v8qi)__m1, (__v8qi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_punpcklbw (__m64 __m1, __m64 __m2)
 {
   return _mm_unpacklo_pi8 (__m1, __m2);
@@ -240,13 +244,13 @@
 
 /* Interleave the two 16-bit values from the low half of M1 with the two
    16-bit values from the low half of M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_punpcklwd ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_punpcklwd (__m64 __m1, __m64 __m2)
 {
   return _mm_unpacklo_pi16 (__m1, __m2);
@@ -254,73 +258,74 @@
 
 /* Interleave the 32-bit value from the low half of M1 with the 32-bit
    value from the low half of M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_punpckldq ((__v2si)__m1, (__v2si)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_punpckldq (__m64 __m1, __m64 __m2)
 {
   return _mm_unpacklo_pi32 (__m1, __m2);
 }
 
 /* Add the 8-bit values in M1 to the 8-bit values in M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_add_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_paddb (__m64 __m1, __m64 __m2)
 {
   return _mm_add_pi8 (__m1, __m2);
 }
 
 /* Add the 16-bit values in M1 to the 16-bit values in M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_add_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddw ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_paddw (__m64 __m1, __m64 __m2)
 {
   return _mm_add_pi16 (__m1, __m2);
 }
 
 /* Add the 32-bit values in M1 to the 32-bit values in M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_add_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddd ((__v2si)__m1, (__v2si)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_paddd (__m64 __m1, __m64 __m2)
 {
   return _mm_add_pi32 (__m1, __m2);
 }
 
 /* Add the 64-bit values in M1 to the 64-bit values in M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_add_si64 (__m64 __m1, __m64 __m2)
 {
-  return (__m64) __builtin_ia32_paddq ((long long)__m1, (long long)__m2);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_paddq (__m1, __m2);
 }
 
 /* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
    saturated arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_adds_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddsb ((__v8qi)__m1, (__v8qi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_paddsb (__m64 __m1, __m64 __m2)
 {
   return _mm_adds_pi8 (__m1, __m2);
@@ -328,13 +333,13 @@
 
 /* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
    saturated arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_adds_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddsw ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_paddsw (__m64 __m1, __m64 __m2)
 {
   return _mm_adds_pi16 (__m1, __m2);
@@ -342,13 +347,13 @@
 
 /* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
    saturated arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_adds_pu8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddusb ((__v8qi)__m1, (__v8qi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_paddusb (__m64 __m1, __m64 __m2)
 {
   return _mm_adds_pu8 (__m1, __m2);
@@ -356,73 +361,74 @@
 
 /* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
    saturated arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_adds_pu16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddusw ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_paddusw (__m64 __m1, __m64 __m2)
 {
   return _mm_adds_pu16 (__m1, __m2);
 }
 
 /* Subtract the 8-bit values in M2 from the 8-bit values in M1.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_sub_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubb ((__v8qi)__m1, (__v8qi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psubb (__m64 __m1, __m64 __m2)
 {
   return _mm_sub_pi8 (__m1, __m2);
 }
 
 /* Subtract the 16-bit values in M2 from the 16-bit values in M1.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_sub_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubw ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psubw (__m64 __m1, __m64 __m2)
 {
   return _mm_sub_pi16 (__m1, __m2);
 }
 
 /* Subtract the 32-bit values in M2 from the 32-bit values in M1.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_sub_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubd ((__v2si)__m1, (__v2si)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psubd (__m64 __m1, __m64 __m2)
 {
   return _mm_sub_pi32 (__m1, __m2);
 }
 
 /* Subtract the 64-bit value in M2 from the 64-bit value in M1.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_sub_si64 (__m64 __m1, __m64 __m2)
 {
-  return (__m64) __builtin_ia32_psubq ((long long)__m1, (long long)__m2);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psubq (__m1, __m2);
 }
 
 /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
    saturating arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_subs_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubsb ((__v8qi)__m1, (__v8qi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psubsb (__m64 __m1, __m64 __m2)
 {
   return _mm_subs_pi8 (__m1, __m2);
@@ -430,13 +436,13 @@
 
 /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
    signed saturating arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_subs_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubsw ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psubsw (__m64 __m1, __m64 __m2)
 {
   return _mm_subs_pi16 (__m1, __m2);
@@ -444,13 +450,13 @@
 
 /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
    unsigned saturating arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_subs_pu8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubusb ((__v8qi)__m1, (__v8qi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psubusb (__m64 __m1, __m64 __m2)
 {
   return _mm_subs_pu8 (__m1, __m2);
@@ -458,13 +464,13 @@
 
 /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
    unsigned saturating arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_subs_pu16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psubusw (__m64 __m1, __m64 __m2)
 {
   return _mm_subs_pu16 (__m1, __m2);
@@ -473,13 +479,13 @@
 /* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
    four 32-bit intermediate results, which are then summed by pairs to
    produce two 32-bit results.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_madd_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pmaddwd (__m64 __m1, __m64 __m2)
 {
   return _mm_madd_pi16 (__m1, __m2);
@@ -487,13 +493,13 @@
 
 /* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
    M2 and produce the high 16 bits of the 32-bit results.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pmulhw (__m64 __m1, __m64 __m2)
 {
   return _mm_mulhi_pi16 (__m1, __m2);
@@ -501,226 +507,242 @@
 
 /* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
    the low 16 bits of the results.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pmullw (__m64 __m1, __m64 __m2)
 {
   return _mm_mullo_pi16 (__m1, __m2);
 }
 
 /* Shift four 16-bit values in M left by COUNT.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_sll_pi16 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psllw ((__v4hi)__m, (long long)__count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psllw ((__v4hi)__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psllw (__m64 __m, __m64 __count)
 {
   return _mm_sll_pi16 (__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_slli_pi16 (__m64 __m, int __count)
 {
-  return (__m64) __builtin_ia32_psllw ((__v4hi)__m, __count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psllwi ((__v4hi)__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psllwi (__m64 __m, int __count)
 {
   return _mm_slli_pi16 (__m, __count);
 }
 
 /* Shift two 32-bit values in M left by COUNT.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_sll_pi32 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_pslld ((__v2si)__m, (long long)__count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_pslld ((__v2si)__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pslld (__m64 __m, __m64 __count)
 {
   return _mm_sll_pi32 (__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_slli_pi32 (__m64 __m, int __count)
 {
-  return (__m64) __builtin_ia32_pslld ((__v2si)__m, __count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_pslldi ((__v2si)__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pslldi (__m64 __m, int __count)
 {
   return _mm_slli_pi32 (__m, __count);
 }
 
 /* Shift the 64-bit value in M left by COUNT.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_sll_si64 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psllq (__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psllq (__m64 __m, __m64 __count)
 {
   return _mm_sll_si64 (__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_slli_si64 (__m64 __m, int __count)
 {
-  return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psllqi (__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psllqi (__m64 __m, int __count)
 {
   return _mm_slli_si64 (__m, __count);
 }
 
 /* Shift four 16-bit values in M right by COUNT; shift in the sign bit.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_sra_pi16 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psraw ((__v4hi)__m, (long long)__count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psraw ((__v4hi)__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psraw (__m64 __m, __m64 __count)
 {
   return _mm_sra_pi16 (__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_srai_pi16 (__m64 __m, int __count)
 {
-  return (__m64) __builtin_ia32_psraw ((__v4hi)__m, __count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psrawi ((__v4hi)__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psrawi (__m64 __m, int __count)
 {
   return _mm_srai_pi16 (__m, __count);
 }
 
 /* Shift two 32-bit values in M right by COUNT; shift in the sign bit.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_sra_pi32 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psrad ((__v2si)__m, (long long)__count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psrad ((__v2si)__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psrad (__m64 __m, __m64 __count)
 {
   return _mm_sra_pi32 (__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_srai_pi32 (__m64 __m, int __count)
 {
-  return (__m64) __builtin_ia32_psrad ((__v2si)__m, __count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psradi ((__v2si)__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psradi (__m64 __m, int __count)
 {
   return _mm_srai_pi32 (__m, __count);
 }
 
 /* Shift four 16-bit values in M right by COUNT; shift in zeros.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_srl_pi16 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, (long long)__count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psrlw (__m64 __m, __m64 __count)
 {
   return _mm_srl_pi16 (__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_srli_pi16 (__m64 __m, int __count)
 {
-  return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, __count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psrlwi ((__v4hi)__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psrlwi (__m64 __m, int __count)
 {
   return _mm_srli_pi16 (__m, __count);
 }
 
 /* Shift two 32-bit values in M right by COUNT; shift in zeros.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_srl_pi32 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psrld ((__v2si)__m, (long long)__count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psrld ((__v2si)__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psrld (__m64 __m, __m64 __count)
 {
   return _mm_srl_pi32 (__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_srli_pi32 (__m64 __m, int __count)
 {
-  return (__m64) __builtin_ia32_psrld ((__v2si)__m, __count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psrldi ((__v2si)__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psrldi (__m64 __m, int __count)
 {
   return _mm_srli_pi32 (__m, __count);
 }
 
 /* Shift the 64-bit value in M right by COUNT; shift in zeros.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_srl_si64 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psrlq (__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psrlq (__m64 __m, __m64 __count)
 {
   return _mm_srl_si64 (__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_srli_si64 (__m64 __m, int __count)
 {
-  return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  return (__m64) __builtin_ia32_psrlqi (__m, __count);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psrlqi (__m64 __m, int __count)
 {
   return _mm_srli_si64 (__m, __count);
 }
 
 /* Bit-wise AND the 64-bit values in M1 and M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_and_si64 (__m64 __m1, __m64 __m2)
 {
   return __builtin_ia32_pand (__m1, __m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pand (__m64 __m1, __m64 __m2)
 {
   return _mm_and_si64 (__m1, __m2);
@@ -728,39 +750,39 @@
 
 /* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
    64-bit value in M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_andnot_si64 (__m64 __m1, __m64 __m2)
 {
   return __builtin_ia32_pandn (__m1, __m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pandn (__m64 __m1, __m64 __m2)
 {
   return _mm_andnot_si64 (__m1, __m2);
 }
 
 /* Bit-wise inclusive OR the 64-bit values in M1 and M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_or_si64 (__m64 __m1, __m64 __m2)
 {
   return __builtin_ia32_por (__m1, __m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_por (__m64 __m1, __m64 __m2)
 {
   return _mm_or_si64 (__m1, __m2);
 }
 
 /* Bit-wise exclusive OR the 64-bit values in M1 and M2.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_xor_si64 (__m64 __m1, __m64 __m2)
 {
   return __builtin_ia32_pxor (__m1, __m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pxor (__m64 __m1, __m64 __m2)
 {
   return _mm_xor_si64 (__m1, __m2);
@@ -768,25 +790,25 @@
 
 /* Compare eight 8-bit values.  The result of the comparison is 0xFF if the
    test is true and zero if false.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pcmpeqb (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpeq_pi8 (__m1, __m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pcmpgtb (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpgt_pi8 (__m1, __m2);
@@ -794,25 +816,25 @@
 
 /* Compare four 16-bit values.  The result of the comparison is 0xFFFF if
    the test is true and zero if false.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pcmpeqw (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpeq_pi16 (__m1, __m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pcmpgtw (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpgt_pi16 (__m1, __m2);
@@ -820,53 +842,53 @@
 
 /* Compare two 32-bit values.  The result of the comparison is 0xFFFFFFFF if
    the test is true and zero if false.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pcmpeqd (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpeq_pi32 (__m1, __m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pcmpgtd (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpgt_pi32 (__m1, __m2);
 }
 
 /* Creates a 64-bit zero.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_setzero_si64 (void)
 {
   return (__m64)0LL;
 }
 
 /* Creates a vector of two 32-bit values; I0 is least significant.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_set_pi32 (int __i1, int __i0)
 {
   return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
 }
 
 /* Creates a vector of four 16-bit values; W0 is least significant.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
 {
   return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3);
 }
 
 /* Creates a vector of eight 8-bit values; B0 is least significant.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
 	     char __b3, char __b2, char __b1, char __b0)
 {
@@ -875,19 +897,19 @@
 }
 
 /* Similar, but with the arguments in reverse order.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_setr_pi32 (int __i0, int __i1)
 {
   return _mm_set_pi32 (__i1, __i0);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
 {
   return _mm_set_pi16 (__w3, __w2, __w1, __w0);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
 	      char __b4, char __b5, char __b6, char __b7)
 {
@@ -895,26 +917,30 @@
 }
 
 /* Creates a vector of two 32-bit values, both elements containing I.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_set1_pi32 (int __i)
 {
   return _mm_set_pi32 (__i, __i);
 }
 
 /* Creates a vector of four 16-bit values, all elements containing W.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_set1_pi16 (short __w)
 {
   return _mm_set_pi16 (__w, __w, __w, __w);
 }
 
 /* Creates a vector of eight 8-bit values, all elements containing B.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_set1_pi8 (char __b)
 {
   return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b);
 }
 /* APPLE LOCAL end radar 4152603 */
 
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#undef __always_inline__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
 #endif /* __MMX__ */
 #endif /* _MMINTRIN_H_INCLUDED */
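
The 4656532 hunks are mechanical: with __m64 now V1DImode, the
shift-by-__m64 builtins take the count operand directly instead of
through a (long long) cast, and the shift-by-immediate intrinsics move
to the dedicated ...i builtins (psllwi, pslldi, psllqi and friends).
The user-visible signatures don't change; here is a throwaway check
(mine, not from the patch; build with -mmmx) that the immediate and
register-count forms still agree:

/* Illustration only, not part of the patch.  */
#include <mmintrin.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  __m64 v = _mm_set_pi16 (1, 2, 3, 4);
  __m64 a = _mm_slli_pi16 (v, 3);                    /* immediate count */
  __m64 b = _mm_sll_pi16 (v, _mm_cvtsi32_si64 (3));  /* count in an __m64 */
  int same = memcmp (&a, &b, sizeof a) == 0;
  _mm_empty ();  /* clear MMX state before touching the FPU */
  printf (same ? "equal\n" : "differ\n");
  return !same;
}
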
diff -U3 -r /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/nameser8_compat.h apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/nameser8_compat.h
--- /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/nameser8_compat.h	2006-07-24 19:12:22.000000000 -0700
+++ apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/nameser8_compat.h	2006-12-22 15:30:52.000000000 -0800
@@ -2,7 +2,7 @@
 
     It has been auto-edited by fixincludes from:
 
-	"/usr/include/nameser8_compat.h"
+	"/Developer/SDKs/MacOSX10.4u.sdk/usr/include/nameser8_compat.h"
 
     This had to be done to correct non-standard usages in the
     original, manufacturer supplied header file.  */
diff -U3 -r /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/pmmintrin.h apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/pmmintrin.h
--- /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/pmmintrin.h	2006-07-24 19:12:28.000000000 -0700
+++ apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/pmmintrin.h	2006-12-22 15:30:54.000000000 -0800
@@ -45,81 +45,85 @@
 #define _MM_GET_DENORMALS_ZERO_MODE() \
   (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
 
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
 /* APPLE LOCAL begin radar 4152603 */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_addsub_ps (__m128 __X, __m128 __Y)
 {
   return (__m128) __builtin_ia32_addsubps ((__v4sf)__X, (__v4sf)__Y);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_hadd_ps (__m128 __X, __m128 __Y)
 {
   return (__m128) __builtin_ia32_haddps ((__v4sf)__X, (__v4sf)__Y);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_hsub_ps (__m128 __X, __m128 __Y)
 {
   return (__m128) __builtin_ia32_hsubps ((__v4sf)__X, (__v4sf)__Y);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_movehdup_ps (__m128 __X)
 {
   return (__m128) __builtin_ia32_movshdup ((__v4sf)__X);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_moveldup_ps (__m128 __X)
 {
   return (__m128) __builtin_ia32_movsldup ((__v4sf)__X);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_addsub_pd (__m128d __X, __m128d __Y)
 {
   return (__m128d) __builtin_ia32_addsubpd ((__v2df)__X, (__v2df)__Y);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_hadd_pd (__m128d __X, __m128d __Y)
 {
   return (__m128d) __builtin_ia32_haddpd ((__v2df)__X, (__v2df)__Y);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_hsub_pd (__m128d __X, __m128d __Y)
 {
   return (__m128d) __builtin_ia32_hsubpd ((__v2df)__X, (__v2df)__Y);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_loaddup_pd (double const *__P)
 {
   return _mm_load1_pd (__P);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline __m128d __attribute__((__always_inline__))
 _mm_movedup_pd (__m128d __X)
 {
   return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0));
 }
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline __m128i __attribute__((__always_inline__))
 _mm_lddqu_si128 (__m128i const *__P)
 {
   return (__m128i) __builtin_ia32_lddqu ((char const *)__P);
 }
 
 #if 0
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_monitor (void const * __P, unsigned int __E, unsigned int __H)
 {
   __builtin_ia32_monitor (__P, __E, __H);
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_mwait (unsigned int __E, unsigned int __H)
 {
   __builtin_ia32_mwait (__E, __H);
@@ -129,6 +133,9 @@
 #define _mm_monitor(P, E, H)	__builtin_ia32_monitor ((P), (E), (H))
 #define _mm_mwait(E, H)		__builtin_ia32_mwait ((E), (H))
 #endif
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#undef __always_inline__
+/* APPLE LOCAL end nodebug inline 4152603 */
 
 #endif /* __SSE3__ */
 
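
The nodebug define/undef pair (4152603) is the same in all four headers,
and it is why every intrinsic body above loses its explicit __nodebug__:
inside the bracketed region, __always_inline__ in an attribute list
macro-expands to __always_inline__, __nodebug__ (self-reference stops
the expansion after one step), and the trailing #undef keeps the rewrite
from leaking into user code.  A stripped-down sketch of the mechanism
(illustration only; on compilers without a __nodebug__ attribute this
just draws an attribute warning):

/* Illustration only, not part of the patch.  */
#define __always_inline__ __always_inline__, __nodebug__

/* The definition below preprocesses to:
   static __inline int __attribute__((__always_inline__, __nodebug__))
   twice (int x) ...  */
static __inline int __attribute__((__always_inline__))
twice (int x)
{
  return x + x;
}

#undef __always_inline__  /* user code sees the plain attribute again */

int
main (void)
{
  return twice (2) - 4;
}
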
Only in apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/root: Developer
Only in /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/root: usr
diff -U3 -r /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/tmmintrin.h apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/tmmintrin.h
--- /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/tmmintrin.h	2006-07-24 19:12:28.000000000 -0700
+++ apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/tmmintrin.h	2006-12-22 15:30:54.000000000 -0800
@@ -1,4 +1,4 @@
-/* APPLE LOCAL file mni 4424835 */
+/* APPLE LOCAL file ssse3 4424835 */
 /* Copyright (C) 2006 Free Software Foundation, Inc.
 
    This file is part of GCC.
@@ -31,7 +31,7 @@
 #ifndef _TMMINTRIN_H_INCLUDED
 #define _TMMINTRIN_H_INCLUDED
 
-#ifdef __MNI__
+#ifdef __SSSE3__
 #include <pmmintrin.h>
 
 /* APPLE LOCAL begin nodebug inline */
@@ -228,6 +228,6 @@
 #undef __always_inline__
 /* APPLE LOCAL end nodebug inline */
 
-#endif /* __MNI__ */
+#endif /* __SSSE3__ */
 
 #endif /* _TMMINTRIN_H_INCLUDED */
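
The tmmintrin.h hunks just retire the old MNI spelling: the guard now
matches the __SSSE3__ macro the compiler predefines when SSSE3 codegen
is enabled (-mssse3), and the APPLE LOCAL marker is renamed to match.
Hypothetical user code that conditionally uses the header would look
like this (my example, not from the patch; _mm_abs_pi16 is just a
representative SSSE3 intrinsic):

/* Illustration only, not part of the patch; build with -mssse3.  */
#ifdef __SSSE3__
#include <tmmintrin.h>

static __m64
abs16 (__m64 v)
{
  return _mm_abs_pi16 (v);  /* SSSE3 packed absolute value */
}
#endif

int
main (void)
{
#ifdef __SSSE3__
  __m64 r = abs16 (_mm_set_pi16 (-1, 2, -3, 4));
  (void) r;
  _mm_empty ();
#endif
  return 0;
}
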
diff -U3 -r /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/xmmintrin.h apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/xmmintrin.h
--- /Volumes/mrs5/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/xmmintrin.h	2006-07-24 19:12:28.000000000 -0700
+++ apple-build-fat/gcc.roots/gcc~dst/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1/include/xmmintrin.h	2006-12-22 15:30:54.000000000 -0800
@@ -87,9 +87,13 @@
 #define _MM_FLUSH_ZERO_ON     0x8000
 #define _MM_FLUSH_ZERO_OFF    0x0000
 
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
 /* Create a vector of zeros.  */
 /* APPLE LOCAL begin radar 4152603 */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_setzero_ps (void)
 {
   return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
@@ -99,55 +103,55 @@
    floating-point) values of A and B; the upper three SPFP values are
    passed through from A.  */
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_add_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_sub_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_mul_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_div_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_sqrt_ss (__m128 __A)
 {
   return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_rcp_ss (__m128 __A)
 {
   return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_rsqrt_ss (__m128 __A)
 {
   return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_min_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_max_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
@@ -155,55 +159,55 @@
 
 /* Perform the respective operation on the four SPFP values in A and B.  */
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_add_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_sub_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_mul_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_div_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_sqrt_ps (__m128 __A)
 {
   return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_rcp_ps (__m128 __A)
 {
   return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_rsqrt_ps (__m128 __A)
 {
   return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_min_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_max_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
@@ -211,25 +215,25 @@
 
 /* Perform logical bit-wise operations on 128-bit values.  */
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_and_ps (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_andps (__A, __B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_andnot_ps (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_andnps (__A, __B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_or_ps (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_orps (__A, __B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_xor_ps (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_xorps (__A, __B);
@@ -239,25 +243,25 @@
    comparison is true, place a mask of all ones in the result, otherwise a
    mask of zeros.  The upper three SPFP values are passed through from A.  */
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpeq_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmplt_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmple_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpgt_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_movss ((__v4sf) __A,
@@ -267,7 +271,7 @@
 								__A));
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpge_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_movss ((__v4sf) __A,
@@ -277,25 +281,25 @@
 								__A));
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpneq_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpnlt_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpnle_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpngt_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_movss ((__v4sf) __A,
@@ -305,7 +309,7 @@
 								 __A));
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpnge_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_movss ((__v4sf) __A,
@@ -315,13 +319,13 @@
 								 __A));
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpord_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpunord_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
@@ -331,73 +335,73 @@
    element, if the comparison is true, place a mask of all ones in the
    result, otherwise a mask of zeros.  */
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpeq_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmplt_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmple_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpgt_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpge_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpneq_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpnlt_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpnle_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpngt_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpnge_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpord_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cmpunord_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
@@ -406,73 +410,73 @@
 /* Compare the lower SPFP values of A and B and return 1 if true
    and 0 if false.  */
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_comieq_ss (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_comilt_ss (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_comile_ss (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_comigt_ss (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_comige_ss (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_comineq_ss (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_ucomieq_ss (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_ucomilt_ss (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_ucomile_ss (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_ucomigt_ss (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_ucomige_ss (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_ucomineq_ss (__m128 __A, __m128 __B)
 {
   return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
@@ -480,13 +484,13 @@
 
 /* Convert the lower SPFP value to a 32-bit integer according to the current
    rounding mode.  */
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_cvtss_si32 (__m128 __A)
 {
   return __builtin_ia32_cvtss2si ((__v4sf) __A);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_cvt_ss2si (__m128 __A)
 {
   return _mm_cvtss_si32 (__A);
@@ -496,14 +500,14 @@
 /* Convert the lower SPFP value to a 32-bit integer according to the
    current rounding mode.  */
 /* Intel intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _mm_cvtss_si64 (__m128 __A)
 {
   return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
 }
 
 /* Microsoft intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _mm_cvtss_si64x (__m128 __A)
 {
   return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
@@ -512,26 +516,26 @@
 
 /* Convert the two lower SPFP values to 32-bit integers according to the
    current rounding mode.  Return the integers in packed form.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cvtps_pi32 (__m128 __A)
 {
   return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cvt_ps2pi (__m128 __A)
 {
   return _mm_cvtps_pi32 (__A);
 }
 
 /* Truncate the lower SPFP value to a 32-bit integer.  */
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_cvttss_si32 (__m128 __A)
 {
   return __builtin_ia32_cvttss2si ((__v4sf) __A);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_cvtt_ss2si (__m128 __A)
 {
   return _mm_cvttss_si32 (__A);
@@ -541,14 +545,14 @@
 /* Truncate the lower SPFP value to a 32-bit integer.  */
 
 /* Intel intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _mm_cvttss_si64 (__m128 __A)
 {
   return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
 }
 
 /* Microsoft intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline long long __attribute__((__always_inline__))
 _mm_cvttss_si64x (__m128 __A)
 {
   return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
@@ -557,26 +561,26 @@
 
 /* Truncate the two lower SPFP values to 32-bit integers.  Return the
    integers in packed form.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cvttps_pi32 (__m128 __A)
 {
   return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cvtt_ps2pi (__m128 __A)
 {
   return _mm_cvttps_pi32 (__A);
 }
 
 /* Convert B to a SPFP value and insert it as element zero in A.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtsi32_ss (__m128 __A, int __B)
 {
   return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvt_si2ss (__m128 __A, int __B)
 {
   return _mm_cvtsi32_ss (__A, __B);
@@ -586,14 +590,14 @@
 /* Convert B to a SPFP value and insert it as element zero in A.  */
 
 /* Intel intrinsic.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtsi64_ss (__m128 __A, long long __B)
 {
   return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
 }
 
 /* Microsoft intrinsic.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtsi64x_ss (__m128 __A, long long __B)
 {
   return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
@@ -602,20 +606,20 @@
 
 /* Convert the two 32-bit values in B to SPFP form and insert them
    as the two lower elements in A.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtpi32_ps (__m128 __A, __m64 __B)
 {
   return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvt_pi2ps (__m128 __A, __m64 __B)
 {
   return _mm_cvtpi32_ps (__A, __B);
 }
 
 /* Convert the four signed 16-bit values in A to SPFP form.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtpi16_ps (__m64 __A)
 {
   __v4hi __sign;
@@ -641,7 +645,7 @@
 }
 
 /* Convert the four unsigned 16-bit values in A to SPFP form.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtpu16_ps (__m64 __A)
 {
   __v2si __hisi, __losi;
@@ -661,7 +665,7 @@
 }
 
 /* Convert the low four signed 8-bit values in A to SPFP form.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtpi8_ps (__m64 __A)
 {
   __v8qi __sign;
@@ -678,7 +682,7 @@
 }
 
 /* Convert the low four unsigned 8-bit values in A to SPFP form.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtpu8_ps(__m64 __A)
 {
   __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
@@ -686,7 +690,7 @@
 }
 
 /* Convert the four signed 32-bit values in A and B to SPFP form.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
 {
   __v4sf __zero = (__v4sf) _mm_setzero_ps ();
@@ -696,7 +700,7 @@
 }
 
 /* Convert the four SPFP values in A to four signed 16-bit integers.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cvtps_pi16(__m128 __A)
 {
   __v4sf __hisf = (__v4sf)__A;
@@ -707,7 +711,7 @@
 }
 
 /* Convert the four SPFP values in A to four signed 8-bit integers.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_cvtps_pi8(__m128 __A)
 {
   __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
@@ -716,7 +720,7 @@
 
 /* Selects four specific SPFP values from A and B based on MASK.  */
 #if 0
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_shuffle_ps (__m128 __A, __m128 __B, int __mask)
 {
   return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
@@ -728,14 +732,14 @@
 
 
 /* Selects and interleaves the upper two SPFP values from A and B.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_unpackhi_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
 }
 
 /* Selects and interleaves the lower two SPFP values from A and B.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_unpacklo_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
@@ -743,28 +747,28 @@
 
 /* Sets the upper two SPFP values with 64-bits of data loaded from P;
    the lower two values are passed through from A.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_loadh_pi (__m128 __A, __m64 const *__P)
 {
   return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
 }
 
 /* Stores the upper two SPFP values of A into P.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_storeh_pi (__m64 *__P, __m128 __A)
 {
   __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
 }
 
 /* Moves the upper two values of B into the lower two values of A.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_movehl_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
 }
 
 /* Moves the lower two values of B into the upper two values of A.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_movelh_ps (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
@@ -772,146 +776,146 @@
 
 /* Sets the lower two SPFP values with 64-bits of data loaded from P;
    the upper two values are passed through from A.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_loadl_pi (__m128 __A, __m64 const *__P)
 {
   return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
 }
 
 /* Stores the lower two SPFP values of A into P.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_storel_pi (__m64 *__P, __m128 __A)
 {
   __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
 }
 
 /* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_movemask_ps (__m128 __A)
 {
   return __builtin_ia32_movmskps ((__v4sf)__A);
 }
 
 /* Return the contents of the control register.  */
-static __inline unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline unsigned int __attribute__((__always_inline__))
 _mm_getcsr (void)
 {
   return __builtin_ia32_stmxcsr ();
 }
 
 /* Read exception bits from the control register.  */
-static __inline unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline unsigned int __attribute__((__always_inline__))
 _MM_GET_EXCEPTION_STATE (void)
 {
   return _mm_getcsr() & _MM_EXCEPT_MASK;
 }
 
-static __inline unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline unsigned int __attribute__((__always_inline__))
 _MM_GET_EXCEPTION_MASK (void)
 {
   return _mm_getcsr() & _MM_MASK_MASK;
 }
 
-static __inline unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline unsigned int __attribute__((__always_inline__))
 _MM_GET_ROUNDING_MODE (void)
 {
   return _mm_getcsr() & _MM_ROUND_MASK;
 }
 
-static __inline unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline unsigned int __attribute__((__always_inline__))
 _MM_GET_FLUSH_ZERO_MODE (void)
 {
   return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
 }
 
 /* Set the control register to I.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_setcsr (unsigned int __I)
 {
   __builtin_ia32_ldmxcsr (__I);
 }
 
 /* Set exception bits in the control register.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _MM_SET_EXCEPTION_STATE(unsigned int __mask)
 {
   _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _MM_SET_EXCEPTION_MASK (unsigned int __mask)
 {
   _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _MM_SET_ROUNDING_MODE (unsigned int __mode)
 {
   _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
 {
   _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
 }
 
 /* Create a vector with element 0 as F and the rest zero.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_set_ss (float __F)
 {
   return __extension__ (__m128)(__v4sf){ __F, 0, 0, 0 };
 }
 
 /* Create a vector with all four elements equal to F.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_set1_ps (float __F)
 {
   return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_set_ps1 (float __F)
 {
   return _mm_set1_ps (__F);
 }
 
 /* Create a vector with element 0 as *P and the rest zero.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_load_ss (float const *__P)
 {
   return _mm_set_ss (*__P);
 }
 
 /* Create a vector with all four elements equal to *P.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_load1_ps (float const *__P)
 {
   return _mm_set1_ps (*__P);
 }
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_load_ps1 (float const *__P)
 {
   return _mm_load1_ps (__P);
 }
 
 /* Load four SPFP values from P.  The address must be 16-byte aligned.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_load_ps (float const *__P)
 {
   return (__m128) *(__v4sf *)__P;
 }
 
 /* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_loadu_ps (float const *__P)
 {
   return (__m128) __builtin_ia32_loadups (__P);
 }
 
 /* Load four SPFP values in reverse order.  The address must be aligned.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_loadr_ps (float const *__P)
 {
   __v4sf __tmp = *(__v4sf *)__P;
@@ -919,48 +923,48 @@
 }
 
 /* Create the vector [Z Y X W].  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
 {
   return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
 }
 
 /* Create the vector [W X Y Z].  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_setr_ps (float __Z, float __Y, float __X, float __W)
 {
   return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
 }
 
 /* Stores the lower SPFP value.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_store_ss (float *__P, __m128 __A)
 {
   *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
 }
 
-static __inline float __attribute__((__always_inline__, __nodebug__))
+static __inline float __attribute__((__always_inline__))
 _mm_cvtss_f32 (__m128 __A)
 {
   return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
 }
 
 /* Store four SPFP values.  The address must be 16-byte aligned.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_store_ps (float *__P, __m128 __A)
 {
   *(__v4sf *)__P = (__v4sf)__A;
 }
 
 /* Store four SPFP values.  The address need not be 16-byte aligned.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_storeu_ps (float *__P, __m128 __A)
 {
   __builtin_ia32_storeups (__P, (__v4sf)__A);
 }
 
 /* Store the lower SPFP value across four words.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_store1_ps (float *__P, __m128 __A)
 {
   __v4sf __va = (__v4sf)__A;
@@ -968,14 +972,14 @@
   _mm_storeu_ps (__P, __tmp);
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_store_ps1 (float *__P, __m128 __A)
 {
   _mm_store1_ps (__P, __A);
 }
 
 /* Store four SPFP values in reverse order.  The address must be aligned.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_storer_ps (float *__P, __m128 __A)
 {
   __v4sf __va = (__v4sf)__A;
@@ -984,7 +988,7 @@
 }
 
 /* Sets the low SPFP value of A from the low value of B.  */
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline __m128 __attribute__((__always_inline__))
 _mm_move_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
@@ -992,13 +996,13 @@
 
 /* Extracts one of the four words of A.  The selector N must be immediate.  */
 #if 0
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_extract_pi16 (__m64 const __A, int const __N)
 {
   return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _m_pextrw (__m64 const __A, int const __N)
 {
   return _mm_extract_pi16 (__A, __N);
@@ -1011,13 +1015,13 @@
 /* Inserts word D into one of four words of A.  The selector N must be
    immediate.  */
 #if 0
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
 {
   return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pinsrw (__m64 const __A, int const __D, int const __N)
 {
   return _mm_insert_pi16 (__A, __D, __N);
@@ -1029,65 +1033,65 @@
 #endif
 
 /* Compute the element-wise maximum of signed 16-bit values.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_max_pi16 (__m64 __A, __m64 __B)
 {
   return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pmaxsw (__m64 __A, __m64 __B)
 {
   return _mm_max_pi16 (__A, __B);
 }
 
 /* Compute the element-wise maximum of unsigned 8-bit values.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_max_pu8 (__m64 __A, __m64 __B)
 {
   return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pmaxub (__m64 __A, __m64 __B)
 {
   return _mm_max_pu8 (__A, __B);
 }
 
 /* Compute the element-wise minimum of signed 16-bit values.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_min_pi16 (__m64 __A, __m64 __B)
 {
   return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pminsw (__m64 __A, __m64 __B)
 {
   return _mm_min_pi16 (__A, __B);
 }
 
 /* Compute the element-wise minimum of unsigned 8-bit values.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_min_pu8 (__m64 __A, __m64 __B)
 {
   return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pminub (__m64 __A, __m64 __B)
 {
   return _mm_min_pu8 (__A, __B);
 }
 
 /* Create an 8-bit mask of the signs of 8-bit values.  */
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _mm_movemask_pi8 (__m64 __A)
 {
   return __builtin_ia32_pmovmskb ((__v8qi)__A);
 }
 
-static __inline int __attribute__((__always_inline__, __nodebug__))
+static __inline int __attribute__((__always_inline__))
 _m_pmovmskb (__m64 __A)
 {
   return _mm_movemask_pi8 (__A);
@@ -1095,13 +1099,13 @@
 
 /* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
    in B and produce the high 16 bits of the 32-bit results.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_mulhi_pu16 (__m64 __A, __m64 __B)
 {
   return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pmulhuw (__m64 __A, __m64 __B)
 {
   return _mm_mulhi_pu16 (__A, __B);
@@ -1110,13 +1114,13 @@
 /* Return a combination of the four 16-bit values in A.  The selector
    must be an immediate.  */
 #if 0
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_shuffle_pi16 (__m64 __A, int __N)
 {
   return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pshufw (__m64 __A, int __N)
 {
   return _mm_shuffle_pi16 (__A, __N);
@@ -1130,39 +1134,39 @@
 /* Conditionally store byte elements of A into P.  The high bit of each
    byte in the selector N determines whether the corresponding byte from
    A is stored.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
 {
   __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
 }
 
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _m_maskmovq (__m64 __A, __m64 __N, char *__P)
 {
   _mm_maskmove_si64 (__A, __N, __P);
 }
 
 /* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_avg_pu8 (__m64 __A, __m64 __B)
 {
   return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pavgb (__m64 __A, __m64 __B)
 {
   return _mm_avg_pu8 (__A, __B);
 }
 
 /* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_avg_pu16 (__m64 __A, __m64 __B)
 {
   return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_pavgw (__m64 __A, __m64 __B)
 {
   return _mm_avg_pu16 (__A, __B);
@@ -1171,13 +1175,13 @@
 /* Compute the sum of the absolute differences of the unsigned 8-bit
    values in A and B.  Return the value in the lower 16-bit word; the
    upper words are cleared.  */
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _mm_sad_pu8 (__m64 __A, __m64 __B)
 {
   return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
 }
 
-static __inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline __m64 __attribute__((__always_inline__))
 _m_psadbw (__m64 __A, __m64 __B)
 {
   return _mm_sad_pu8 (__A, __B);
@@ -1186,7 +1190,7 @@
 /* Loads one cache line from address P to a location "closer" to the
    processor.  The selector I specifies the type of prefetch operation.  */
 #if 0
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_prefetch (void *__P, enum _mm_hint __I)
 {
   __builtin_prefetch (__P, 0, __I);
@@ -1197,14 +1201,15 @@
 #endif
 
 /* Stores the data in A to the address P without polluting the caches.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_stream_pi (__m64 *__P, __m64 __A)
 {
-  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
+  /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+  __builtin_ia32_movntq (__P, __A);
 }
 
 /* Likewise.  The address must be 16-byte aligned.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_stream_ps (float *__P, __m128 __A)
 {
   __builtin_ia32_movntps (__P, (__v4sf)__A);
@@ -1212,7 +1217,7 @@
 
 /* Guarantees that every preceding store is globally visible before
    any subsequent store.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_sfence (void)
 {
   __builtin_ia32_sfence ();
@@ -1221,7 +1226,7 @@
 /* The execution of the next instruction is delayed by an implementation
    specific amount of time.  The instruction does not modify the
    architectural state.  */
-static __inline void __attribute__((__always_inline__, __nodebug__))
+static __inline void __attribute__((__always_inline__))
 _mm_pause (void)
 {
   __asm__ __volatile__ ("rep; nop" : : );
@@ -1244,6 +1249,9 @@
 } while (0)
 /* APPLE LOCAL end radar 4109832 */
 
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#undef __always_inline__
+/* APPLE LOCAL end nodebug inline 4152603 */
 
 /* For backward source compatibility.  */
 #include <emmintrin.h>

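To recap the mechanical change in the hunks above: the literal __nodebug__
drops out of every declaration, and the final hunk adds an
#undef __always_inline__ at the end of the header.  That pairs with the
"nodebug inline 4152603" local patch, which presumably defines
__always_inline__ as a macro near the top of the header (that hunk is not
shown here).  A minimal sketch of the assumed pattern, with a hypothetical
function name:

    /* Assumed header-top definition (not visible in this diff): make the
       short attribute spelling expand to carry __nodebug__ as well.  */
    #define __always_inline__ __always_inline__, __nodebug__

    /* Written with only __always_inline__, but preprocesses back to the
       (__always_inline__, __nodebug__) spelling seen on the '-' lines.  */
    static __inline float __attribute__((__always_inline__))
    my_sketch_identity (float __x)
    {
      return __x;
    }

    /* Undone at the end of the header, as the last hunk adds, so the
       macro cannot leak into user code.  */
    #undef __always_inline__

If that reading is right, the preprocessed output is unchanged; the
generated headers just spell the attribute lists more compactly.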
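The _mm_stream_pi hunk is the one change beyond the attribute cleanup: with
V1DImode for __m64 (radar 4656532), the builtin now takes the __m64 pointer
and value directly rather than going through unsigned long long casts.  As
an illustration only (ordinary client code with a hypothetical name, not
part of the patch), the intrinsics those last hunks touch are used like
this:

    #include <xmmintrin.h>

    /* Copy 64-bit chunks with non-temporal (cache-bypassing) stores,
       then fence so the stores become globally visible.  */
    static void
    my_copy_nt (__m64 *dst, const __m64 *src, int n)
    {
      int i;
      for (i = 0; i < n; i++)
        _mm_stream_pi (&dst[i], src[i]);
      _mm_sfence ();   /* order the streaming stores */
      _mm_empty ();    /* clear MMX state before any later FP code */
    }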

