+2014-02-10 Kirill Yukhin <kirill.yukhin@intel.com>
+ Ilya Tocar <ilya.tocar@intel.com>
+
+ * config/i386/avx512fintrin.h (_mm512_storeu_epi64): Removed.
+ (_mm512_loadu_epi32): Renamed into...
+ (_mm512_loadu_si512): This.
+ (_mm512_storeu_epi32): Renamed into...
+ (_mm512_storeu_si512): This.
+ (_mm512_maskz_ceil_ps): Removed.
+ (_mm512_maskz_ceil_pd): Ditto.
+ (_mm512_maskz_floor_ps): Ditto.
+ (_mm512_maskz_floor_pd): Ditto.
+ (_mm512_floor_round_ps): Ditto.
+ (_mm512_floor_round_pd): Ditto.
+ (_mm512_ceil_round_ps): Ditto.
+ (_mm512_ceil_round_pd): Ditto.
+ (_mm512_mask_floor_round_ps): Ditto.
+ (_mm512_mask_floor_round_pd): Ditto.
+ (_mm512_mask_ceil_round_ps): Ditto.
+ (_mm512_mask_ceil_round_pd): Ditto.
+ (_mm512_maskz_floor_round_ps): Ditto.
+ (_mm512_maskz_floor_round_pd): Ditto.
+ (_mm512_maskz_ceil_round_ps): Ditto.
+ (_mm512_maskz_ceil_round_pd): Ditto.
+ (_mm512_expand_pd): Ditto.
+ (_mm512_expand_ps): Ditto.
+ * config/i386/i386.c (ix86_builtins): Remove
+ IX86_BUILTIN_EXPANDPD512_NOMASK, IX86_BUILTIN_EXPANDPS512_NOMASK.
+ (bdesc_args): Ditto.
+ * config/i386/predicates.md (const1256_operand): New.
+ (const_1_to_2_operand): Ditto.
+ * config/i386/sse.md (avx512pf_gatherpf<mode>sf): Change hint value.
+ (*avx512pf_gatherpf<mode>sf_mask): Ditto.
+ (*avx512pf_gatherpf<mode>sf): Ditto.
+ (avx512pf_gatherpf<mode>df): Ditto.
+ (*avx512pf_gatherpf<mode>df_mask): Ditto.
+ (*avx512pf_gatherpf<mode>df): Ditto.
+ (avx512pf_scatterpf<mode>sf): Ditto.
+ (*avx512pf_scatterpf<mode>sf_mask): Ditto.
+ (*avx512pf_scatterpf<mode>sf): Ditto.
+ (avx512pf_scatterpf<mode>df): Ditto.
+ (*avx512pf_scatterpf<mode>df_mask): Ditto.
+ (*avx512pf_scatterpf<mode>df): Ditto.
+ (avx512f_expand<mode>): Removed.
+ (<shift_insn><mode>3<mask_name>): Change predicate type.
+
2014-02-08 Jakub Jelinek <jakub@redhat.com>
* tree-vect-data-refs.c (vect_analyze_data_refs): For clobbers
(__mmask8) __U);
}
-extern __inline void
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_storeu_epi64 (void *__P, __m512i __A)
-{
- __builtin_ia32_storedqudi512_mask ((__v8di *) __P, (__v8di) __A,
- (__mmask8) -1);
-}
-
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_loadu_epi32 (void const *__P)
+_mm512_loadu_si512 (void const *__P)
{
return (__m512i) __builtin_ia32_loaddqusi512_mask ((const __v16si *) __P,
(__v16si)
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_storeu_epi32 (void *__P, __m512i __A)
+_mm512_storeu_si512 (void *__P, __m512i __A)
{
__builtin_ia32_storedqusi512_mask ((__v16si *) __P, (__v16si) __A,
(__mmask16) -1);
_MM_FROUND_CUR_DIRECTION);
}
-extern __inline __m512
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_floor_ps (__mmask16 __U, __m512 __A)
-{
- return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
- _MM_FROUND_FLOOR,
- (__v16sf)
- _mm512_setzero_ps (),
- __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_floor_pd (__mmask8 __U, __m512d __A)
-{
- return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
- _MM_FROUND_FLOOR,
- (__v8df)
- _mm512_setzero_pd (),
- __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ceil_ps (__mmask16 __U, __m512 __A)
-{
- return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
- _MM_FROUND_CEIL,
- (__v16sf)
- _mm512_setzero_ps (),
- __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
-extern __inline __m512d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ceil_pd (__mmask8 __U, __m512d __A)
-{
- return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
- _MM_FROUND_CEIL,
- (__v8df)
- _mm512_setzero_pd (),
- __U,
- _MM_FROUND_CUR_DIRECTION);
-}
-
#ifdef __OPTIMIZE__
-extern __inline __m512
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_floor_round_ps (__m512 __A, const int __R)
-{
- return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
- _MM_FROUND_FLOOR,
- (__v16sf) __A, -1, __R);
-}
-
-extern __inline __m512d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_floor_round_pd (__m512d __A, const int __R)
-{
- return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
- _MM_FROUND_FLOOR,
- (__v8df) __A, -1, __R);
-}
-
-extern __inline __m512
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ceil_round_ps (__m512 __A, const int __R)
-{
- return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
- _MM_FROUND_CEIL,
- (__v16sf) __A, -1, __R);
-}
-
-extern __inline __m512d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_ceil_round_pd (__m512d __A, const int __R)
-{
- return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
- _MM_FROUND_CEIL,
- (__v8df) __A, -1, __R);
-}
-
-extern __inline __m512
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_floor_round_ps (__m512 __W, __mmask16 __U, __m512 __A,
- const int __R)
-{
- return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
- _MM_FROUND_FLOOR,
- (__v16sf) __W, __U, __R);
-}
-
-extern __inline __m512d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_floor_round_pd (__m512d __W, __mmask8 __U, __m512d __A,
- const int __R)
-{
- return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
- _MM_FROUND_FLOOR,
- (__v8df) __W, __U, __R);
-}
-
-extern __inline __m512
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ceil_round_ps (__m512 __W, __mmask16 __U, __m512 __A, const int __R)
-{
- return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
- _MM_FROUND_CEIL,
- (__v16sf) __W, __U, __R);
-}
-
-extern __inline __m512d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_ceil_round_pd (__m512d __W, __mmask8 __U, __m512d __A,
- const int __R)
-{
- return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
- _MM_FROUND_CEIL,
- (__v8df) __W, __U, __R);
-}
-
-extern __inline __m512
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_floor_round_ps (__mmask16 __U, __m512 __A, const int __R)
-{
- return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
- _MM_FROUND_FLOOR,
- (__v16sf)
- _mm512_setzero_ps (),
- __U, __R);
-}
-
-extern __inline __m512d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_floor_round_pd (__mmask8 __U, __m512d __A, const int __R)
-{
- return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
- _MM_FROUND_FLOOR,
- (__v8df)
- _mm512_setzero_pd (),
- __U, __R);
-}
-
-extern __inline __m512
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ceil_round_ps (__mmask16 __U, __m512 __A, const int __R)
-{
- return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
- _MM_FROUND_CEIL,
- (__v16sf)
- _mm512_setzero_ps (),
- __U, __R);
-}
-
-extern __inline __m512d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_ceil_round_pd (__mmask8 __U, __m512d __A, const int __R)
-{
- return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
- _MM_FROUND_CEIL,
- (__v8df)
- _mm512_setzero_pd (),
- __U, __R);
-}
-
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_alignr_epi32 (__m512i __A, __m512i __B, const int __imm)
(__mmask8) __U);
}
#else
-#define _mm512_floor_round_ps(A, R) \
- ((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(A), \
- _MM_FROUND_FLOOR, \
- (__v16sf)(__m512)(A), \
- (__mmask16)(-1), R))
-#define _mm512_mask_floor_round_ps(A, B, C, R) \
- ((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(C), \
- _MM_FROUND_FLOOR, \
- (__v16sf)(__m512)(A), \
- (__mmask16)(B), R))
-#define _mm512_maskz_floor_round_ps(A, B, R) \
- ((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(B), \
- _MM_FROUND_FLOOR, \
- (__v16sf)_mm512_setzero_ps(),\
- (__mmask16)(A), R))
-#define _mm512_floor_round_pd(A, R) \
- ((__m512d) __builtin_ia32_rndscalepd_mask ((__v8df)(__m512d)(A), \
- _MM_FROUND_FLOOR, \
- (__v8df)(__m512d)(A), \
- (__mmask8)(-1), R))
-#define _mm512_mask_floor_round_pd(A, B, C, R) \
- ((__m512d) __builtin_ia32_rndscalepd_mask ((__v8df)(__m512d)(C), \
- _MM_FROUND_FLOOR, \
- (__v8df)(__m512d)(A), \
- (__mmask8)(B), R))
-#define _mm512_maskz_floor_round_pd(A, B, R) \
- ((__m512d) __builtin_ia32_rndscalepd_mask ((__v8df)(__m512d)(B), \
- _MM_FROUND_FLOOR, \
- (__v8df)_mm512_setzero_pd(),\
- (__mmask8)(A), R))
-#define _mm512_ceil_round_ps(A, R) \
- ((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(A), \
- _MM_FROUND_CEIL, \
- (__v16sf)(__m512)(A), \
- (__mmask16)(-1), R))
-#define _mm512_mask_ceil_round_ps(A, B, C, R) \
- ((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(C), \
- _MM_FROUND_CEIL, \
- (__v16sf)(__m512)(A), \
- (__mmask16)(B), R))
-#define _mm512_maskz_ceil_round_ps(A, B, R) \
- ((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(B), \
- _MM_FROUND_CEIL, \
- (__v16sf)_mm512_setzero_ps(),\
- (__mmask16)(A), R))
-#define _mm512_ceil_round_pd(A, R) \
- ((__m512d) __builtin_ia32_rndscalepd_mask ((__v8df)(__m512d)(A), \
- _MM_FROUND_CEIL, \
- (__v8df)(__m512d)(A), \
- (__mmask8)(-1), R))
-#define _mm512_mask_ceil_round_pd(A, B, C, R) \
- ((__m512d) __builtin_ia32_rndscalepd_mask ((__v8df)(__m512d)(C), \
- _MM_FROUND_CEIL, \
- (__v8df)(__m512d)(A), \
- (__mmask8)(B), R))
-#define _mm512_maskz_ceil_round_pd(A, B, R) \
- ((__m512d) __builtin_ia32_rndscalepd_mask ((__v8df)(__m512d)(B), \
- _MM_FROUND_CEIL, \
- (__v8df)_mm512_setzero_pd(),\
- (__mmask8)(A), R))
-
#define _mm512_alignr_epi32(X, Y, C) \
((__m512i)__builtin_ia32_alignd512_mask ((__v16si)(__m512i)(X), \
(__v16si)(__m512i)(Y), (int)(C), (__v16si)(__m512i)(X), \
(__mmask16) __U);
}
-extern __inline __m512d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_expand_pd (__m512d __A)
-{
- return (__m512d) __builtin_ia32_expanddf512 ((__v8df) __A);
-}
-
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A)
(__mmask8) __U);
}
-extern __inline __m512
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_expand_ps (__m512 __A)
-{
- return (__m512) __builtin_ia32_expandsf512 ((__v16sf) __A);
-}
-
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A)
IX86_BUILTIN_DIVPS512,
IX86_BUILTIN_DIVSD_ROUND,
IX86_BUILTIN_DIVSS_ROUND,
- IX86_BUILTIN_EXPANDPD512_NOMASK,
IX86_BUILTIN_EXPANDPD512,
IX86_BUILTIN_EXPANDPD512Z,
IX86_BUILTIN_EXPANDPDLOAD512,
IX86_BUILTIN_EXPANDPDLOAD512Z,
- IX86_BUILTIN_EXPANDPS512_NOMASK,
IX86_BUILTIN_EXPANDPS512,
IX86_BUILTIN_EXPANDPS512Z,
IX86_BUILTIN_EXPANDPSLOAD512,
{ OPTION_MASK_ISA_AVX512F, CODE_FOR_avx512f_vcvtps2ph512_mask, "__builtin_ia32_vcvtps2ph512_mask", IX86_BUILTIN_CVTPS2PH512, UNKNOWN, (int) V16HI_FTYPE_V16SF_INT_V16HI_HI },
{ OPTION_MASK_ISA_AVX512F, CODE_FOR_ufloatv8siv8df_mask, "__builtin_ia32_cvtudq2pd512_mask", IX86_BUILTIN_CVTUDQ2PD512, UNKNOWN, (int) V8DF_FTYPE_V8SI_V8DF_QI },
{ OPTION_MASK_ISA_AVX512F, CODE_FOR_cvtusi2sd32, "__builtin_ia32_cvtusi2sd32", IX86_BUILTIN_CVTUSI2SD32, UNKNOWN, (int) V2DF_FTYPE_V2DF_UINT },
- { OPTION_MASK_ISA_AVX512F, CODE_FOR_avx512f_expandv8df, "__builtin_ia32_expanddf512", IX86_BUILTIN_EXPANDPD512_NOMASK, UNKNOWN, (int) V8DF_FTYPE_V8DF },
{ OPTION_MASK_ISA_AVX512F, CODE_FOR_avx512f_expandv8df_mask, "__builtin_ia32_expanddf512_mask", IX86_BUILTIN_EXPANDPD512, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_QI },
{ OPTION_MASK_ISA_AVX512F, CODE_FOR_avx512f_expandv8df_maskz, "__builtin_ia32_expanddf512_maskz", IX86_BUILTIN_EXPANDPD512Z, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_QI },
- { OPTION_MASK_ISA_AVX512F, CODE_FOR_avx512f_expandv16sf, "__builtin_ia32_expandsf512", IX86_BUILTIN_EXPANDPS512_NOMASK, UNKNOWN, (int) V16SF_FTYPE_V16SF },
{ OPTION_MASK_ISA_AVX512F, CODE_FOR_avx512f_expandv16sf_mask, "__builtin_ia32_expandsf512_mask", IX86_BUILTIN_EXPANDPS512, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_HI },
{ OPTION_MASK_ISA_AVX512F, CODE_FOR_avx512f_expandv16sf_maskz, "__builtin_ia32_expandsf512_maskz", IX86_BUILTIN_EXPANDPS512Z, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_HI },
{ OPTION_MASK_ISA_AVX512F, CODE_FOR_avx512f_vextractf32x4_mask, "__builtin_ia32_extractf32x4_mask", IX86_BUILTIN_EXTRACTF32X4, UNKNOWN, (int) V4SF_FTYPE_V16SF_INT_V4SF_QI },
return i == 2 || i == 4 || i == 8;
})
+;; Match 1, 2, 5, or 6
+(define_predicate "const1256_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT i = INTVAL (op);
+ return i == 1 || i == 2 || i == 5 || i == 6;
+})
+
;; Match 1, 2, 4, or 8
(define_predicate "const1248_operand"
(match_code "const_int")
return val <= 255*8 && val % 8 == 0;
})
+;; Match 1 to 2.
+(define_predicate "const_1_to_2_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 1, 2)")))
+
;; Return true if OP is CONST_INT >= 1 and <= 31 (a valid operand
;; for shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_31_operand"
UNSPEC_COMPRESS
UNSPEC_COMPRESS_STORE
UNSPEC_EXPAND
- UNSPEC_EXPAND_NOMASK
UNSPEC_MASKED_EQ
UNSPEC_MASKED_GT
(define_insn "<shift_insn><mode>3<mask_name>"
[(set (match_operand:VI48_512 0 "register_operand" "=v,v")
(any_lshift:VI48_512
- (match_operand:VI48_512 1 "register_operand" "v,m")
+ (match_operand:VI48_512 1 "nonimmediate_operand" "v,m")
(match_operand:SI 2 "nonmemory_operand" "vN,N")))]
"TARGET_AVX512F && <mask_mode512bit_condition>"
"vp<vshift><ssemodesuffix>\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(match_operand 2 "vsib_address_operand")
(match_operand:VI48_512 1 "register_operand")
(match_operand:SI 3 "const1248_operand")]))
- (match_operand:SI 4 "const_0_to_1_operand")]
+ (match_operand:SI 4 "const_1_to_2_operand")]
UNSPEC_GATHER_PREFETCH)]
"TARGET_AVX512PF"
{
(match_operand:VI48_512 1 "register_operand" "v")
(match_operand:SI 3 "const1248_operand" "n")]
UNSPEC_VSIBADDR)])
- (match_operand:SI 4 "const_0_to_1_operand" "n")]
+ (match_operand:SI 4 "const_1_to_2_operand" "n")]
UNSPEC_GATHER_PREFETCH)]
"TARGET_AVX512PF"
{
switch (INTVAL (operands[4]))
{
- case 0:
- return "vgatherpf0<ssemodesuffix>ps\t{%5%{%0%}|%5%{%0%}}";
case 1:
+ return "vgatherpf0<ssemodesuffix>ps\t{%5%{%0%}|%5%{%0%}}";
+ case 2:
return "vgatherpf1<ssemodesuffix>ps\t{%5%{%0%}|%5%{%0%}}";
default:
gcc_unreachable ();
(match_operand:VI48_512 0 "register_operand" "v")
(match_operand:SI 2 "const1248_operand" "n")]
UNSPEC_VSIBADDR)])
- (match_operand:SI 3 "const_0_to_1_operand" "n")]
+ (match_operand:SI 3 "const_1_to_2_operand" "n")]
UNSPEC_GATHER_PREFETCH)]
"TARGET_AVX512PF"
{
switch (INTVAL (operands[3]))
{
- case 0:
- return "vgatherpf0<ssemodesuffix>ps\t{%4|%4}";
case 1:
+ return "vgatherpf0<ssemodesuffix>ps\t{%4|%4}";
+ case 2:
return "vgatherpf1<ssemodesuffix>ps\t{%4|%4}";
default:
gcc_unreachable ();
[(match_operand 2 "vsib_address_operand")
(match_operand:VI4_256_8_512 1 "register_operand")
(match_operand:SI 3 "const1248_operand")]))
- (match_operand:SI 4 "const_0_to_1_operand")]
+ (match_operand:SI 4 "const_1_to_2_operand")]
UNSPEC_GATHER_PREFETCH)]
"TARGET_AVX512PF"
{
(match_operand:VI4_256_8_512 1 "register_operand" "v")
(match_operand:SI 3 "const1248_operand" "n")]
UNSPEC_VSIBADDR)])
- (match_operand:SI 4 "const_0_to_1_operand" "n")]
+ (match_operand:SI 4 "const_1_to_2_operand" "n")]
UNSPEC_GATHER_PREFETCH)]
"TARGET_AVX512PF"
{
switch (INTVAL (operands[4]))
{
- case 0:
- return "vgatherpf0<ssemodesuffix>pd\t{%5%{%0%}|%5%{%0%}}";
case 1:
+ return "vgatherpf0<ssemodesuffix>pd\t{%5%{%0%}|%5%{%0%}}";
+ case 2:
return "vgatherpf1<ssemodesuffix>pd\t{%5%{%0%}|%5%{%0%}}";
default:
gcc_unreachable ();
(match_operand:VI4_256_8_512 0 "register_operand" "v")
(match_operand:SI 2 "const1248_operand" "n")]
UNSPEC_VSIBADDR)])
- (match_operand:SI 3 "const_0_to_1_operand" "n")]
+ (match_operand:SI 3 "const_1_to_2_operand" "n")]
UNSPEC_GATHER_PREFETCH)]
"TARGET_AVX512PF"
{
switch (INTVAL (operands[3]))
{
- case 0:
- return "vgatherpf0<ssemodesuffix>pd\t{%4|%4}";
case 1:
+ return "vgatherpf0<ssemodesuffix>pd\t{%4|%4}";
+ case 2:
return "vgatherpf1<ssemodesuffix>pd\t{%4|%4}";
default:
gcc_unreachable ();
[(match_operand 2 "vsib_address_operand")
(match_operand:VI48_512 1 "register_operand")
(match_operand:SI 3 "const1248_operand")]))
- (match_operand:SI 4 "const_0_to_1_operand")]
+ (match_operand:SI 4 "const1256_operand")]
UNSPEC_SCATTER_PREFETCH)]
"TARGET_AVX512PF"
{
(match_operand:VI48_512 1 "register_operand" "v")
(match_operand:SI 3 "const1248_operand" "n")]
UNSPEC_VSIBADDR)])
- (match_operand:SI 4 "const_0_to_1_operand" "n")]
+ (match_operand:SI 4 "const1256_operand" "n")]
UNSPEC_SCATTER_PREFETCH)]
"TARGET_AVX512PF"
{
switch (INTVAL (operands[4]))
{
- case 0:
- return "vscatterpf0<ssemodesuffix>ps\t{%5%{%0%}|%5%{%0%}}";
case 1:
+ case 5:
+ return "vscatterpf0<ssemodesuffix>ps\t{%5%{%0%}|%5%{%0%}}";
+ case 2:
+ case 6:
return "vscatterpf1<ssemodesuffix>ps\t{%5%{%0%}|%5%{%0%}}";
default:
gcc_unreachable ();
(match_operand:VI48_512 0 "register_operand" "v")
(match_operand:SI 2 "const1248_operand" "n")]
UNSPEC_VSIBADDR)])
- (match_operand:SI 3 "const_0_to_1_operand" "n")]
+ (match_operand:SI 3 "const1256_operand" "n")]
UNSPEC_SCATTER_PREFETCH)]
"TARGET_AVX512PF"
{
switch (INTVAL (operands[3]))
{
- case 0:
- return "vscatterpf0<ssemodesuffix>ps\t{%4|%4}";
case 1:
+ case 5:
+ return "vscatterpf0<ssemodesuffix>ps\t{%4|%4}";
+ case 2:
+ case 6:
return "vscatterpf1<ssemodesuffix>ps\t{%4|%4}";
default:
gcc_unreachable ();
[(match_operand 2 "vsib_address_operand")
(match_operand:VI4_256_8_512 1 "register_operand")
(match_operand:SI 3 "const1248_operand")]))
- (match_operand:SI 4 "const_0_to_1_operand")]
+ (match_operand:SI 4 "const1256_operand")]
UNSPEC_SCATTER_PREFETCH)]
"TARGET_AVX512PF"
{
(match_operand:VI4_256_8_512 1 "register_operand" "v")
(match_operand:SI 3 "const1248_operand" "n")]
UNSPEC_VSIBADDR)])
- (match_operand:SI 4 "const_0_to_1_operand" "n")]
+ (match_operand:SI 4 "const1256_operand" "n")]
UNSPEC_SCATTER_PREFETCH)]
"TARGET_AVX512PF"
{
switch (INTVAL (operands[4]))
{
- case 0:
- return "vscatterpf0<ssemodesuffix>pd\t{%5%{%0%}|%5%{%0%}}";
case 1:
+ case 5:
+ return "vscatterpf0<ssemodesuffix>pd\t{%5%{%0%}|%5%{%0%}}";
+ case 2:
+ case 6:
return "vscatterpf1<ssemodesuffix>pd\t{%5%{%0%}|%5%{%0%}}";
default:
gcc_unreachable ();
(match_operand:VI4_256_8_512 0 "register_operand" "v")
(match_operand:SI 2 "const1248_operand" "n")]
UNSPEC_VSIBADDR)])
- (match_operand:SI 3 "const_0_to_1_operand" "n")]
+ (match_operand:SI 3 "const1256_operand" "n")]
UNSPEC_SCATTER_PREFETCH)]
"TARGET_AVX512PF"
{
switch (INTVAL (operands[3]))
{
- case 0:
- return "vscatterpf0<ssemodesuffix>pd\t{%4|%4}";
case 1:
+ case 5:
+ return "vscatterpf0<ssemodesuffix>pd\t{%4|%4}";
+ case 2:
+ case 6:
return "vscatterpf1<ssemodesuffix>pd\t{%4|%4}";
default:
gcc_unreachable ();
"TARGET_AVX512F"
"operands[2] = CONST0_RTX (<MODE>mode);")
-(define_insn "avx512f_expand<mode>"
- [(set (match_operand:VI48F_512 0 "register_operand" "=v,v")
- (unspec:VI48F_512
- [(match_operand:VI48F_512 1 "nonimmediate_operand" "v,m")]
- UNSPEC_EXPAND_NOMASK))]
- "TARGET_AVX512F"
- "v<sseintprefix>expand<ssemodesuffix>\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "prefix" "evex")
- (set_attr "memory" "none,load")
- (set_attr "mode" "<sseinsnmode>")])
-
(define_insn "avx512f_expand<mode>_mask"
[(set (match_operand:VI48F_512 0 "register_operand" "=v,v")
(unspec:VI48F_512
+2014-02-10 Kirill Yukhin <kirill.yukhin@intel.com>
+ Ilya Tocar <ilya.tocar@intel.com>
+
+ * gcc.target/i386/avx512f-vexpandpd-1.c: Update intrinsics.
+ * gcc.target/i386/avx512f-vexpandps-1.c: Ditto.
+ * gcc.target/i386/avx512f-vexpandpd-2.c: Ditto.
+ * gcc.target/i386/avx512f-vexpandps-2.c: Ditto.
+ * gcc.target/i386/avx512f-vmovdqu32-1.c: Ditto.
+ * gcc.target/i386/avx512f-vmovdqu32-2.c: Ditto.
+ * gcc.target/i386/avx512f-vmovdqu64-1.c: Ditto.
+ * gcc.target/i386/avx512f-vmovdqu64-2.c: Ditto.
+ * gcc.target/i386/avx512f-vpcmpd-2.c: Ditto.
+ * gcc.target/i386/avx512f-vpcmpq-2.c: Ditto.
+ * gcc.target/i386/avx512f-vpcmpud-2.c: Ditto.
+ * gcc.target/i386/avx512f-vpcmpuq-2.c: Ditto.
+ * gcc.target/i386/avx512f-vrndscalepd-1.c: Ditto.
+ * gcc.target/i386/avx512f-vrndscaleps-1.c: Ditto.
+ * gcc.target/i386/avx512f-vrndscalepd-2.c: Ditto.
+ * gcc.target/i386/avx512f-vrndscaleps-2.c: Ditto.
+ * gcc.target/i386/avx512pf-vgatherpf0dpd-1.c: Update parameters.
+ * gcc.target/i386/avx512pf-vgatherpf0dps-1.c: Ditto.
+ * gcc.target/i386/avx512pf-vgatherpf0qpd-1.c: Ditto.
+ * gcc.target/i386/avx512pf-vgatherpf0qps-1.c: Ditto.
+ * gcc.target/i386/avx512pf-vgatherpf1dpd-1.c: Ditto.
+ * gcc.target/i386/avx512pf-vgatherpf1dps-1.c: Ditto.
+ * gcc.target/i386/avx512pf-vgatherpf1qpd-1.c: Ditto.
+ * gcc.target/i386/avx512pf-vgatherpf1qps-1.c: Ditto.
+ * gcc.target/i386/avx512f-vpsrad-2.c: Initialize 64 bits.
+ * gcc.target/i386/avx512f-vpslld-2.c: Ditto.
+ * gcc.target/i386/avx512f-vpsrld-2.c: Ditto.
+
2014-02-10 Jakub Jelinek <jakub@redhat.com>
* gcc.dg/vect/pr59984.c: Require effective target
/* { dg-do compile } */
/* { dg-options "-mavx512f -O2" } */
-/* { dg-final { scan-assembler-times "vexpandpd\[ \\t\]+\[^\n\]*%zmm\[0-9\]" 5 } } */
+/* { dg-final { scan-assembler-times "vexpandpd\[ \\t\]+\[^\n\]*%zmm\[0-9\]" 4 } } */
/* { dg-final { scan-assembler-times "vexpandpd\[ \\t\]+\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 2 } } */
/* { dg-final { scan-assembler-times "vexpandpd\[ \\t\]+\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\{z\}" 2 } } */
void extern
avx512f_test (void)
{
- x = _mm512_expand_pd (x);
x = _mm512_mask_expand_pd (x, m, x);
x = _mm512_maskz_expand_pd (m, x);
static void
TEST (void)
{
- UNION_TYPE (AVX512F_LEN, d) s1, res1, res2, res3, res4, res5;
+ UNION_TYPE (AVX512F_LEN, d) s1, res2, res3, res4, res5;
MASK_TYPE mask = MASK_VALUE;
double s2[SIZE];
double res_ref1[SIZE];
sign = -sign;
}
- res1.x = INTRINSIC (_expand_pd) (s1.x);
res2.x = INTRINSIC (_mask_expand_pd) (res2.x, mask, s1.x);
res3.x = INTRINSIC (_maskz_expand_pd) (mask, s1.x);
res4.x = INTRINSIC (_mask_expandloadu_pd) (res4.x, mask, s2);
CALC (s1.a, res_ref2, mask);
CALC (s2, res_ref3, mask);
- if (UNION_CHECK (AVX512F_LEN, d) (res1, res_ref1))
- abort ();
-
MASK_MERGE (d) (res_ref2, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN, d) (res2, res_ref2))
abort ();
/* { dg-do compile } */
/* { dg-options "-mavx512f -O2" } */
-/* { dg-final { scan-assembler-times "vexpandps\[ \\t\]+\[^\n\]*%zmm\[0-9\]" 5 } } */
+/* { dg-final { scan-assembler-times "vexpandps\[ \\t\]+\[^\n\]*%zmm\[0-9\]" 4 } } */
/* { dg-final { scan-assembler-times "vexpandps\[ \\t\]+\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 2 } } */
/* { dg-final { scan-assembler-times "vexpandps\[ \\t\]+\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\{z\}" 2 } } */
void extern
avx512f_test (void)
{
- x = _mm512_expand_ps (x);
x = _mm512_mask_expand_ps (x, m, x);
x = _mm512_maskz_expand_ps (m, x);
static void
TEST (void)
{
- UNION_TYPE (AVX512F_LEN, ) s1, res1, res2, res3, res4, res5;
+ UNION_TYPE (AVX512F_LEN, ) s1, res2, res3, res4, res5;
MASK_TYPE mask = MASK_VALUE;
float s2[SIZE];
float res_ref1[SIZE];
sign = -sign;
}
- res1.x = INTRINSIC (_expand_ps) (s1.x);
res2.x = INTRINSIC (_mask_expand_ps) (res2.x, mask, s1.x);
res3.x = INTRINSIC (_maskz_expand_ps) (mask, s1.x);
res4.x = INTRINSIC (_mask_expandloadu_ps) (res4.x, mask, s2);
CALC (s1.a, res_ref2, mask);
CALC (s2, res_ref3, mask);
- if (UNION_CHECK (AVX512F_LEN, ) (res1, res_ref1))
- abort ();
-
MASK_MERGE () (res_ref2, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN, ) (res2, res_ref2))
abort ();
void extern
avx512f_test (void)
{
- x = _mm512_loadu_epi32 (p);
+ x = _mm512_loadu_si512 (p);
x = _mm512_mask_loadu_epi32 (x, m, p);
x = _mm512_maskz_loadu_epi32 (m, p);
- _mm512_storeu_epi32 (p, x);
+ _mm512_storeu_si512 (p, x);
_mm512_mask_storeu_epi32 (p, m, x);
}
}
#if AVX512F_LEN == 512
- res1.x = _mm512_loadu_epi32 (s1.a);
- _mm512_storeu_epi32 (res2.a, s2.x);
+ res1.x = _mm512_loadu_si512 (s1.a);
+ _mm512_storeu_si512 (res2.a, s2.x);
#endif
res3.x = INTRINSIC (_mask_loadu_epi32) (res3.x, mask, s1.a);
res4.x = INTRINSIC (_maskz_loadu_epi32) (mask, s1.a);
/* { dg-final { scan-assembler-times "vmovdqu64\[ \\t\]+\[^\n\]*\\)\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqu64\[ \\t\]+\[^\n\]*\\)\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\{z\}" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqu64\[ \\t\]+\[^\n\]*%zmm\[0-9\]\[^\n\]*\\)\{%k\[1-7\]\}\[^\{\]" 1 } } */
-/* { dg-final { scan-assembler-times "vmovdqu64\[ \\t\]+\[^\n\]*%zmm\[0-9\]" 4 } } */
+/* { dg-final { scan-assembler-times "vmovdqu64\[ \\t\]+\[^\n\]*%zmm\[0-9\]" 3 } } */
#include <immintrin.h>
x = _mm512_maskz_loadu_epi64 (m, p);
_mm512_mask_storeu_epi64 (p, m, x);
- _mm512_storeu_epi64 (p, x);
}
TEST (void)
{
UNION_TYPE (AVX512F_LEN, i_q) s2, res1, res2;
- EVAL(unaligned_array, AVX512F_LEN,) s1, res3, res4;
+ EVAL(unaligned_array, AVX512F_LEN,) s1, res3;
MASK_TYPE mask = MASK_VALUE;
int i, sign = 1;
res1.x = INTRINSIC (_mask_loadu_epi64) (res1.x, mask, s1.a);
res2.x = INTRINSIC (_maskz_loadu_epi64) (mask, s1.a);
INTRINSIC (_mask_storeu_epi64) (res3.a, mask, s2.x);
- INTRINSIC (_storeu_epi64) (res4.a, s2.x);
MASK_MERGE (i_q) (s1.a, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN, i_q) (res1, s1.a))
if (UNION_CHECK (AVX512F_LEN, i_q) (res2, s1.a))
abort ();
- if (UNION_CHECK (AVX512F_LEN, i_q) (s2, res4.a))
- abort ();
-
MASK_MERGE (i_q) (s2.a, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN, i_q) (s2, res3.a))
abort ();
{ \
dst_ref = ((rel) << i) | dst_ref; \
} \
- source1.x = _mm512_loadu_epi32 (s1); \
- source2.x = _mm512_loadu_epi32 (s2); \
+ source1.x = _mm512_loadu_si512 (s1); \
+ source2.x = _mm512_loadu_si512 (s2); \
dst1 = _mm512_cmp_epi32_mask (source1.x, source2.x, imm);\
dst2 = _mm512_mask_cmp_epi32_mask (mask, source1.x, source2.x, imm);\
if (dst_ref != dst1) abort(); \
{ \
dst_ref = ((rel) << i) | dst_ref; \
} \
- source1.x = _mm512_loadu_epi32 (s1); \
- source2.x = _mm512_loadu_epi32 (s2); \
+ source1.x = _mm512_loadu_si512 (s1); \
+ source2.x = _mm512_loadu_si512 (s2); \
dst1 = _mm512_cmp_epi64_mask (source1.x, source2.x, imm);\
dst2 = _mm512_mask_cmp_epi64_mask (mask, source1.x, source2.x, imm);\
if (dst_ref != dst1) abort(); \
{ \
dst_ref = ((rel) << i) | dst_ref; \
} \
- source1.x = _mm512_loadu_epi32 (s1); \
- source2.x = _mm512_loadu_epi32 (s2); \
+ source1.x = _mm512_loadu_si512 (s1); \
+ source2.x = _mm512_loadu_si512 (s2); \
dst1 = _mm512_cmp_epu32_mask (source1.x, source2.x, imm);\
dst2 = _mm512_mask_cmp_epu32_mask (mask, source1.x, source2.x, imm);\
if (dst_ref != dst1) abort(); \
{ \
dst_ref = ((rel) << i) | dst_ref; \
} \
- source1.x = _mm512_loadu_epi32 (s1); \
- source2.x = _mm512_loadu_epi32 (s2); \
+ source1.x = _mm512_loadu_si512 (s1); \
+ source2.x = _mm512_loadu_si512 (s2); \
dst1 = _mm512_cmp_epu64_mask (source1.x, source2.x, imm);\
dst2 = _mm512_mask_cmp_epu64_mask (mask, source1.x, source2.x, imm);\
if (dst_ref != dst1) abort(); \
#define SIZE (AVX512F_LEN / 32)
#include "avx512f-mask-type.h"
-CALC (int *r, int *s1, int* s2)
+CALC (int *r, int *s1, long long* s2)
{
int i;
int count = s2[0];
{
int i, sign;
UNION_TYPE (AVX512F_LEN, i_d) res1, res2, res3, src1;
- UNION_TYPE (128, i_d) src2;
+ UNION_TYPE (128, i_q) src2;
MASK_TYPE mask = MASK_VALUE;
int res_ref[SIZE];
#define SIZE (AVX512F_LEN / 32)
#include "avx512f-mask-type.h"
-CALC (int *r, int *s1, int *s2)
+CALC (int *r, int *s1, long long *s2)
{
int i;
int count = s2[0];
{
int i, sign;
UNION_TYPE (AVX512F_LEN, i_d) res1, res2, res3, src1;
- UNION_TYPE (128, i_d) src2;
+ UNION_TYPE (128, i_q) src2;
MASK_TYPE mask = MASK_VALUE;
int res_ref[SIZE];
#define SIZE (AVX512F_LEN / 32)
#include "avx512f-mask-type.h"
-CALC (unsigned int *r, unsigned int *s1, unsigned int* s2)
+CALC (unsigned int *r, unsigned int *s1, unsigned long long* s2)
{
int i;
unsigned int count = s2[0];
{
int i;
UNION_TYPE (AVX512F_LEN, i_d) res1, res2, res3, src1;
- UNION_TYPE (128, i_d) src2;
+ UNION_TYPE (128, i_q) src2;
MASK_TYPE mask = MASK_VALUE;
unsigned int res_ref[SIZE];
/* { dg-do compile } */
/* { dg-options "-mavx512f -O2" } */
-/* { dg-final { scan-assembler-times "vrndscalepd\[ \\t\]+\[^\n\]*%zmm\[0-9\]\[^\n\]*%zmm\[0-9\]\[^\{\]" 6} } */
-/* { dg-final { scan-assembler-times "vrndscalepd\[ \\t\]+\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 9} } */
-/* { dg-final { scan-assembler-times "vrndscalepd\[ \\t\]+\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\{z\}" 3} } */
-/* { dg-final { scan-assembler-times "vrndscalepd\[ \\t\]+\\S*,\[ \\t\]+\{sae\}\[^\n\]*%zmm\[0-9\]\[^\n\]*%zmm\[0-9\]\[^\{\]" 3} } */
-/* { dg-final { scan-assembler-times "vrndscalepd\[ \\t\]+\\S*,\[ \\t\]+\{sae\}\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 6} } */
+/* { dg-final { scan-assembler-times "vrndscalepd\[ \\t\]+\[^\n\]*%zmm\[0-9\]\[^\n\]*%zmm\[0-9\]\[^\{\]" 4 } } */
+/* { dg-final { scan-assembler-times "vrndscalepd\[ \\t\]+\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 4 } } */
+/* { dg-final { scan-assembler-times "vrndscalepd\[ \\t\]+\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\{z\}" 2 } } */
+/* { dg-final { scan-assembler-times "vrndscalepd\[ \\t\]+\\S*,\[ \\t\]+\{sae\}\[^\n\]*%zmm\[0-9\]\[^\n\]*%zmm\[0-9\]\[^\{\]" 1 } } */
+/* { dg-final { scan-assembler-times "vrndscalepd\[ \\t\]+\\S*,\[ \\t\]+\{sae\}\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 1 } } */
#include <immintrin.h>
x = _mm512_mask_ceil_pd (x, 2, x);
x = _mm512_mask_floor_pd (x, 2, x);
x = _mm512_maskz_roundscale_pd (2, x, 0x42);
- x = _mm512_maskz_ceil_pd (2, x);
- x = _mm512_maskz_floor_pd (2, x);
x = _mm512_roundscale_round_pd (x, 0x42, _MM_FROUND_NO_EXC);
- x = _mm512_ceil_round_pd (x, _MM_FROUND_NO_EXC);
- x = _mm512_floor_round_pd (x, _MM_FROUND_NO_EXC);
x = _mm512_mask_roundscale_round_pd (x, 2, x, 0x42, _MM_FROUND_NO_EXC);
- x = _mm512_mask_ceil_round_pd (x, 2, x, _MM_FROUND_NO_EXC);
- x = _mm512_mask_floor_round_pd (x, 2, x, _MM_FROUND_NO_EXC);
x = _mm512_maskz_roundscale_round_pd (2, x, 0x42, _MM_FROUND_NO_EXC);
- x = _mm512_maskz_ceil_round_pd (2, x, _MM_FROUND_NO_EXC);
- x = _mm512_maskz_floor_round_pd (2, x, _MM_FROUND_NO_EXC);
}
imm = _MM_FROUND_FLOOR;
res1.x = INTRINSIC (_floor_pd) (s.x);
res2.x = INTRINSIC (_mask_floor_pd) (res2.x, mask, s.x);
- res3.x = INTRINSIC (_maskz_floor_pd) (mask, s.x);
break;
case 2:
imm = _MM_FROUND_CEIL;
res1.x = INTRINSIC (_ceil_pd) (s.x);
res2.x = INTRINSIC (_mask_ceil_pd) (res2.x, mask, s.x);
- res3.x = INTRINSIC (_maskz_ceil_pd) (mask, s.x);
break;
}
MASK_ZERO(d) (res_ref,mask,SIZE );
- if (UNION_CHECK (AVX512F_LEN, d) (res3, res_ref))
+ if (!i && UNION_CHECK (AVX512F_LEN, d) (res3, res_ref))
abort ();
}
/* { dg-do compile } */
/* { dg-options "-mavx512f -O2" } */
-/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\[^\n\]*%zmm\[0-9\]+\[^\n\]*%zmm\[0-9\]\[^\{\]" 6} } */
-/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 9} } */
-/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\{z\}" 3} } */
-/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\\S*,\[ \\t\]+\{sae\}\[^\n\]*%zmm\[0-9\]+\[^\n\]*%zmm\[0-9\]\[^\{\]" 3} } */
-/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\\S*,\[ \\t\]+\{sae\}\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 6} } */
+/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\[^\n\]*%zmm\[0-9\]+\[^\n\]*%zmm\[0-9\]\[^\{\]" 4 } } */
+/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 4 } } */
+/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\{z\}" 2 } } */
+/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\\S*,\[ \\t\]+\{sae\}\[^\n\]*%zmm\[0-9\]+\[^\n\]*%zmm\[0-9\]\[^\{\]" 1 } } */
+/* { dg-final { scan-assembler-times "vrndscaleps\[ \\t\]+\\S*,\[ \\t\]+\{sae\}\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 1 } } */
#include <immintrin.h>
x = _mm512_mask_ceil_ps (x, 2, x);
x = _mm512_mask_floor_ps (x, 2, x);
x = _mm512_maskz_roundscale_ps (2, x, 0x42);
- x = _mm512_maskz_ceil_ps (2, x);
- x = _mm512_maskz_floor_ps (2, x);
x = _mm512_roundscale_round_ps (x, 0x42, _MM_FROUND_NO_EXC);
- x = _mm512_ceil_round_ps (x, _MM_FROUND_NO_EXC);
- x = _mm512_floor_round_ps (x, _MM_FROUND_NO_EXC);
x = _mm512_mask_roundscale_round_ps (x, 2, x, 0x42, _MM_FROUND_NO_EXC);
- x = _mm512_mask_ceil_round_ps (x, 2, x, _MM_FROUND_NO_EXC);
- x = _mm512_mask_floor_round_ps (x, 2, x, _MM_FROUND_NO_EXC);
x = _mm512_maskz_roundscale_round_ps (2, x, 0x42, _MM_FROUND_NO_EXC);
- x = _mm512_maskz_ceil_round_ps (2, x, _MM_FROUND_NO_EXC);
- x = _mm512_maskz_floor_round_ps (2, x, _MM_FROUND_NO_EXC);
}
imm = _MM_FROUND_FLOOR;
res1.x = INTRINSIC (_floor_ps) (s.x);
res2.x = INTRINSIC (_mask_floor_ps) (res2.x, mask, s.x);
- res3.x = INTRINSIC (_maskz_floor_ps) (mask, s.x);
break;
case 2:
imm = _MM_FROUND_CEIL;
res1.x = INTRINSIC (_ceil_ps) (s.x);
res2.x = INTRINSIC (_mask_ceil_ps) (res2.x, mask, s.x);
- res3.x = INTRINSIC (_maskz_ceil_ps) (mask, s.x);
break;
}
MASK_ZERO ()(res_ref, mask, SIZE);
- if (UNION_CHECK (AVX512F_LEN,) (res3, res_ref))
+ if (!i && UNION_CHECK (AVX512F_LEN,) (res3, res_ref))
abort ();
}
void extern
avx512pf_test (void)
{
- _mm512_mask_prefetch_i32gather_pd (idx, m8, base, 8, 0);
+ _mm512_mask_prefetch_i32gather_pd (idx, m8, base, 8, 1);
}
void extern
avx512pf_test (void)
{
- _mm512_mask_prefetch_i32gather_ps (idx, m16, base, 8, 0);
+ _mm512_mask_prefetch_i32gather_ps (idx, m16, base, 8, 1);
}
void extern
avx512pf_test (void)
{
- _mm512_mask_prefetch_i64gather_pd (idx, m8, base, 8, 0);
+ _mm512_mask_prefetch_i64gather_pd (idx, m8, base, 8, 1);
}
void extern
avx512pf_test (void)
{
- _mm512_mask_prefetch_i64gather_ps (idx, m8, base, 8, 0);
+ _mm512_mask_prefetch_i64gather_ps (idx, m8, base, 8, 1);
}
void extern
avx512pf_test (void)
{
- _mm512_mask_prefetch_i32gather_pd (idx, m8, base, 8, 1);
+ _mm512_mask_prefetch_i32gather_pd (idx, m8, base, 8, 2);
}
void extern
avx512pf_test (void)
{
- _mm512_mask_prefetch_i32gather_ps (idx, m16, base, 8, 1);
+ _mm512_mask_prefetch_i32gather_ps (idx, m16, base, 8, 2);
}
void extern
avx512pf_test (void)
{
- _mm512_mask_prefetch_i64gather_pd (idx, m8, base, 8, 1);
+ _mm512_mask_prefetch_i64gather_pd (idx, m8, base, 8, 2);
}
void extern
avx512pf_test (void)
{
- _mm512_mask_prefetch_i64gather_ps (idx, m8, base, 8, 1);
+ _mm512_mask_prefetch_i64gather_ps (idx, m8, base, 8, 2);
}
void extern
avx512pf_test (void)
{
- _mm512_prefetch_i32scatter_pd (base, idx, 8, 0);
- _mm512_mask_prefetch_i32scatter_pd (base, m8, idx, 8, 0);
+ _mm512_prefetch_i32scatter_pd (base, idx, 8, 1);
+ _mm512_mask_prefetch_i32scatter_pd (base, m8, idx, 8, 5);
}
void extern
avx512pf_test (void)
{
- _mm512_prefetch_i32scatter_ps (base, idx, 8, 0);
- _mm512_mask_prefetch_i32scatter_ps (base, m16, idx, 8, 0);
+ _mm512_prefetch_i32scatter_ps (base, idx, 8, 1);
+ _mm512_mask_prefetch_i32scatter_ps (base, m16, idx, 8, 5);
}
void extern
avx512pf_test (void)
{
- _mm512_prefetch_i64scatter_pd (base, idx, 8, 0);
- _mm512_mask_prefetch_i64scatter_pd (base, m8, idx, 8, 0);
+ _mm512_prefetch_i64scatter_pd (base, idx, 8, 1);
+ _mm512_mask_prefetch_i64scatter_pd (base, m8, idx, 8, 5);
}
void extern
avx512pf_test (void)
{
- _mm512_prefetch_i64scatter_ps (base, idx, 8, 0);
- _mm512_mask_prefetch_i64scatter_ps (base, m8, idx, 8, 0);
+ _mm512_prefetch_i64scatter_ps (base, idx, 8, 1);
+ _mm512_mask_prefetch_i64scatter_ps (base, m8, idx, 8, 5);
}
void extern
avx512pf_test (void)
{
- _mm512_prefetch_i32scatter_pd (base, idx, 8, 1);
- _mm512_mask_prefetch_i32scatter_pd (base, m8, idx, 8, 1);
+ _mm512_prefetch_i32scatter_pd (base, idx, 8, 2);
+ _mm512_mask_prefetch_i32scatter_pd (base, m8, idx, 8, 6);
}
void extern
avx512pf_test (void)
{
- _mm512_prefetch_i32scatter_ps (base, idx, 8, 1);
- _mm512_mask_prefetch_i32scatter_ps (base, m16, idx, 8, 1);
+ _mm512_prefetch_i32scatter_ps (base, idx, 8, 2);
+ _mm512_mask_prefetch_i32scatter_ps (base, m16, idx, 8, 6);
}
void extern
avx512pf_test (void)
{
- _mm512_prefetch_i64scatter_pd (base, idx, 8, 1);
- _mm512_mask_prefetch_i64scatter_pd (base, m8, idx, 8, 1);
+ _mm512_prefetch_i64scatter_pd (base, idx, 8, 2);
+ _mm512_mask_prefetch_i64scatter_pd (base, m8, idx, 8, 6);
}
void extern
avx512pf_test (void)
{
- _mm512_prefetch_i64scatter_ps (base, idx, 8, 1);
- _mm512_mask_prefetch_i64scatter_ps (base, m8, idx, 8, 1);
+ _mm512_prefetch_i64scatter_ps (base, idx, 8, 2);
+ _mm512_mask_prefetch_i64scatter_ps (base, m8, idx, 8, 6);
}