}
}
-/* Return true if an operaton of kind KIND for STMT_INFO represents
- the extraction of an element from a vector in preparation for
- storing the element to memory. */
-static bool
-aarch64_is_store_elt_extraction (vect_cost_for_stmt kind,
- stmt_vec_info stmt_info)
-{
- return (kind == vec_to_scalar
- && STMT_VINFO_DATA_REF (stmt_info)
- && DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)));
-}
-
-/* Return true if STMT_INFO represents part of a reduction. */
-static bool
-aarch64_is_reduction (stmt_vec_info stmt_info)
-{
- return (STMT_VINFO_REDUC_DEF (stmt_info)
- || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)));
-}
-
-/* If STMT_INFO describes a reduction, return the type of reduction
- it describes, otherwise return -1. */
-static int
-aarch64_reduc_type (vec_info *vinfo, stmt_vec_info stmt_info)
-{
- if (loop_vec_info loop_vinfo = dyn_cast<loop_vec_info> (vinfo))
- if (STMT_VINFO_REDUC_DEF (stmt_info))
- {
- stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
- return int (STMT_VINFO_REDUC_TYPE (reduc_info));
- }
- return -1;
-}
-
/* Return true if an access of kind KIND for STMT_INFO represents one
vector of an LD[234] or ST[234] operation. Return the total number of
vectors (2, 3 or 4) if so, otherwise return a value outside that range. */
return 0;
}
-/* If STMT_INFO is a COND_EXPR that includes an embedded comparison, return the
- scalar type of the values being compared. Return null otherwise. */
-static tree
-aarch64_embedded_comparison_type (stmt_vec_info stmt_info)
-{
- if (auto *assign = dyn_cast<gassign *> (stmt_info->stmt))
- if (gimple_assign_rhs_code (assign) == COND_EXPR)
- {
- tree cond = gimple_assign_rhs1 (assign);
- if (COMPARISON_CLASS_P (cond))
- return TREE_TYPE (TREE_OPERAND (cond, 0));
- }
- return NULL_TREE;
-}
-
-/* If STMT_INFO is a comparison or contains an embedded comparison, return the
- scalar type of the values being compared. Return null otherwise. */
-static tree
-aarch64_comparison_type (stmt_vec_info stmt_info)
-{
- if (auto *assign = dyn_cast<gassign *> (stmt_info->stmt))
- if (TREE_CODE_CLASS (gimple_assign_rhs_code (assign)) == tcc_comparison)
- return TREE_TYPE (gimple_assign_rhs1 (assign));
- return aarch64_embedded_comparison_type (stmt_info);
-}
-
/* Return true if creating multiple copies of STMT_INFO for Advanced SIMD
vectors would produce a series of LDP or STP operations. KIND is the
kind of statement that STMT_INFO represents. */
return is_gimple_assign (stmt_info->stmt);
}
-/* Return true if STMT_INFO extends the result of a load. */
-static bool
-aarch64_extending_load_p (class vec_info *vinfo, stmt_vec_info stmt_info)
-{
- gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
- if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
- return false;
-
- tree rhs = gimple_assign_rhs1 (stmt_info->stmt);
- tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign));
- tree rhs_type = TREE_TYPE (rhs);
- if (!INTEGRAL_TYPE_P (lhs_type)
- || !INTEGRAL_TYPE_P (rhs_type)
- || TYPE_PRECISION (lhs_type) <= TYPE_PRECISION (rhs_type))
- return false;
-
- stmt_vec_info def_stmt_info = vinfo->lookup_def (rhs);
- return (def_stmt_info
- && STMT_VINFO_DATA_REF (def_stmt_info)
- && DR_IS_READ (STMT_VINFO_DATA_REF (def_stmt_info)));
-}
-
-/* Return true if STMT_INFO is an integer truncation. */
-static bool
-aarch64_integer_truncation_p (stmt_vec_info stmt_info)
-{
- gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
- if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
- return false;
-
- tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign));
- tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
- return (INTEGRAL_TYPE_P (lhs_type)
- && INTEGRAL_TYPE_P (rhs_type)
- && TYPE_PRECISION (lhs_type) < TYPE_PRECISION (rhs_type));
-}
-
/* Return true if STMT_INFO is the second part of a two-statement multiply-add
or multiply-subtract sequence that might be suitable for fusing into a
single instruction. If VEC_FLAGS is zero, analyze the operation as
tree vectype,
const sve_vec_cost *sve_costs)
{
- switch (aarch64_reduc_type (vinfo, stmt_info))
+ switch (vect_reduc_type (vinfo, stmt_info))
{
case EXTRACT_LAST_REDUCTION:
return sve_costs->clast_cost;
{
/* Detect an extension of a loaded value. In general, we'll be able to fuse
the extension with the load. */
- if (kind == scalar_stmt && aarch64_extending_load_p (vinfo, stmt_info))
+ if (kind == scalar_stmt && vect_is_extending_load (vinfo, stmt_info))
return 0;
return stmt_cost;
/* Detect cases in which vec_to_scalar is describing the extraction of a
vector element in preparation for a scalar store. The store itself is
costed separately. */
- if (aarch64_is_store_elt_extraction (kind, stmt_info))
+ if (vect_is_store_elt_extraction (kind, stmt_info))
return simd_costs->store_elt_extra_cost;
/* Detect SVE gather loads, which are costed as a single scalar_load
   for each element.  */
/* Detect cases in which vec_to_scalar represents a single reduction
   instruction like FADDP or MAXV.  */
if (kind == vec_to_scalar
&& where == vect_epilogue
- && aarch64_is_reduction (stmt_info))
+ && vect_is_reduction (stmt_info))
switch (GET_MODE_INNER (TYPE_MODE (vectype)))
{
case E_QImode:
on the fly. Optimistically assume that a load followed by an extension
will fold to this form during combine, and that the extension therefore
comes for free. */
- if (kind == vector_stmt && aarch64_extending_load_p (vinfo, stmt_info))
+ if (kind == vector_stmt && vect_is_extending_load (vinfo, stmt_info))
stmt_cost = 0;
/* For similar reasons, vector_stmt integer truncations are a no-op,
because we can just ignore the unused upper bits of the source. */
- if (kind == vector_stmt && aarch64_integer_truncation_p (stmt_info))
+ if (kind == vector_stmt && vect_is_integer_truncation (stmt_info))
stmt_cost = 0;
/* Advanced SIMD can load and store pairs of registers using LDP and STP,
}
if (kind == vector_stmt || kind == vec_to_scalar)
- if (tree cmp_type = aarch64_embedded_comparison_type (stmt_info))
+ if (tree cmp_type = vect_embedded_comparison_type (stmt_info))
{
if (FLOAT_TYPE_P (cmp_type))
stmt_cost += simd_costs->fp_stmt_cost;
}
if (kind == scalar_stmt)
- if (tree cmp_type = aarch64_embedded_comparison_type (stmt_info))
+ if (tree cmp_type = vect_embedded_comparison_type (stmt_info))
{
if (FLOAT_TYPE_P (cmp_type))
stmt_cost += aarch64_tune_params.vec_costs->scalar_fp_stmt_cost;
/* Calculate the minimum cycles per iteration imposed by a reduction
operation. */
if ((kind == vector_stmt || kind == vec_to_scalar)
- && aarch64_is_reduction (stmt_info))
+ && vect_is_reduction (stmt_info))
{
unsigned int base
= aarch64_in_loop_reduction_latency (vinfo, stmt_info, vectype,
vec_flags);
- if (aarch64_reduc_type (vinfo, stmt_info) == FOLD_LEFT_REDUCTION)
+ if (vect_reduc_type (vinfo, stmt_info) == FOLD_LEFT_REDUCTION)
{
if (aarch64_sve_mode_p (TYPE_MODE (vectype)))
{
/* Add any embedded comparison operations. */
if ((kind == scalar_stmt || kind == vector_stmt || kind == vec_to_scalar)
- && aarch64_embedded_comparison_type (stmt_info))
+ && vect_embedded_comparison_type (stmt_info))
ops->general_ops += num_copies;
/* Detect COND_REDUCTIONs and things that would need to become
   COND_REDUCTIONs if they were implemented using Advanced SIMD.
   There are then two sets of VEC_COND_EXPRs, whereas so far we
   have only accounted for one.  */
if (vec_flags && (kind == vector_stmt || kind == vec_to_scalar))
{
- int reduc_type = aarch64_reduc_type (vinfo, stmt_info);
+ int reduc_type = vect_reduc_type (vinfo, stmt_info);
if ((reduc_type == EXTRACT_LAST_REDUCTION && (vec_flags & VEC_ADVSIMD))
|| reduc_type == COND_REDUCTION)
ops->general_ops += num_copies;
/* Count the predicate operations needed by an SVE comparison. */
if (sve_issue && (kind == vector_stmt || kind == vec_to_scalar))
- if (tree type = aarch64_comparison_type (stmt_info))
+ if (tree type = vect_comparison_type (stmt_info))
{
unsigned int base = (FLOAT_TYPE_P (type)
? sve_issue->fp_cmp_pred_ops
/* If we scalarize a strided store, the vectorizer costs one
vec_to_scalar for each element. However, we can store the first
element using an FP store without a separate extract step. */
- if (aarch64_is_store_elt_extraction (kind, stmt_info))
+ if (vect_is_store_elt_extraction (kind, stmt_info))
count -= 1;
stmt_cost = aarch64_detect_scalar_stmt_subtype
/* Number of supported pattern matchers. */
extern size_t num__slp_patterns;
+/* ----------------------------------------------------------------------
+ Target support routines
+ -----------------------------------------------------------------------
+ The following routines are provided to simplify costing decisions in
+ target code. Please add more as needed. */
+
+/* Return true if an operation of kind KIND for STMT_INFO represents
+ the extraction of an element from a vector in preparation for
+ storing the element to memory. */
+inline bool
+vect_is_store_elt_extraction (vect_cost_for_stmt kind, stmt_vec_info stmt_info)
+{
+ return (kind == vec_to_scalar
+ && STMT_VINFO_DATA_REF (stmt_info)
+ && DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)));
+}
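
[Annotation, not part of the patch: a hypothetical source pattern this
predicate matches.  A strided store with a variable stride is scalarized on
Advanced SIMD, so the vectorizer costs one vec_to_scalar per element before
each scalar store, which is exactly the case detected above.]

    /* Hypothetical example: each element is extracted from its vector
       (a vec_to_scalar whose data reference is a write) before the
       scalar store.  */
    void
    strided_store (float *dst, const float *src, int stride, int n)
    {
      for (int i = 0; i < n; ++i)
        dst[i * stride] = src[i] + 1.0f;
    }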
+
+/* Return true if STMT_INFO represents part of a reduction. */
+inline bool
+vect_is_reduction (stmt_vec_info stmt_info)
+{
+ return (STMT_VINFO_REDUC_DEF (stmt_info)
+ || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)));
+}
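
[Annotation, not part of the patch: a hypothetical example.  The statement
updating the accumulator below is part of a reduction cycle, so
vect_is_reduction would hold for its stmt_vec_info.]

    /* Hypothetical example: S accumulates across iterations, making the
       update statement part of a reduction.  */
    int
    sum (const int *a, int n)
    {
      int s = 0;
      for (int i = 0; i < n; ++i)
        s += a[i];
      return s;
    }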
+
+/* If STMT_INFO describes a reduction, return the vect_reduction_type
+ of the reduction it describes, otherwise return -1. */
+inline int
+vect_reduc_type (vec_info *vinfo, stmt_vec_info stmt_info)
+{
+ if (loop_vec_info loop_vinfo = dyn_cast<loop_vec_info> (vinfo))
+ if (STMT_VINFO_REDUC_DEF (stmt_info))
+ {
+ stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
+ return int (STMT_VINFO_REDUC_TYPE (reduc_info));
+ }
+ return -1;
+}
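
[Annotation, not part of the patch: a hypothetical example of one returned
value.  Without -ffast-math a floating-point sum must preserve evaluation
order, and on targets with an in-order reduction instruction (such as SVE's
FADDA) the vectorizer classifies it as FOLD_LEFT_REDUCTION, the case the
aarch64 hunks above test for.]

    /* Hypothetical example: strict FP semantics force an in-order sum,
       which vect_reduc_type would report as FOLD_LEFT_REDUCTION.  */
    float
    fsum (const float *a, int n)
    {
      float s = 0.0f;
      for (int i = 0; i < n; ++i)
        s += a[i];
      return s;
    }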
+
+/* If STMT_INFO is a COND_EXPR that includes an embedded comparison, return the
+ scalar type of the values being compared. Return null otherwise. */
+inline tree
+vect_embedded_comparison_type (stmt_vec_info stmt_info)
+{
+ if (auto *assign = dyn_cast<gassign *> (stmt_info->stmt))
+ if (gimple_assign_rhs_code (assign) == COND_EXPR)
+ {
+ tree cond = gimple_assign_rhs1 (assign);
+ if (COMPARISON_CLASS_P (cond))
+ return TREE_TYPE (TREE_OPERAND (cond, 0));
+ }
+ return NULL_TREE;
+}
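
[Annotation, not part of the patch: a hypothetical illustration.  The select
below becomes a COND_EXPR whose condition is an embedded integer comparison,
so this routine would return the scalar type int.]

    /* Hypothetical example: the COND_EXPR's condition compares ints, so
       vect_embedded_comparison_type returns that scalar type.  */
    void
    select (int *out, const int *a, const int *b, const int *c, int n)
    {
      for (int i = 0; i < n; ++i)
        out[i] = a[i] < b[i] ? c[i] : 0;
    }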
+
+/* If STMT_INFO is a comparison or contains an embedded comparison, return the
+ scalar type of the values being compared. Return null otherwise. */
+inline tree
+vect_comparison_type (stmt_vec_info stmt_info)
+{
+ if (auto *assign = dyn_cast<gassign *> (stmt_info->stmt))
+ if (TREE_CODE_CLASS (gimple_assign_rhs_code (assign)) == tcc_comparison)
+ return TREE_TYPE (gimple_assign_rhs1 (assign));
+ return vect_embedded_comparison_type (stmt_info);
+}
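
[Annotation, not part of the patch: a hypothetical case for the
direct-comparison path.  Here the assignment's rhs code is itself a
tcc_comparison, so the routine returns float without falling back to the
embedded-comparison check.]

    /* Hypothetical example: the comparison result is stored directly, so
       the defining statement's rhs code is a tcc_comparison on floats.  */
    void
    compare (int *out, const float *a, const float *b, int n)
    {
      for (int i = 0; i < n; ++i)
        out[i] = a[i] < b[i];
    }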
+
+/* Return true if STMT_INFO extends the result of a load. */
+inline bool
+vect_is_extending_load (class vec_info *vinfo, stmt_vec_info stmt_info)
+{
+ /* Although this is quite large for an inline function, this part
+ at least should be inline. */
+ gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
+ if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
+ return false;
+
+ tree rhs = gimple_assign_rhs1 (stmt_info->stmt);
+ tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign));
+ tree rhs_type = TREE_TYPE (rhs);
+ if (!INTEGRAL_TYPE_P (lhs_type)
+ || !INTEGRAL_TYPE_P (rhs_type)
+ || TYPE_PRECISION (lhs_type) <= TYPE_PRECISION (rhs_type))
+ return false;
+
+ stmt_vec_info def_stmt_info = vinfo->lookup_def (rhs);
+ return (def_stmt_info
+ && STMT_VINFO_DATA_REF (def_stmt_info)
+ && DR_IS_READ (STMT_VINFO_DATA_REF (def_stmt_info)));
+}
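
[Annotation, not part of the patch: a hypothetical source pattern this
matches.  Each iteration loads a narrow value and widens it; because the
conversion's operand is defined by a load, targets such as AArch64 can
assume the extension fuses with the load and cost it as free, as the
aarch64 hunks above do.]

    /* Hypothetical example: the char-to-int conversion extends the result
       of a load, so vect_is_extending_load holds for the conversion.  */
    void
    widen (int *dst, const signed char *src, int n)
    {
      for (int i = 0; i < n; ++i)
        dst[i] = src[i];
    }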
+
+/* Return true if STMT_INFO is an integer truncation. */
+inline bool
+vect_is_integer_truncation (stmt_vec_info stmt_info)
+{
+ gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
+ if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
+ return false;
+
+ tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign));
+ tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
+ return (INTEGRAL_TYPE_P (lhs_type)
+ && INTEGRAL_TYPE_P (rhs_type)
+ && TYPE_PRECISION (lhs_type) < TYPE_PRECISION (rhs_type));
+}
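
[Annotation, not part of the patch: a hypothetical pattern for the
truncation test.  The int-to-short conversion below only discards upper
bits, which is why the aarch64 code above costs such vector statements as
no-ops.]

    /* Hypothetical example: narrowing int to short is an integer
       truncation; vectorized, it just ignores each lane's upper bits.  */
    void
    narrow (short *dst, const int *src, int n)
    {
      for (int i = 0; i < n; ++i)
        dst[i] = (short) src[i];
    }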
+
#endif /* GCC_TREE_VECTORIZER_H */