}
}
/* If both types are integral, convert from one mode to the other. */
- else if (INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (treeop0)))
+ else if (INTEGRAL_TYPE_P (type)
+ && INTEGRAL_TYPE_P (TREE_TYPE (treeop0))
+ && mode != BLKmode
+ && GET_MODE (op0) != BLKmode)
op0 = convert_modes (mode, GET_MODE (op0), op0,
TYPE_UNSIGNED (TREE_TYPE (treeop0)));
/* If the output type is a bit-field type, do an extraction. */
{
lhs = gimple_assign_lhs (stmt);
tree rhs1 = gimple_assign_rhs1 (stmt);
+ if (TREE_CODE (rhs1) == VIEW_CONVERT_EXPR)
+ rhs1 = TREE_OPERAND (rhs1, 0);
if (TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
&& bitint_precision_kind (TREE_TYPE (lhs)) >= bitint_prec_large
&& INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
|| POINTER_TYPE_P (TREE_TYPE (lhs))))
{
final_cast_p = true;
+ if (TREE_CODE (TREE_TYPE (lhs)) == INTEGER_TYPE
+ && TYPE_PRECISION (TREE_TYPE (lhs)) > MAX_FIXED_MODE_SIZE
+ && gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR)
+ {
+ /* Handle VIEW_CONVERT_EXPRs to not generally supported
+ huge INTEGER_TYPEs like uint256_t or uint512_t. These
+ are usually emitted from memcpy folding and backends
+ support moves with them but that is usually it. */
+ if (TREE_CODE (rhs1) == INTEGER_CST)
+ {
+ rhs1 = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (lhs),
+ rhs1);
+ gcc_assert (rhs1 && TREE_CODE (rhs1) == INTEGER_CST);
+ gimple_assign_set_rhs1 (stmt, rhs1);
+ gimple_assign_set_rhs_code (stmt, INTEGER_CST);
+ update_stmt (stmt);
+ return;
+ }
+ gcc_assert (TREE_CODE (rhs1) == SSA_NAME);
+ if (SSA_NAME_IS_DEFAULT_DEF (rhs1)
+ && (!SSA_NAME_VAR (rhs1) || VAR_P (SSA_NAME_VAR (rhs1))))
+ {
+ tree var = create_tmp_reg (TREE_TYPE (lhs));
+ rhs1 = get_or_create_ssa_default_def (cfun, var);
+ gimple_assign_set_rhs1 (stmt, rhs1);
+ gimple_assign_set_rhs_code (stmt, SSA_NAME);
+ }
+ else
+ {
+ int part = var_to_partition (m_map, rhs1);
+ gcc_assert (m_vars[part] != NULL_TREE);
+ rhs1 = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs),
+ m_vars[part]);
+ gimple_assign_set_rhs1 (stmt, rhs1);
+ }
+ update_stmt (stmt);
+ return;
+ }
if (TREE_CODE (rhs1) == SSA_NAME
&& (m_names == NULL
|| !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs1))))
if (gimple_assign_cast_p (use_stmt))
{
tree lhs = gimple_assign_lhs (use_stmt);
- if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
+ if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+ /* Don't merge with VIEW_CONVERT_EXPRs to
+ huge INTEGER_TYPEs used sometimes in memcpy
+ expansion. */
+ && (TREE_CODE (TREE_TYPE (lhs)) != INTEGER_TYPE
+ || (TYPE_PRECISION (TREE_TYPE (lhs))
+ <= MAX_FIXED_MODE_SIZE)))
continue;
}
else if (gimple_store_p (use_stmt)
== gimple_bb (SSA_NAME_DEF_STMT (s))))
goto force_name;
break;
+ case VIEW_CONVERT_EXPR:
+ /* Don't merge with VIEW_CONVERT_EXPRs to
+ huge INTEGER_TYPEs used sometimes in memcpy
+ expansion. */
+ {
+ tree lhs = gimple_assign_lhs (use_stmt);
+ if (TREE_CODE (TREE_TYPE (lhs)) == INTEGER_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (lhs))
+ > MAX_FIXED_MODE_SIZE))
+ goto force_name;
+ }
+ break;
default:
break;
}
--- /dev/null
+/* PR tree-optimization/113783 */
+/* Compile-only test: the memcpy calls below are folded into moves
+   through huge INTEGER_TYPEs (e.g. uint256_t / uint512_t) via
+   VIEW_CONVERT_EXPRs from large _BitInt operands, exercising the
+   VIEW_CONVERT_EXPR handling added in the bitint lowering above.
+   Success is simply compiling without an ICE.  */
+/* { dg-do compile { target bitint } } */
+/* { dg-options "-O2" } */
+/* { dg-additional-options "-mavx512f" { target i?86-*-* x86_64-*-* } } */
+
+/* Unused at file scope; presumably retained from the original
+   reproducer — TODO confirm it is needed.  */
+int i;
+
+#if __BITINT_MAXWIDTH__ >= 246
+/* memcpy directly from a large _BitInt parameter.  */
+void
+foo (void *p, _BitInt(246) x)
+{
+  __builtin_memcpy (p, &x, sizeof x);
+}
+
+/* memcpy from a computed large _BitInt local (non-default-def SSA
+   name), covering the var_to_partition path in the lowering.  */
+_BitInt(246)
+bar (void *p, _BitInt(246) x)
+{
+  _BitInt(246) y = x + 1;
+  __builtin_memcpy (p, &y, sizeof y);
+  return x;
+}
+#endif
+
+#if __BITINT_MAXWIDTH__ >= 502
+/* Same pair of cases at a width beyond 256 bits.  */
+void
+baz (void *p, _BitInt(502) x)
+{
+  __builtin_memcpy (p, &x, sizeof x);
+}
+
+_BitInt(502)
+qux (void *p, _BitInt(502) x)
+{
+  _BitInt(502) y = x + 1;
+  __builtin_memcpy (p, &y, sizeof y);
+  return x;
+}
+#endif