--- /dev/null
+// { dg-do compile }
+// { dg-additional-options "-O3" }
+
+// Reduced compile-only testcase: at -O3 the inner loop's grouped stores
+// (loop-invariant shift result into arr_6, zero into arr_7) exercise the
+// SLP-vectorizer changes in this patch — presumably a regression test for
+// the matches[]/scalar-shift-arg handling below; TODO confirm against the
+// PR this patch fixes.
+extern int arr_6[];
+extern char arr_7[] __attribute__((aligned));
+void test(short a, bool, int p8) {
+ // The outer loop runs at most once (b flips 0 -> 1); the bool
+ // comparison against (bool)p8 is deliberate reducer output.
+ for (bool b = 0; b < (bool)p8; b = 1)
+ for (short c = 0; c < 5; c++) {
+ // NOTE(review): `-` binds tighter than `<<`, so the shift count is
+ // a - 30574 (typically out of range / UB at runtime). Intentional in
+ // this reduced testcase — it only needs to compile without ICE.
+ arr_6[c] = (long)2 << a - 30574;
+ arr_7[c] = 0;
+ }
+}
vect_free_oprnd_info (oprnds_info);
- /* If we have all children of a non-unary child built up from
- uniform scalars then just throw that away, causing it built up
- from scalars. */
- if (nops > 1
- && is_a <bb_vec_info> (vinfo)
+ /* If we have all children of a child built up from uniform scalars
+ then just throw that away, causing it to be built up from scalars.
+ The exception is the SLP node for the vector store. */
+ if (is_a <bb_vec_info> (vinfo)
+ && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
/* ??? Rejecting patterns this way doesn't work. We'd have to
do extra work to cancel the pattern so the uses see the
scalar version. */
return false;
}
/* Fatal mismatch. */
+ matches[0] = true;
matches[group_size / const_max_nunits * const_max_nunits] = false;
vect_free_slp_tree (node, false);
}
if (!op1_vectype)
op1_vectype = get_vectype_for_scalar_type (vinfo,
TREE_TYPE (op1),
- slp_node);
+ slp_op1);
/* Unlike the other binary operators, shifts/rotates have
the rhs being int, instead of the same type as the lhs,
/* Arguments are ready. Create the new vector stmt. */
FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
{
- vop1 = vec_oprnds1[i];
/* For internal defs where we need to use a scalar shift arg
extract the first lane. */
if (scalar_shift_arg && dt[1] == vect_internal_def)
{
+ vop1 = vec_oprnds1[0];
new_temp = make_ssa_name (TREE_TYPE (TREE_TYPE (vop1)));
gassign *new_stmt
= gimple_build_assign (new_temp,
vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
vop1 = new_temp;
}
+ else
+ vop1 = vec_oprnds1[i];
gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);