Index: expr.c
===================================================================
--- expr.c	(revision 148164)
+++ expr.c	(working copy)
@@ -4296,6 +4296,36 @@ expand_assignment (tree to, tree from, b
       return;
     }
 
+  else if (TREE_CODE (to) == MISALIGNED_INDIRECT_REF)
+    {
+      enum machine_mode mode, op_mode1;
+      enum insn_code icode;
+      rtx reg, addr, mem, insn;
+
+      reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+      reg = force_not_mem (reg);
+
+      mode = TYPE_MODE (TREE_TYPE (to));
+      addr = expand_expr (TREE_OPERAND (to, 0), NULL_RTX, VOIDmode,
+                          EXPAND_SUM);
+      addr = memory_address (mode, addr);
+      mem = gen_rtx_MEM (mode, addr);
+
+      set_mem_attributes (mem, to, 0);
+
+      icode = movmisalign_optab->handlers[mode].insn_code;
+      gcc_assert (icode != CODE_FOR_nothing);
+
+      op_mode1 = insn_data[icode].operand[1].mode;
+      if (! (*insn_data[icode].operand[1].predicate) (reg, op_mode1)
+          && op_mode1 != VOIDmode)
+        reg = copy_to_mode_reg (op_mode1, reg);
+
+      insn = GEN_FCN (icode) (mem, reg);
+      emit_insn (insn);
+      return;
+    }
+
   /* If the rhs is a function call and its value is not an aggregate,
      call the function before we start to compute the lhs.
      This is needed for correct code for cases such as
@@ -7575,9 +7605,6 @@ expand_expr_real_1 (tree exp, rtx target
 
 	/* Resolve the misalignment now, so that we don't have to remember
 	   to resolve it later.  Of course, this only works for reads.  */
-	/* ??? When we get around to supporting writes, we'll have to handle
-	   this in store_expr directly.  The vectorizer isn't generating
-	   those yet, however.  */
 	if (code == MISALIGNED_INDIRECT_REF)
 	  {
 	    int icode;
Index: tree-vect-data-refs.c
===================================================================
--- tree-vect-data-refs.c	(revision 148164)
+++ tree-vect-data-refs.c	(working copy)
@@ -1138,11 +1138,10 @@ vect_enhance_data_refs_alignment (loop_v
   /* While cost model enhancements are expected in the future, the high level
      view of the code at this time is as follows:
 
-     A) If there is a misaligned write then see if peeling to align this write
-        can make all data references satisfy vect_supportable_dr_alignment.
-        If so, update data structures as needed and return true.  Note that
-        at this time vect_supportable_dr_alignment is known to return false
-        for a misaligned write.
+     A) If there is an unsupported misaligned access then see if peeling
+        to align this access can make all data references satisfy
+        vect_supportable_dr_alignment.  If so, update data structures
+        as needed and return true.
 
      B) If peeling wasn't possible and there is a data reference with an
        unknown misalignment that does not satisfy vect_supportable_dr_alignment
@@ -1169,8 +1168,7 @@ vect_enhance_data_refs_alignment (loop_v
      in code size).
 
      The scheme we use FORNOW: peel to force the alignment of the first
-     misaligned store in the loop.
-     Rationale: misaligned stores are not yet supported.
+     unsupported misaligned access in the loop.
 
      TODO: Use a cost model.  */
 
@@ -1178,6 +1176,7 @@ vect_enhance_data_refs_alignment (loop_v
     {
       stmt = DR_STMT (dr);
       stmt_info = vinfo_for_stmt (stmt);
+      supportable_dr_alignment = vect_supportable_dr_alignment (dr);
 
       /* For interleaving, only the alignment of the first access
          matters.  */
@@ -1185,7 +1184,7 @@ vect_enhance_data_refs_alignment (loop_v
           && DR_GROUP_FIRST_DR (stmt_info) != stmt)
         continue;
 
-      if (!DR_IS_READ (dr) && !aligned_access_p (dr))
+      if (!supportable_dr_alignment)
         {
           do_peeling = vector_alignment_reachable_p (dr);
           if (do_peeling)
@@ -3475,6 +3474,11 @@ vect_supportable_dr_alignment (struct da
           /* Can't software pipeline the loads, but can at least do them.  */
           return dr_unaligned_supported;
         }
+      else
+        {
+          if (movmisalign_optab->handlers[mode].insn_code != CODE_FOR_nothing)
+            return dr_unaligned_supported;
+        }
 
   /* Unsupported.  */
   return dr_unaligned_unsupported;
Index: tree-vect-stmts.c
===================================================================
--- tree-vect-stmts.c	(revision 148164)
+++ tree-vect-stmts.c	(working copy)
@@ -3018,7 +3018,6 @@ vectorizable_store (gimple stmt, gimple_
 
   alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
   gcc_assert (alignment_support_scheme);
-  gcc_assert (alignment_support_scheme == dr_aligned); /* FORNOW */
 
   /* In case the vectorization factor (VF) is bigger than the number
      of elements that we can fit in a vectype (nunits), we have to generate
@@ -3157,7 +3156,16 @@ vectorizable_store (gimple stmt, gimple_
              vect_permute_store_chain().  */
           vec_oprnd = VEC_index (tree, result_chain, i);
 
-          data_ref = build_fold_indirect_ref (dataref_ptr);
+          if (aligned_access_p (first_dr))
+            data_ref = build_fold_indirect_ref (dataref_ptr);
+          else
+            {
+              int mis = DR_MISALIGNMENT (first_dr);
+              tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
+              tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
+              data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
+            }
+
           /* If accesses through a pointer to vectype do not alias the original
              memory reference we have a problem.  This should never happen. */
           gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
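
For illustration only (not part of the patch): the new path is taken for a
store whose alignment is unknown at compile time, on a target that provides
the movmisalign<mode> named pattern (e.g. i386 with SSE, where it expands to
unaligned vector moves).  A minimal sketch of such a test case follows; the
file and function names are made up.

/* vect-misal-store.c: hypothetical example; compile with
   -O2 -ftree-vectorize on a target with movmisalign<mode> support.  */
#define N 64

void
add1 (float *dst, float *src)
{
  int i;

  /* Neither pointer is known to be vector-aligned, so the vector store
     to dst[] may be misaligned.  Previously the vectorizer either peeled
     to align the first misaligned store or gave up; with this patch
     vectorizable_store builds a MISALIGNED_INDIRECT_REF for the store,
     and expand_assignment expands it through the target's movmisalign
     pattern.  */
  for (i = 0; i < N; i++)
    dst[i] = src[i] + 1.0f;
}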