diff --git a/gcc/builtins.c b/gcc/builtins.c index 296c5b7..3e41695 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -3567,7 +3567,8 @@ expand_builtin_memset_args (tree dest, tree val, tree len, builtin_memset_read_str, &c, dest_align, true)) store_by_pieces (dest_mem, tree_low_cst (len, 1), - builtin_memset_read_str, &c, dest_align, true, 0); + builtin_memset_read_str, gen_int_mode (c, val_mode), + dest_align, true, 0); else if (!set_storage_via_setmem (dest_mem, len_rtx, gen_int_mode (c, val_mode), dest_align, expected_align, diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c index 2c53423..d7c4330 100644 --- a/gcc/config/i386/i386.c +++ b/gcc/config/i386/i386.c @@ -561,10 +561,14 @@ struct processor_costs ix86_size_cost = {/* costs for tuning for size */ COSTS_N_BYTES (2), /* cost of FABS instruction. */ COSTS_N_BYTES (2), /* cost of FCHS instruction. */ COSTS_N_BYTES (2), /* cost of FSQRT instruction. */ - {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, + {{{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}}, - {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, + {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, + {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}}}, + {{{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}}, + {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, + {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -632,10 +636,14 @@ struct processor_costs i386_cost = { /* 386 specific costs */ COSTS_N_INSNS (22), /* cost of FABS instruction. */ COSTS_N_INSNS (24), /* cost of FCHS instruction. */ COSTS_N_INSNS (122), /* cost of FSQRT instruction. */ - {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, + {{{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, DUMMY_STRINGOP_ALGS}, - {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, + {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, + DUMMY_STRINGOP_ALGS}}, + {{{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, DUMMY_STRINGOP_ALGS}, + {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, + DUMMY_STRINGOP_ALGS}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -704,10 +712,14 @@ struct processor_costs i486_cost = { /* 486 specific costs */ COSTS_N_INSNS (3), /* cost of FABS instruction. */ COSTS_N_INSNS (3), /* cost of FCHS instruction. */ COSTS_N_INSNS (83), /* cost of FSQRT instruction. */ - {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}}, + {{{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}}, DUMMY_STRINGOP_ALGS}, - {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}}, + {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}}, + DUMMY_STRINGOP_ALGS}}, + {{{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}}, DUMMY_STRINGOP_ALGS}, + {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}}, + DUMMY_STRINGOP_ALGS}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -774,10 +786,14 @@ struct processor_costs pentium_cost = { COSTS_N_INSNS (1), /* cost of FABS instruction. */ COSTS_N_INSNS (1), /* cost of FCHS instruction. */ COSTS_N_INSNS (70), /* cost of FSQRT instruction. 
*/ - {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, + {{{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, DUMMY_STRINGOP_ALGS}, - {{libcall, {{-1, rep_prefix_4_byte}}}, + {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, + DUMMY_STRINGOP_ALGS}}, + {{{libcall, {{-1, rep_prefix_4_byte}}}, DUMMY_STRINGOP_ALGS}, + {{libcall, {{-1, rep_prefix_4_byte}}}, + DUMMY_STRINGOP_ALGS}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -849,12 +865,18 @@ struct processor_costs pentiumpro_cost = { noticeable win, for bigger blocks either rep movsl or rep movsb is way to go. Rep movsb has apparently more expensive startup time in CPU, but after 4K the difference is down in the noise. */ - {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop}, + {{{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop}, {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}}, DUMMY_STRINGOP_ALGS}, - {{rep_prefix_4_byte, {{1024, unrolled_loop}, - {8192, rep_prefix_4_byte}, {-1, libcall}}}, + {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop}, + {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}}, + DUMMY_STRINGOP_ALGS}}, + {{{rep_prefix_4_byte, {{1024, unrolled_loop}, + {8192, rep_prefix_4_byte}, {-1, libcall}}}, DUMMY_STRINGOP_ALGS}, + {{rep_prefix_4_byte, {{1024, unrolled_loop}, + {8192, rep_prefix_4_byte}, {-1, libcall}}}, + DUMMY_STRINGOP_ALGS}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -922,10 +944,14 @@ struct processor_costs geode_cost = { COSTS_N_INSNS (1), /* cost of FABS instruction. */ COSTS_N_INSNS (1), /* cost of FCHS instruction. */ COSTS_N_INSNS (54), /* cost of FSQRT instruction. */ - {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, + {{{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, DUMMY_STRINGOP_ALGS}, - {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, + {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, + DUMMY_STRINGOP_ALGS}}, + {{{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, DUMMY_STRINGOP_ALGS}, + {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, + DUMMY_STRINGOP_ALGS}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -995,10 +1021,14 @@ struct processor_costs k6_cost = { COSTS_N_INSNS (2), /* cost of FABS instruction. */ COSTS_N_INSNS (2), /* cost of FCHS instruction. */ COSTS_N_INSNS (56), /* cost of FSQRT instruction. */ - {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, + {{{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, DUMMY_STRINGOP_ALGS}, - {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, + {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, + DUMMY_STRINGOP_ALGS}}, + {{{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, DUMMY_STRINGOP_ALGS}, + {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, + DUMMY_STRINGOP_ALGS}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -1068,10 +1098,14 @@ struct processor_costs athlon_cost = { /* For some reason, Athlon deals better with REP prefix (relative to loops) compared to K8. Alignment becomes important after 8 bytes for memcpy and 128 bytes for memset. 
*/ - {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}}, + {{{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}}, DUMMY_STRINGOP_ALGS}, - {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}}, + {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}}, + DUMMY_STRINGOP_ALGS}}, + {{{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}}, DUMMY_STRINGOP_ALGS}, + {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}}, + DUMMY_STRINGOP_ALGS}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -1146,11 +1180,16 @@ struct processor_costs k8_cost = { /* K8 has optimized REP instruction for medium sized blocks, but for very small blocks it is better to use loop. For large blocks, libcall can do nontemporary accesses and beat inline considerably. */ - {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, + {{{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, - {{libcall, {{8, loop}, {24, unrolled_loop}, + {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, + {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}}, + {{{libcall, {{8, loop}, {24, unrolled_loop}, {2048, rep_prefix_4_byte}, {-1, libcall}}}, {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, + {{libcall, {{8, loop}, {24, unrolled_loop}, + {2048, rep_prefix_4_byte}, {-1, libcall}}}, + {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}}, 4, /* scalar_stmt_cost. */ 2, /* scalar load_cost. */ 2, /* scalar_store_cost. */ @@ -1233,11 +1272,16 @@ struct processor_costs amdfam10_cost = { /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for very small blocks it is better to use loop. For large blocks, libcall can do nontemporary accesses and beat inline considerably. */ - {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, + {{{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, - {{libcall, {{8, loop}, {24, unrolled_loop}, + {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, + {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}}, + {{{libcall, {{8, loop}, {24, unrolled_loop}, {2048, rep_prefix_4_byte}, {-1, libcall}}}, {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, + {{libcall, {{8, loop}, {24, unrolled_loop}, + {2048, rep_prefix_4_byte}, {-1, libcall}}}, + {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}}, 4, /* scalar_stmt_cost. */ 2, /* scalar load_cost. */ 2, /* scalar_store_cost. */ @@ -1320,11 +1364,16 @@ struct processor_costs bdver1_cost = { /* BDVER1 has optimized REP instruction for medium sized blocks, but for very small blocks it is better to use loop. For large blocks, libcall can do nontemporary accesses and beat inline considerably. 
*/ - {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, + {{{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, - {{libcall, {{8, loop}, {24, unrolled_loop}, + {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, + {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}}, + {{{libcall, {{8, loop}, {24, unrolled_loop}, {2048, rep_prefix_4_byte}, {-1, libcall}}}, {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, + {{libcall, {{8, loop}, {24, unrolled_loop}, + {2048, rep_prefix_4_byte}, {-1, libcall}}}, + {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}}, 6, /* scalar_stmt_cost. */ 4, /* scalar load_cost. */ 4, /* scalar_store_cost. */ @@ -1407,11 +1456,16 @@ struct processor_costs bdver2_cost = { /* BDVER2 has optimized REP instruction for medium sized blocks, but for very small blocks it is better to use loop. For large blocks, libcall can do nontemporary accesses and beat inline considerably. */ - {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, + {{{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, - {{libcall, {{8, loop}, {24, unrolled_loop}, + {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, + {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}}, + {{{libcall, {{8, loop}, {24, unrolled_loop}, {2048, rep_prefix_4_byte}, {-1, libcall}}}, {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, + {{libcall, {{8, loop}, {24, unrolled_loop}, + {2048, rep_prefix_4_byte}, {-1, libcall}}}, + {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}}, 6, /* scalar_stmt_cost. */ 4, /* scalar load_cost. */ 4, /* scalar_store_cost. */ @@ -1489,11 +1543,16 @@ struct processor_costs btver1_cost = { /* BTVER1 has optimized REP instruction for medium sized blocks, but for very small blocks it is better to use loop. For large blocks, libcall can do nontemporary accesses and beat inline considerably. */ - {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, + {{{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, - {{libcall, {{8, loop}, {24, unrolled_loop}, + {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}}, + {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}}, + {{{libcall, {{8, loop}, {24, unrolled_loop}, {2048, rep_prefix_4_byte}, {-1, libcall}}}, {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, + {{libcall, {{8, loop}, {24, unrolled_loop}, + {2048, rep_prefix_4_byte}, {-1, libcall}}}, + {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}}, 4, /* scalar_stmt_cost. */ 2, /* scalar load_cost. */ 2, /* scalar_store_cost. */ @@ -1560,11 +1619,18 @@ struct processor_costs pentium4_cost = { COSTS_N_INSNS (2), /* cost of FABS instruction. */ COSTS_N_INSNS (2), /* cost of FCHS instruction. */ COSTS_N_INSNS (43), /* cost of FSQRT instruction. 
*/ - {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}}, + + {{{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}}, DUMMY_STRINGOP_ALGS}, - {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte}, + {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}}, + DUMMY_STRINGOP_ALGS}}, + + {{{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte}, {-1, libcall}}}, DUMMY_STRINGOP_ALGS}, + {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte}, + {-1, libcall}}}, + DUMMY_STRINGOP_ALGS}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -1631,13 +1697,22 @@ struct processor_costs nocona_cost = { COSTS_N_INSNS (3), /* cost of FABS instruction. */ COSTS_N_INSNS (3), /* cost of FCHS instruction. */ COSTS_N_INSNS (44), /* cost of FSQRT instruction. */ - {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}}, + + {{{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}}, {libcall, {{32, loop}, {20000, rep_prefix_8_byte}, {100000, unrolled_loop}, {-1, libcall}}}}, - {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte}, + {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}}, + {libcall, {{32, loop}, {20000, rep_prefix_8_byte}, + {100000, unrolled_loop}, {-1, libcall}}}}}, + + {{{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte}, {-1, libcall}}}, {libcall, {{24, loop}, {64, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, + {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte}, + {-1, libcall}}}, + {libcall, {{24, loop}, {64, unrolled_loop}, + {8192, rep_prefix_8_byte}, {-1, libcall}}}}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -1704,13 +1779,21 @@ struct processor_costs atom_cost = { COSTS_N_INSNS (8), /* cost of FABS instruction. */ COSTS_N_INSNS (8), /* cost of FCHS instruction. */ COSTS_N_INSNS (40), /* cost of FSQRT instruction. */ - {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}}, - {libcall, {{32, loop}, {64, rep_prefix_4_byte}, - {8192, rep_prefix_8_byte}, {-1, libcall}}}}, - {{libcall, {{8, loop}, {15, unrolled_loop}, - {2048, rep_prefix_4_byte}, {-1, libcall}}}, - {libcall, {{24, loop}, {32, unrolled_loop}, - {8192, rep_prefix_8_byte}, {-1, libcall}}}}, + + /* stringop_algs for memcpy. */ + {{{libcall, {{4096, unrolled_loop}, {-1, libcall}}}, /* Known alignment. */ + {libcall, {{4096, unrolled_loop}, {-1, libcall}}}}, + {{libcall, {{-1, libcall}}}, /* Unknown alignment. */ + {libcall, {{2048, unrolled_loop}, + {-1, libcall}}}}}, + + /* stringop_algs for memset. */ + {{{libcall, {{4096, unrolled_loop}, {-1, libcall}}}, /* Known alignment. */ + {libcall, {{4096, unrolled_loop}, {-1, libcall}}}}, + {{libcall, {{1024, unrolled_loop}, /* Unknown alignment. */ + {-1, libcall}}}, + {libcall, {{2048, unrolled_loop}, + {-1, libcall}}}}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -1784,10 +1867,16 @@ struct processor_costs generic64_cost = { COSTS_N_INSNS (8), /* cost of FABS instruction. */ COSTS_N_INSNS (8), /* cost of FCHS instruction. */ COSTS_N_INSNS (40), /* cost of FSQRT instruction. 
*/ - {DUMMY_STRINGOP_ALGS, - {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, - {DUMMY_STRINGOP_ALGS, - {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}, + + {{DUMMY_STRINGOP_ALGS, + {libcall, {{-1, libcall}}}}, + {DUMMY_STRINGOP_ALGS, + {libcall, {{-1, libcall}}}}}, + + {{DUMMY_STRINGOP_ALGS, + {libcall, {{-1, libcall}}}}, + {DUMMY_STRINGOP_ALGS, + {libcall, {{-1, libcall}}}}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -1856,10 +1945,16 @@ struct processor_costs generic32_cost = { COSTS_N_INSNS (8), /* cost of FABS instruction. */ COSTS_N_INSNS (8), /* cost of FCHS instruction. */ COSTS_N_INSNS (40), /* cost of FSQRT instruction. */ - {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}}, + /* stringop_algs for memcpy. */ + {{{libcall, {{4096, unrolled_loop}, {-1, libcall}}}, DUMMY_STRINGOP_ALGS}, - {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}}, + {{libcall, {{-1, libcall}}}, + DUMMY_STRINGOP_ALGS}}, + /* stringop_algs for memset. */ + {{{libcall, {{4096, unrolled_loop}, {-1, libcall}}}, DUMMY_STRINGOP_ALGS}, + {{libcall, {{-1, libcall}}}, + DUMMY_STRINGOP_ALGS}}, 1, /* scalar_stmt_cost. */ 1, /* scalar load_cost. */ 1, /* scalar_store_cost. */ @@ -2537,6 +2632,7 @@ static void ix86_set_current_function (tree); static unsigned int ix86_minimum_incoming_stack_boundary (bool); static enum calling_abi ix86_function_abi (const_tree); +static rtx promote_duplicated_reg (enum machine_mode, rtx); #ifndef SUBTARGET32_DEFAULT_CPU @@ -15266,6 +15362,38 @@ ix86_expand_move (enum machine_mode mode, rtx operands[]) } else { + if (mode == DImode + && !TARGET_64BIT + && TARGET_SSE2 + && MEM_P (op0) + && MEM_P (op1) + && !push_operand (op0, mode) + && can_create_pseudo_p ()) + { + rtx temp = gen_reg_rtx (V2DImode); + emit_insn (gen_sse2_loadq (temp, op1)); + emit_insn (gen_sse_storeq (op0, temp)); + return; + } + if (mode == DImode + && !TARGET_64BIT + && TARGET_SSE + && !MEM_P (op1) + && GET_MODE (op1) == V2DImode) + { + emit_insn (gen_sse_storeq (op0, op1)); + return; + } + if (mode == TImode + && TARGET_AVX2 + && MEM_P (op0) + && !MEM_P (op1) + && GET_MODE (op1) == V4DImode) + { + op0 = convert_to_mode (V2DImode, op0, 1); + emit_insn (gen_vec_extract_lo_v4di (op0, op1)); + return; + } if (MEM_P (op0) && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode) || !push_operand (op0, mode)) @@ -20677,22 +20805,17 @@ counter_mode (rtx count_exp) return SImode; } -/* When SRCPTR is non-NULL, output simple loop to move memory - pointer to SRCPTR to DESTPTR via chunks of MODE unrolled UNROLL times, - overall size is COUNT specified in bytes. When SRCPTR is NULL, output the - equivalent loop to set memory by VALUE (supposed to be in MODE). - - The size is rounded down to whole number of chunk size moved at once. - SRCMEM and DESTMEM provide MEMrtx to feed proper aliasing info. */ - - -static void -expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem, - rtx destptr, rtx srcptr, rtx value, - rtx count, enum machine_mode mode, int unroll, - int expected_size) +/* Helper function for expand_set_or_movmem_via_loop. + This function can reuse iter rtx from another loop and don't generate + code for updating the addresses. 
*/ +static rtx +expand_set_or_movmem_via_loop_with_iter (rtx destmem, rtx srcmem, + rtx destptr, rtx srcptr, rtx value, + rtx count, rtx iter, + enum machine_mode mode, int unroll, + int expected_size, bool change_ptrs) { - rtx out_label, top_label, iter, tmp; + rtx out_label, top_label, tmp; enum machine_mode iter_mode = counter_mode (count); rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll); rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1)); @@ -20700,10 +20823,12 @@ expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem, rtx x_addr; rtx y_addr; int i; + bool reuse_iter = (iter != NULL_RTX); top_label = gen_label_rtx (); out_label = gen_label_rtx (); - iter = gen_reg_rtx (iter_mode); + if (!reuse_iter) + iter = gen_reg_rtx (iter_mode); size = expand_simple_binop (iter_mode, AND, count, piece_size_mask, NULL, 1, OPTAB_DIRECT); @@ -20714,18 +20839,21 @@ expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem, true, out_label); predict_jump (REG_BR_PROB_BASE * 10 / 100); } - emit_move_insn (iter, const0_rtx); + if (!reuse_iter) + emit_move_insn (iter, const0_rtx); emit_label (top_label); tmp = convert_modes (Pmode, iter_mode, iter, true); x_addr = gen_rtx_PLUS (Pmode, destptr, tmp); - destmem = change_address (destmem, mode, x_addr); + destmem = + adjust_automodify_address_1 (copy_rtx (destmem), mode, x_addr, 0, 1); if (srcmem) { y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp)); - srcmem = change_address (srcmem, mode, y_addr); + srcmem = + adjust_automodify_address_1 (copy_rtx (srcmem), mode, y_addr, 0, 1); /* When unrolling for chips that reorder memory reads and writes, we can save registers by using single temporary. @@ -20797,19 +20925,43 @@ expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem, } else predict_jump (REG_BR_PROB_BASE * 80 / 100); - iter = ix86_zero_extend_to_Pmode (iter); - tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr, - true, OPTAB_LIB_WIDEN); - if (tmp != destptr) - emit_move_insn (destptr, tmp); - if (srcptr) + if (change_ptrs) { - tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr, + iter = ix86_zero_extend_to_Pmode (iter); + tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr, true, OPTAB_LIB_WIDEN); - if (tmp != srcptr) - emit_move_insn (srcptr, tmp); + if (tmp != destptr) + emit_move_insn (destptr, tmp); + if (srcptr) + { + tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr, + true, OPTAB_LIB_WIDEN); + if (tmp != srcptr) + emit_move_insn (srcptr, tmp); + } } emit_label (out_label); + return iter; +} + +/* When SRCPTR is non-NULL, output simple loop to move memory + pointer to SRCPTR to DESTPTR via chunks of MODE unrolled UNROLL times, + overall size is COUNT specified in bytes. When SRCPTR is NULL, output the + equivalent loop to set memory by VALUE (supposed to be in MODE). + + The size is rounded down to whole number of chunk size moved at once. + SRCMEM and DESTMEM provide MEMrtx to feed proper aliasing info. */ + +static void +expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem, + rtx destptr, rtx srcptr, rtx value, + rtx count, enum machine_mode mode, int unroll, + int expected_size) +{ + expand_set_or_movmem_via_loop_with_iter (destmem, srcmem, + destptr, srcptr, value, + count, NULL_RTX, mode, unroll, + expected_size, true); } /* Output "rep; mov" instruction. @@ -20913,7 +21065,27 @@ emit_strmov (rtx destmem, rtx srcmem, emit_insn (gen_strmov (destptr, dest, srcptr, src)); } -/* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. 
*/ +/* Emit strset instuction. If RHS is constant, and vector mode will be used, + then move this consatnt to a vector register before emitting strset. */ +static void +emit_strset (rtx destmem, rtx value, + rtx destptr, enum machine_mode mode, int offset) +{ + rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset); + rtx vec_reg; + if (vector_extensions_used_for_mode (mode) && CONSTANT_P (value)) + { + if (mode == DImode) + mode = TARGET_64BIT ? V2DImode : V4SImode; + vec_reg = gen_reg_rtx (mode); + emit_move_insn (vec_reg, value); + emit_insn (gen_strset (destptr, dest, vec_reg)); + } + else + emit_insn (gen_strset (destptr, dest, value)); +} + +/* Output code to copy (count % max_size) bytes from SRC to DEST. */ static void expand_movmem_epilogue (rtx destmem, rtx srcmem, rtx destptr, rtx srcptr, rtx count, int max_size) @@ -20924,43 +21096,55 @@ expand_movmem_epilogue (rtx destmem, rtx srcmem, HOST_WIDE_INT countval = INTVAL (count); int offset = 0; - if ((countval & 0x10) && max_size > 16) + int remainder_size = countval % max_size; + enum machine_mode move_mode = Pmode; + + /* Firstly, try to move data with the widest possible mode. + Remaining part we'll move using Pmode and narrower modes. */ + if (TARGET_SSE) { - if (TARGET_64BIT) - { - emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset); - emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8); - } - else - gcc_unreachable (); - offset += 16; + if (max_size >= GET_MODE_SIZE (V4SImode)) + move_mode = V4SImode; + else if (max_size >= GET_MODE_SIZE (DImode)) + move_mode = DImode; } - if ((countval & 0x08) && max_size > 8) + + while (remainder_size >= GET_MODE_SIZE (move_mode)) { - if (TARGET_64BIT) - emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset); - else - { - emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset); - emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4); - } - offset += 8; + emit_strmov (destmem, srcmem, destptr, srcptr, move_mode, offset); + offset += GET_MODE_SIZE (move_mode); + remainder_size -= GET_MODE_SIZE (move_mode); } - if ((countval & 0x04) && max_size > 4) + + /* Move the remaining part of epilogue - its size might be + a size of the widest mode. */ + move_mode = Pmode; + while (remainder_size >= GET_MODE_SIZE (move_mode)) { - emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset); + emit_strmov (destmem, srcmem, destptr, srcptr, move_mode, offset); + offset += GET_MODE_SIZE (move_mode); + remainder_size -= GET_MODE_SIZE (move_mode); + } + + if (remainder_size >= 4) + { + emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset); offset += 4; + remainder_size -= 4; } - if ((countval & 0x02) && max_size > 2) + if (remainder_size >= 2) { - emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset); + emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset); offset += 2; + remainder_size -= 2; } - if ((countval & 0x01) && max_size > 1) + if (remainder_size >= 1) { - emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset); + emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset); offset += 1; + remainder_size -= 1; } + gcc_assert (remainder_size == 0); return; } if (max_size > 8) @@ -21066,87 +21250,122 @@ expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value, 1, max_size / 2); } -/* Output code to set at most count & (max_size - 1) bytes starting by DEST. */ +/* Output code to set at most count & (max_size - 1) bytes starting by + DESTMEM. 
*/ static void -expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size) +expand_setmem_epilogue (rtx destmem, rtx destptr, rtx promoted_to_vector_value, + rtx value, rtx count, int max_size) { - rtx dest; - if (CONST_INT_P (count)) { HOST_WIDE_INT countval = INTVAL (count); int offset = 0; - if ((countval & 0x10) && max_size > 16) + int remainder_size = countval % max_size; + enum machine_mode move_mode = Pmode; + enum machine_mode sse_mode = TARGET_64BIT ? V2DImode : V4SImode; + rtx promoted_value = NULL_RTX; + + /* Firstly, try to move data with the widest possible mode. + Remaining part we'll move using Pmode and narrower modes. */ + if (TARGET_SSE) { - if (TARGET_64BIT) - { - dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset); - emit_insn (gen_strset (destptr, dest, value)); - dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8); - emit_insn (gen_strset (destptr, dest, value)); - } - else - gcc_unreachable (); - offset += 16; + if (max_size >= GET_MODE_SIZE (sse_mode)) + move_mode = sse_mode; + else if (max_size >= GET_MODE_SIZE (DImode)) + move_mode = DImode; + if (!VECTOR_MODE_P (GET_MODE (promoted_to_vector_value))) + promoted_to_vector_value = NULL_RTX; } - if ((countval & 0x08) && max_size > 8) + + while (remainder_size >= GET_MODE_SIZE (move_mode)) { - if (TARGET_64BIT) - { - dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset); - emit_insn (gen_strset (destptr, dest, value)); - } - else - { - dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset); - emit_insn (gen_strset (destptr, dest, value)); - dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4); - emit_insn (gen_strset (destptr, dest, value)); - } - offset += 8; + if (GET_MODE (destmem) != move_mode) + destmem = change_address (destmem, move_mode, destptr); + if (!promoted_to_vector_value) + promoted_to_vector_value = + targetm.promote_rtx_for_memset (move_mode, value); + emit_strset (destmem, promoted_to_vector_value, destptr, + move_mode, offset); + + offset += GET_MODE_SIZE (move_mode); + remainder_size -= GET_MODE_SIZE (move_mode); + } + + /* Move the remaining part of epilogue - its size might be + a size of the widest mode. 
*/ + move_mode = Pmode; + promoted_value = NULL_RTX; + while (remainder_size >= GET_MODE_SIZE (move_mode)) + { + if (!promoted_value) + promoted_value = promote_duplicated_reg (move_mode, value); + emit_strset (destmem, promoted_value, destptr, move_mode, offset); + offset += GET_MODE_SIZE (move_mode); + remainder_size -= GET_MODE_SIZE (move_mode); } - if ((countval & 0x04) && max_size > 4) + + if (!promoted_value) + promoted_value = promote_duplicated_reg (move_mode, value); + if (remainder_size >= 4) { - dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset); - emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value))); + emit_strset (destmem, gen_lowpart (SImode, promoted_value), destptr, + SImode, offset); offset += 4; + remainder_size -= 4; } - if ((countval & 0x02) && max_size > 2) + if (remainder_size >= 2) { - dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset); - emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value))); - offset += 2; + emit_strset (destmem, gen_lowpart (HImode, promoted_value), destptr, + HImode, offset); + offset +=2; + remainder_size -= 2; } - if ((countval & 0x01) && max_size > 1) + if (remainder_size >= 1) { - dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset); - emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value))); + emit_strset (destmem, gen_lowpart (QImode, promoted_value), destptr, + QImode, offset); offset += 1; + remainder_size -= 1; } + gcc_assert (remainder_size == 0); return; } + + /* count isn't const. */ if (max_size > 32) { - expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size); + expand_setmem_epilogue_via_loop (destmem, destptr, value, count, + max_size); return; } + /* If it turned out, that we promoted value to non-vector register, we can + reuse it. 
*/ + if (!VECTOR_MODE_P (GET_MODE (promoted_to_vector_value))) + value = promoted_to_vector_value; + if (max_size > 16) { rtx label = ix86_expand_aligntest (count, 16, true); if (TARGET_64BIT) { - dest = change_address (destmem, DImode, destptr); - emit_insn (gen_strset (destptr, dest, value)); - emit_insn (gen_strset (destptr, dest, value)); + destmem = change_address (destmem, DImode, destptr); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (DImode, + value))); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (DImode, + value))); } else { - dest = change_address (destmem, SImode, destptr); - emit_insn (gen_strset (destptr, dest, value)); - emit_insn (gen_strset (destptr, dest, value)); - emit_insn (gen_strset (destptr, dest, value)); - emit_insn (gen_strset (destptr, dest, value)); + destmem = change_address (destmem, SImode, destptr); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, + value))); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, + value))); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, + value))); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, + value))); } emit_label (label); LABEL_NUSES (label) = 1; @@ -21156,14 +21375,17 @@ expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_ rtx label = ix86_expand_aligntest (count, 8, true); if (TARGET_64BIT) { - dest = change_address (destmem, DImode, destptr); - emit_insn (gen_strset (destptr, dest, value)); + destmem = change_address (destmem, DImode, destptr); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (DImode, + value))); } else { - dest = change_address (destmem, SImode, destptr); - emit_insn (gen_strset (destptr, dest, value)); - emit_insn (gen_strset (destptr, dest, value)); + destmem = change_address (destmem, SImode, destptr); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, + value))); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, + value))); } emit_label (label); LABEL_NUSES (label) = 1; @@ -21171,24 +21393,24 @@ expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_ if (max_size > 4) { rtx label = ix86_expand_aligntest (count, 4, true); - dest = change_address (destmem, SImode, destptr); - emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value))); + destmem = change_address (destmem, SImode, destptr); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value))); emit_label (label); LABEL_NUSES (label) = 1; } if (max_size > 2) { rtx label = ix86_expand_aligntest (count, 2, true); - dest = change_address (destmem, HImode, destptr); - emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value))); + destmem = change_address (destmem, HImode, destptr); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value))); emit_label (label); LABEL_NUSES (label) = 1; } if (max_size > 1) { rtx label = ix86_expand_aligntest (count, 1, true); - dest = change_address (destmem, QImode, destptr); - emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value))); + destmem = change_address (destmem, QImode, destptr); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value))); emit_label (label); LABEL_NUSES (label) = 1; } @@ -21204,8 +21426,8 @@ expand_movmem_prologue (rtx destmem, rtx srcmem, if (align <= 1 && desired_alignment > 1) { rtx label = ix86_expand_aligntest (destptr, 1, false); - srcmem = change_address (srcmem, QImode, srcptr); - destmem = change_address (destmem, QImode, destptr); + srcmem = 
adjust_automodify_address_1 (srcmem, QImode, srcptr, 0, 1); + destmem = adjust_automodify_address_1 (destmem, QImode, destptr, 0, 1); emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem)); ix86_adjust_counter (count, 1); emit_label (label); @@ -21214,8 +21436,8 @@ expand_movmem_prologue (rtx destmem, rtx srcmem, if (align <= 2 && desired_alignment > 2) { rtx label = ix86_expand_aligntest (destptr, 2, false); - srcmem = change_address (srcmem, HImode, srcptr); - destmem = change_address (destmem, HImode, destptr); + srcmem = adjust_automodify_address_1 (srcmem, HImode, srcptr, 0, 1); + destmem = adjust_automodify_address_1 (destmem, HImode, destptr, 0, 1); emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem)); ix86_adjust_counter (count, 2); emit_label (label); @@ -21224,14 +21446,34 @@ expand_movmem_prologue (rtx destmem, rtx srcmem, if (align <= 4 && desired_alignment > 4) { rtx label = ix86_expand_aligntest (destptr, 4, false); - srcmem = change_address (srcmem, SImode, srcptr); - destmem = change_address (destmem, SImode, destptr); + srcmem = adjust_automodify_address_1 (srcmem, SImode, srcptr, 0, 1); + destmem = adjust_automodify_address_1 (destmem, SImode, destptr, 0, 1); emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem)); ix86_adjust_counter (count, 4); emit_label (label); LABEL_NUSES (label) = 1; } - gcc_assert (desired_alignment <= 8); + if (align <= 8 && desired_alignment > 8) + { + rtx label = ix86_expand_aligntest (destptr, 8, false); + if (TARGET_64BIT || TARGET_SSE) + { + srcmem = adjust_automodify_address_1 (srcmem, DImode, srcptr, 0, 1); + destmem = adjust_automodify_address_1 (destmem, DImode, destptr, 0, 1); + emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem)); + } + else + { + srcmem = adjust_automodify_address_1 (srcmem, SImode, srcptr, 0, 1); + destmem = adjust_automodify_address_1 (destmem, SImode, destptr, 0, 1); + emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem)); + emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem)); + } + ix86_adjust_counter (count, 8); + emit_label (label); + LABEL_NUSES (label) = 1; + } + gcc_assert (desired_alignment <= 16); } /* Copy enough from DST to SRC to align DST known to DESIRED_ALIGN. 
@@ -21286,6 +21528,37 @@ expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg, off = 4; emit_insn (gen_strmov (destreg, dst, srcreg, src)); } + if (align_bytes & 8) + { + if (TARGET_64BIT || TARGET_SSE) + { + dst = adjust_automodify_address_nv (dst, DImode, destreg, off); + src = adjust_automodify_address_nv (src, DImode, srcreg, off); + emit_insn (gen_strmov (destreg, dst, srcreg, src)); + } + else + { + dst = adjust_automodify_address_nv (dst, SImode, destreg, off); + src = adjust_automodify_address_nv (src, SImode, srcreg, off); + emit_insn (gen_strmov (destreg, dst, srcreg, src)); + emit_insn (gen_strmov (destreg, dst, srcreg, src)); + } + if (MEM_ALIGN (dst) < 8 * BITS_PER_UNIT) + set_mem_align (dst, 8 * BITS_PER_UNIT); + if (src_align_bytes >= 0) + { + unsigned int src_align = 0; + if ((src_align_bytes & 7) == (align_bytes & 7)) + src_align = 8; + else if ((src_align_bytes & 3) == (align_bytes & 3)) + src_align = 4; + else if ((src_align_bytes & 1) == (align_bytes & 1)) + src_align = 2; + if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT) + set_mem_align (src, src_align * BITS_PER_UNIT); + } + off = 8; + } dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off); src = adjust_automodify_address_nv (src, BLKmode, srcreg, off); if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT) @@ -21293,7 +21566,9 @@ expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg, if (src_align_bytes >= 0) { unsigned int src_align = 0; - if ((src_align_bytes & 7) == (align_bytes & 7)) + if ((src_align_bytes & 15) == (align_bytes & 15)) + src_align = 16; + else if ((src_align_bytes & 7) == (align_bytes & 7)) src_align = 8; else if ((src_align_bytes & 3) == (align_bytes & 3)) src_align = 4; @@ -21321,7 +21596,7 @@ expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count, if (align <= 1 && desired_alignment > 1) { rtx label = ix86_expand_aligntest (destptr, 1, false); - destmem = change_address (destmem, QImode, destptr); + destmem = adjust_automodify_address_1 (destmem, QImode, destptr, 0, 1); emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value))); ix86_adjust_counter (count, 1); emit_label (label); @@ -21330,7 +21605,7 @@ expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count, if (align <= 2 && desired_alignment > 2) { rtx label = ix86_expand_aligntest (destptr, 2, false); - destmem = change_address (destmem, HImode, destptr); + destmem = adjust_automodify_address_1 (destmem, HImode, destptr, 0, 1); emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value))); ix86_adjust_counter (count, 2); emit_label (label); @@ -21339,13 +21614,23 @@ expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count, if (align <= 4 && desired_alignment > 4) { rtx label = ix86_expand_aligntest (destptr, 4, false); - destmem = change_address (destmem, SImode, destptr); + destmem = adjust_automodify_address_1 (destmem, SImode, destptr, 0, 1); emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value))); ix86_adjust_counter (count, 4); emit_label (label); LABEL_NUSES (label) = 1; } - gcc_assert (desired_alignment <= 8); + if (align <= 8 && desired_alignment > 8) + { + rtx label = ix86_expand_aligntest (destptr, 8, false); + destmem = adjust_automodify_address_1 (destmem, SImode, destptr, 0, 1); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value))); + emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value))); + ix86_adjust_counter (count, 8); + emit_label 
(label); + LABEL_NUSES (label) = 1; + } + gcc_assert (desired_alignment <= 16); } /* Set enough from DST to align DST known to by aligned by ALIGN to @@ -21381,6 +21666,19 @@ expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value, emit_insn (gen_strset (destreg, dst, gen_lowpart (SImode, value))); } + if (align_bytes & 8) + { + dst = adjust_automodify_address_nv (dst, SImode, destreg, off); + emit_insn (gen_strset (destreg, dst, + gen_lowpart (SImode, value))); + off = 4; + dst = adjust_automodify_address_nv (dst, SImode, destreg, off); + emit_insn (gen_strset (destreg, dst, + gen_lowpart (SImode, value))); + if (MEM_ALIGN (dst) < 8 * BITS_PER_UNIT) + set_mem_align (dst, 8 * BITS_PER_UNIT); + off = 4; + } dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off); if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT) set_mem_align (dst, desired_align * BITS_PER_UNIT); @@ -21392,7 +21690,7 @@ expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value, /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */ static enum stringop_alg decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset, - int *dynamic_check) + int *dynamic_check, bool align_unknown) { const struct stringop_algs * algs; bool optimize_for_speed; @@ -21401,7 +21699,7 @@ decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset, consider such algorithms if the user has appropriated those registers for their own purposes. */ bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG] - || (memset + || (memset ? fixed_regs[AX_REG] : fixed_regs[SI_REG])); #define ALG_USABLE_P(alg) (rep_prefix_usable \ @@ -21414,7 +21712,7 @@ decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset, of time processing large blocks. */ if (optimize_function_for_size_p (cfun) || (optimize_insn_for_size_p () - && expected_size != -1 && expected_size < 256)) + && expected_size != -1 && expected_size < 256)) optimize_for_speed = false; else optimize_for_speed = true; @@ -21423,9 +21721,9 @@ decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset, *dynamic_check = -1; if (memset) - algs = &cost->memset[TARGET_64BIT != 0]; + algs = &cost->memset[align_unknown][TARGET_64BIT != 0]; else - algs = &cost->memcpy[TARGET_64BIT != 0]; + algs = &cost->memcpy[align_unknown][TARGET_64BIT != 0]; if (ix86_stringop_alg != no_stringop && ALG_USABLE_P (ix86_stringop_alg)) return ix86_stringop_alg; /* rep; movq or rep; movl is the smallest variant. */ @@ -21489,29 +21787,33 @@ decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset, enum stringop_alg alg; int i; bool any_alg_usable_p = true; + bool only_libcall_fits = true; for (i = 0; i < MAX_STRINGOP_ALGS; i++) - { - enum stringop_alg candidate = algs->size[i].alg; - any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate); + { + enum stringop_alg candidate = algs->size[i].alg; + any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate); - if (candidate != libcall && candidate - && ALG_USABLE_P (candidate)) - max = algs->size[i].max; - } + if (candidate != libcall && candidate + && ALG_USABLE_P (candidate)) + { + max = algs->size[i].max; + only_libcall_fits = false; + } + } /* If there aren't any usable algorithms, then recursing on - smaller sizes isn't going to find anything. Just return the - simple byte-at-a-time copy loop. */ - if (!any_alg_usable_p) - { - /* Pick something reasonable. 
*/ - if (TARGET_INLINE_STRINGOPS_DYNAMICALLY) - *dynamic_check = 128; - return loop_1_byte; - } + smaller sizes isn't going to find anything. Just return the + simple byte-at-a-time copy loop. */ + if (!any_alg_usable_p || only_libcall_fits) + { + /* Pick something reasonable. */ + if (TARGET_INLINE_STRINGOPS_DYNAMICALLY) + *dynamic_check = 128; + return loop_1_byte; + } if (max == -1) max = 4096; - alg = decide_alg (count, max / 2, memset, dynamic_check); + alg = decide_alg (count, max / 2, memset, dynamic_check, align_unknown); gcc_assert (*dynamic_check == -1); gcc_assert (alg != libcall); if (TARGET_INLINE_STRINGOPS_DYNAMICALLY) @@ -21535,9 +21837,11 @@ decide_alignment (int align, case no_stringop: gcc_unreachable (); case loop: - case unrolled_loop: desired_align = GET_MODE_SIZE (Pmode); break; + case unrolled_loop: + desired_align = GET_MODE_SIZE (TARGET_SSE ? V4SImode : Pmode); + break; case rep_prefix_8_byte: desired_align = 8; break; @@ -21625,6 +21929,11 @@ ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp, enum stringop_alg alg; int dynamic_check; bool need_zero_guard = false; + bool align_unknown; + int unroll_factor; + enum machine_mode move_mode; + rtx loop_iter = NULL_RTX; + int dst_offset, src_offset; if (CONST_INT_P (align_exp)) align = INTVAL (align_exp); @@ -21648,9 +21957,17 @@ ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp, /* Step 0: Decide on preferred algorithm, desired alignment and size of chunks to be copied by main loop. */ - - alg = decide_alg (count, expected_size, false, &dynamic_check); + dst_offset = get_mem_align_offset (dst, MOVE_MAX*BITS_PER_UNIT); + src_offset = get_mem_align_offset (src, MOVE_MAX*BITS_PER_UNIT); + align_unknown = (dst_offset < 0 + || src_offset < 0 + || src_offset != dst_offset); + alg = decide_alg (count, expected_size, false, &dynamic_check, align_unknown); desired_align = decide_alignment (align, alg, expected_size); + if (align_unknown) + desired_align = align; + unroll_factor = 1; + move_mode = Pmode; if (!TARGET_ALIGN_STRINGOPS) align = desired_align; @@ -21669,11 +21986,16 @@ ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp, gcc_unreachable (); case loop: need_zero_guard = true; - size_needed = GET_MODE_SIZE (Pmode); + move_mode = Pmode; + unroll_factor = 1; + size_needed = GET_MODE_SIZE (move_mode) * unroll_factor; break; case unrolled_loop: need_zero_guard = true; - size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2); + /* Use SSE instructions, if possible. */ + move_mode = TARGET_SSE ? (align_unknown ? DImode : V4SImode) : Pmode; + unroll_factor = 4; + size_needed = GET_MODE_SIZE (move_mode) * unroll_factor; break; case rep_prefix_8_byte: size_needed = 8; @@ -21785,6 +22107,8 @@ ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp, dst = change_address (dst, BLKmode, destreg); expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align, desired_align); + set_mem_align (src, desired_align*BITS_PER_UNIT); + set_mem_align (dst, desired_align*BITS_PER_UNIT); } else { @@ -21842,11 +22166,14 @@ ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp, count_exp, Pmode, 1, expected_size); break; case unrolled_loop: - /* Unroll only by factor of 2 in 32bit mode, since we don't have enough - registers for 4 temporaries anyway. */ - expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL, - count_exp, Pmode, TARGET_64BIT ? 
4 : 2, - expected_size); + /* In some cases we want to use the same iterator in several adjacent + loops, so here we save loop iterator rtx and don't update addresses. */ + loop_iter = expand_set_or_movmem_via_loop_with_iter (dst, src, destreg, + srcreg, NULL, + count_exp, NULL_RTX, + move_mode, + unroll_factor, + expected_size, false); break; case rep_prefix_8_byte: expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp, @@ -21897,9 +22224,43 @@ ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp, LABEL_NUSES (label) = 1; } + /* We haven't updated addresses, so we'll do it now. + Also, if the epilogue seems to be big, we'll generate a loop (not + unrolled) in it. We'll do it only if alignment is unknown, because in + this case in epilogue we have to perform memmove by bytes, which is very + slow. */ + if (alg == unrolled_loop) + { + rtx tmp; + if (align_unknown && unroll_factor > 1) + { + /* Reduce epilogue's size by creating not-unrolled loop. If we won't + do this, we can have very big epilogue - when alignment is statically + unknown we'll have the epilogue byte by byte which may be very slow. */ + loop_iter = expand_set_or_movmem_via_loop_with_iter (dst, src, destreg, + srcreg, NULL, count_exp, + loop_iter, move_mode, 1, + expected_size, false); + src = change_address (src, BLKmode, srcreg); + dst = change_address (dst, BLKmode, destreg); + epilogue_size_needed = GET_MODE_SIZE (move_mode); + } + tmp = expand_simple_binop (Pmode, PLUS, destreg, loop_iter, destreg, + true, OPTAB_LIB_WIDEN); + if (tmp != destreg) + emit_move_insn (destreg, tmp); + + tmp = expand_simple_binop (Pmode, PLUS, srcreg, loop_iter, srcreg, + true, OPTAB_LIB_WIDEN); + if (tmp != srcreg) + emit_move_insn (srcreg, tmp); + } if (count_exp != const0_rtx && epilogue_size_needed > 1) - expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp, - epilogue_size_needed); + { + expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp, + epilogue_size_needed); + } + if (jump_around_label) emit_label (jump_around_label); return true; @@ -21917,7 +22278,37 @@ promote_duplicated_reg (enum machine_mode mode, rtx val) rtx tmp; int nops = mode == DImode ? 
3 : 2; + if (VECTOR_MODE_P (mode)) + { + enum machine_mode inner = GET_MODE_INNER (mode); + rtx promoted_val, vec_reg; + if (CONST_INT_P (val)) + return ix86_build_const_vector (mode, true, val); + + promoted_val = promote_duplicated_reg (inner, val); + vec_reg = gen_reg_rtx (mode); + switch (mode) + { + case V2DImode: + emit_insn (gen_vec_dupv2di (vec_reg, promoted_val)); + break; + case V4SImode: + emit_insn (gen_vec_dupv4si (vec_reg, promoted_val)); + break; + default: + gcc_unreachable (); + break; + } + + return vec_reg; + } gcc_assert (mode == SImode || mode == DImode); + if (mode == DImode && !TARGET_64BIT) + { + rtx vec_reg = promote_duplicated_reg (V4SImode, val); + vec_reg = convert_to_mode (V2DImode, vec_reg, 1); + return vec_reg; + } if (val == const0_rtx) return copy_to_mode_reg (mode, const0_rtx); if (CONST_INT_P (val)) @@ -21983,11 +22374,21 @@ promote_duplicated_reg (enum machine_mode mode, rtx val) static rtx promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align) { - rtx promoted_val; + rtx promoted_val = NULL_RTX; - if (TARGET_64BIT - && (size_needed > 4 || (desired_align > align && desired_align > 4))) - promoted_val = promote_duplicated_reg (DImode, val); + if (size_needed > 8 || (desired_align > align && desired_align > 8)) + { + gcc_assert (TARGET_SSE); + if (TARGET_64BIT) + promoted_val = promote_duplicated_reg (V2DImode, val); + else + promoted_val = promote_duplicated_reg (V4SImode, val); + } + else if (size_needed > 4 || (desired_align > align && desired_align > 4)) + { + gcc_assert (TARGET_64BIT || TARGET_SSE); + promoted_val = promote_duplicated_reg (DImode, val); + } else if (size_needed > 2 || (desired_align > align && desired_align > 2)) promoted_val = promote_duplicated_reg (SImode, val); else if (size_needed > 1 || (desired_align > align && desired_align > 1)) @@ -22013,12 +22414,17 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp, unsigned HOST_WIDE_INT count = 0; HOST_WIDE_INT expected_size = -1; int size_needed = 0, epilogue_size_needed; + int promote_size_needed = 0; int desired_align = 0, align_bytes = 0; enum stringop_alg alg; rtx promoted_val = NULL; - bool force_loopy_epilogue = false; + rtx vec_promoted_val = NULL; int dynamic_check; bool need_zero_guard = false; + bool align_unknown; + unsigned int unroll_factor; + enum machine_mode move_mode; + rtx loop_iter = NULL_RTX; if (CONST_INT_P (align_exp)) align = INTVAL (align_exp); @@ -22038,8 +22444,11 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp, /* Step 0: Decide on preferred algorithm, desired alignment and size of chunks to be copied by main loop. */ - alg = decide_alg (count, expected_size, true, &dynamic_check); + align_unknown = get_mem_align_offset (dst, BITS_PER_UNIT) < 0; + alg = decide_alg (count, expected_size, true, &dynamic_check, align_unknown); desired_align = decide_alignment (align, alg, expected_size); + unroll_factor = 1; + move_mode = Pmode; if (!TARGET_ALIGN_STRINGOPS) align = desired_align; @@ -22057,11 +22466,21 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp, gcc_unreachable (); case loop: need_zero_guard = true; - size_needed = GET_MODE_SIZE (Pmode); + move_mode = Pmode; + size_needed = GET_MODE_SIZE (move_mode) * unroll_factor; break; case unrolled_loop: need_zero_guard = true; - size_needed = GET_MODE_SIZE (Pmode) * 4; + /* Use SSE instructions, if possible. */ + move_mode = TARGET_SSE + ? (TARGET_64BIT ? 
V2DImode : V4SImode) + : Pmode; + unroll_factor = 1; + /* Select maximal available 1,2 or 4 unroll factor. */ + while (GET_MODE_SIZE (move_mode) * unroll_factor * 2 < count + && unroll_factor < 4) + unroll_factor *= 2; + size_needed = GET_MODE_SIZE (move_mode) * unroll_factor; break; case rep_prefix_8_byte: size_needed = 8; @@ -22078,6 +22497,7 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp, break; } epilogue_size_needed = size_needed; + promote_size_needed = GET_MODE_SIZE (Pmode); /* Step 1: Prologue guard. */ @@ -22106,8 +22526,10 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp, main loop and epilogue (ie one load of the big constant in the front of all code. */ if (CONST_INT_P (val_exp)) - promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed, - desired_align, align); + promoted_val = promote_duplicated_reg_to_size (val_exp, + promote_size_needed, + promote_size_needed, + align); /* Ensure that alignment prologue won't copy past end of block. */ if (size_needed > 1 || (desired_align > 1 && desired_align > align)) { @@ -22116,12 +22538,6 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp, Make sure it is power of 2. */ epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed); - /* To improve performance of small blocks, we jump around the VAL - promoting mode. This mean that if the promoted VAL is not constant, - we might not use it in the epilogue and have to use byte - loop variant. */ - if (epilogue_size_needed > 2 && !promoted_val) - force_loopy_epilogue = true; if (count) { if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed) @@ -22162,8 +22578,10 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp, /* Do the expensive promotion once we branched off the small blocks. 
*/ if (!promoted_val) - promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed, - desired_align, align); + promoted_val = promote_duplicated_reg_to_size (val_exp, + promote_size_needed, + promote_size_needed, + align); gcc_assert (desired_align >= 1 && align >= 1); if (desired_align > align) @@ -22177,6 +22595,7 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp, dst = change_address (dst, BLKmode, destreg); expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align, desired_align); + set_mem_align (dst, desired_align*BITS_PER_UNIT); } else { @@ -22186,6 +22605,8 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp, desired_align, align_bytes); count_exp = plus_constant (count_exp, -align_bytes); count -= align_bytes; + if (count < (unsigned HOST_WIDE_INT) size_needed) + goto epilogue; } if (need_zero_guard && (count < (unsigned HOST_WIDE_INT) size_needed @@ -22227,7 +22648,7 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp, case no_stringop: gcc_unreachable (); case loop_1_byte: - expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val, + expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, val_exp, count_exp, QImode, 1, expected_size); break; case loop: @@ -22235,8 +22656,14 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp, count_exp, Pmode, 1, expected_size); break; case unrolled_loop: - expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val, - count_exp, Pmode, 4, expected_size); + vec_promoted_val = + promote_duplicated_reg_to_size (promoted_val, + GET_MODE_SIZE (move_mode), + desired_align, align); + loop_iter = expand_set_or_movmem_via_loop_with_iter (dst, NULL, destreg, + NULL, vec_promoted_val, count_exp, + NULL_RTX, move_mode, unroll_factor, + expected_size, false); break; case rep_prefix_8_byte: expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp, @@ -22280,15 +22707,29 @@ ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp, LABEL_NUSES (label) = 1; } epilogue: - if (count_exp != const0_rtx && epilogue_size_needed > 1) + if (alg == unrolled_loop) { - if (force_loopy_epilogue) - expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp, - epilogue_size_needed); - else - expand_setmem_epilogue (dst, destreg, promoted_val, count_exp, - epilogue_size_needed); + rtx tmp; + if (align_unknown && unroll_factor > 1) + { + /* Reduce epilogue's size by creating not-unrolled loop. If we won't + do this, we can have very big epilogue - when alignment is statically + unknown we'll have the epilogue byte by byte which may be very slow. */ + loop_iter = expand_set_or_movmem_via_loop_with_iter (dst, NULL, destreg, + NULL, vec_promoted_val, count_exp, + loop_iter, move_mode, 1, + expected_size, false); + dst = change_address (dst, BLKmode, destreg); + epilogue_size_needed = GET_MODE_SIZE (move_mode); + } + tmp = expand_simple_binop (Pmode, PLUS, destreg, loop_iter, destreg, + true, OPTAB_LIB_WIDEN); + if (tmp != destreg) + emit_move_insn (destreg, tmp); } + if (count_exp != const0_rtx && epilogue_size_needed > 1) + expand_setmem_epilogue (dst, destreg, promoted_val, val_exp, count_exp, + epilogue_size_needed); if (jump_around_label) emit_label (jump_around_label); return true; @@ -37436,6 +37877,92 @@ ix86_autovectorize_vector_sizes (void) return (TARGET_AVX && !TARGET_PREFER_AVX128) ? 32 | 16 : 0; } +/* Target hook. Prevent unaligned access to data in vector modes. 
*/ + +static bool +ix86_slow_unaligned_access (enum machine_mode mode, + unsigned int align) +{ + if (TARGET_AVX) + { + if (GET_MODE_SIZE (mode) == 32) + { + if (align <= 16) + return (TARGET_AVX256_SPLIT_UNALIGNED_LOAD || + TARGET_AVX256_SPLIT_UNALIGNED_STORE); + else + return false; + } + } + + if (GET_MODE_SIZE (mode) > 8) + { + return (! TARGET_SSE_UNALIGNED_LOAD_OPTIMAL && + ! TARGET_SSE_UNALIGNED_STORE_OPTIMAL); + } + + return false; +} + +/* Target hook. Returns rtx of mode MODE with promoted value VAL, that is + supposed to represent one byte. MODE could be a vector mode. + Example: + 1) VAL = const_int (0xAB), mode = SImode, + the result is const_int (0xABABABAB). + 2) if VAL isn't const, then the result will be the result of MUL-instruction + of VAL and const_int (0x01010101) (for SImode). */ + +static rtx +ix86_promote_rtx_for_memset (enum machine_mode mode ATTRIBUTE_UNUSED, + rtx val) +{ + enum machine_mode val_mode = GET_MODE (val); + gcc_assert (VALID_INT_MODE_P (val_mode) || val_mode == VOIDmode); + + if (vector_extensions_used_for_mode (mode) && TARGET_SSE) + { + rtx promoted_val, vec_reg; + enum machine_mode vec_mode = VOIDmode; + if (TARGET_AVX2) + vec_mode = TARGET_64BIT ? V4DImode : V8SImode; + else + vec_mode = TARGET_64BIT ? V2DImode : V4SImode; + gcc_assert (vec_mode != VOIDmode); + if (CONST_INT_P (val)) + { + rtx const_vec; + HOST_WIDE_INT int_val = (UINTVAL (val) & 0xFF) + * (TARGET_64BIT + ? 0x0101010101010101 + : 0x01010101); + val = gen_int_mode (int_val, Pmode); + vec_reg = gen_reg_rtx (vec_mode); + const_vec = ix86_build_const_vector (vec_mode, true, val); + if (mode != vec_mode) + const_vec = convert_to_mode (vec_mode, const_vec, 1); + emit_move_insn (vec_reg, const_vec); + return vec_reg; + } + /* Else: val isn't const. */ + promoted_val = promote_duplicated_reg (Pmode, val); + vec_reg = gen_reg_rtx (vec_mode); + switch (vec_mode) + { + case V2DImode: + emit_insn (gen_vec_dupv2di (vec_reg, promoted_val)); + break; + case V4SImode: + emit_insn (gen_vec_dupv4si (vec_reg, promoted_val)); + break; + default: + gcc_unreachable (); + break; + } + return vec_reg; + } + return NULL_RTX; +} + /* Initialize the GCC target structure. */ #undef TARGET_RETURN_IN_MEMORY #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory @@ -37743,6 +38270,12 @@ ix86_autovectorize_vector_sizes (void) #undef TARGET_CONDITIONAL_REGISTER_USAGE #define TARGET_CONDITIONAL_REGISTER_USAGE ix86_conditional_register_usage +#undef TARGET_SLOW_UNALIGNED_ACCESS +#define TARGET_SLOW_UNALIGNED_ACCESS ix86_slow_unaligned_access + +#undef TARGET_PROMOTE_RTX_FOR_MEMSET +#define TARGET_PROMOTE_RTX_FOR_MEMSET ix86_promote_rtx_for_memset + #if TARGET_MACHO #undef TARGET_INIT_LIBFUNCS #define TARGET_INIT_LIBFUNCS darwin_rename_builtins diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h index bd69ec2..550b2ab 100644 --- a/gcc/config/i386/i386.h +++ b/gcc/config/i386/i386.h @@ -159,8 +159,12 @@ struct processor_costs { const int fchs; /* cost of FCHS instruction. */ const int fsqrt; /* cost of FSQRT instruction. */ /* Specify what algorithm - to use for stringops on unknown size. */ - struct stringop_algs memcpy[2], memset[2]; + to use for stringops on unknown size. + First index is used to specify whether + alignment is known or not. + Second - to specify whether 32 or 64 bits + are used. */ + struct stringop_algs memcpy[2][2], memset[2][2]; const int scalar_stmt_cost; /* Cost of any scalar operation, excluding load and store. */ const int scalar_load_cost; /* Cost of scalar load. 
*/ @@ -1712,7 +1716,7 @@ typedef struct ix86_args { /* If a clear memory operation would take CLEAR_RATIO or more simple move-instruction sequences, we will do a clrmem or libcall instead. */ -#define CLEAR_RATIO(speed) ((speed) ? MIN (6, ix86_cost->move_ratio) : 2) +#define CLEAR_RATIO(speed) ((speed) ? ix86_cost->move_ratio : 2) /* Define if shifts truncate the shift count which implies one can omit a sign-extension or zero-extension of a shift count. diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md index ff77003..b8ecc59 100644 --- a/gcc/config/i386/sse.md +++ b/gcc/config/i386/sse.md @@ -7426,6 +7426,13 @@ (set_attr "prefix" "maybe_vex,maybe_vex,orig,orig,vex") (set_attr "mode" "TI,TI,V4SF,SF,SF")]) +(define_expand "sse2_loadq" + [(set (match_operand:V2DI 0 "register_operand") + (vec_concat:V2DI + (match_operand:DI 1 "memory_operand") + (const_int 0)))] + "!TARGET_64BIT && TARGET_SSE2") + (define_insn_and_split "sse2_stored" [(set (match_operand:SI 0 "nonimmediate_operand" "=xm,r") (vec_select:SI @@ -7537,6 +7544,16 @@ (set_attr "prefix" "maybe_vex,orig,vex,maybe_vex,orig,orig") (set_attr "mode" "V2SF,TI,TI,TI,V4SF,V2SF")]) +(define_expand "vec_dupv4si" + [(set (match_operand:V4SI 0 "register_operand" "") + (vec_duplicate:V4SI + (match_operand:SI 1 "nonimmediate_operand" "")))] + "TARGET_SSE" +{ + if (!TARGET_AVX) + operands[1] = force_reg (V4SImode, operands[1]); +}) + (define_insn "*vec_dupv4si_avx" [(set (match_operand:V4SI 0 "register_operand" "=x,x") (vec_duplicate:V4SI @@ -7578,6 +7595,16 @@ (set_attr "prefix" "orig,vex,maybe_vex") (set_attr "mode" "TI,TI,DF")]) +(define_expand "vec_dupv2di" + [(set (match_operand:V2DI 0 "register_operand" "") + (vec_duplicate:V2DI + (match_operand:DI 1 "nonimmediate_operand" "")))] + "TARGET_SSE" +{ + if (!TARGET_AVX) + operands[1] = force_reg (V2DImode, operands[1]); +}) + (define_insn "*vec_dupv2di" [(set (match_operand:V2DI 0 "register_operand" "=x,x") (vec_duplicate:V2DI diff --git a/gcc/cse.c b/gcc/cse.c index ae67685..3b6471d 100644 --- a/gcc/cse.c +++ b/gcc/cse.c @@ -4616,7 +4616,10 @@ cse_insn (rtx insn) to fold switch statements when an ADDR_DIFF_VEC is used. */ || (GET_CODE (src_folded) == MINUS && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF - && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF))) + && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF)) + /* Don't propagate vector-constants, as for now no architecture + supports vector immediates. */ + && !vector_extensions_used_for_mode (mode)) src_const = src_folded, src_const_elt = elt; else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here)) src_const = src_eqv_here, src_const_elt = src_eqv_elt; diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi index 90cef1c..4b7d67b 100644 --- a/gcc/doc/tm.texi +++ b/gcc/doc/tm.texi @@ -5780,6 +5780,32 @@ mode returned by @code{TARGET_VECTORIZE_PREFERRED_SIMD_MODE}. The default is zero which means to not iterate over other vector sizes. @end deftypefn +@deftypefn {Target Hook} bool TARGET_SLOW_UNALIGNED_ACCESS (enum machine_mode @var{mode}, unsigned int @var{align}) +This hook should return true if memory accesses in mode @var{mode} to data +aligned by @var{align} bits have a cost many times greater than aligned +accesses, for example if they are emulated in a trap handler. + +When this hook returns true, the compiler will act as if +@code{STRICT_ALIGNMENT} were nonzero when generating code for block +moves. This can cause significantly more instructions to be produced. 
+Therefore, the hook should not return true if unaligned accesses only add a +cycle or two to the time for a memory access. + +If the current compilation options ask for faster code, the hook can also +be used to prevent access to unaligned data in some set of modes even if +the processor can perform the access without trapping. + +By default the hook returns the value of the @code{SLOW_UNALIGNED_ACCESS} +macro if it is defined, and @code{STRICT_ALIGNMENT} otherwise. +@end deftypefn + +@deftypefn {Target Hook} rtx TARGET_PROMOTE_RTX_FOR_MEMSET (enum machine_mode @var{mode}, rtx @var{val}) +This hook returns an rtx of mode @var{mode} holding the promoted value @var{val}, or NULL. +The hook generates the instructions that are needed to promote +@var{val} to mode @var{mode}. +If the promotion instructions cannot be generated, the hook returns NULL. +@end deftypefn + @node Anchored Addresses @section Anchored Addresses @cindex anchored addresses @@ -6252,23 +6278,6 @@ may eliminate subsequent memory access if subsequent accesses occur to other fields in the same word of the structure, but to different bytes. @end defmac -@defmac SLOW_UNALIGNED_ACCESS (@var{mode}, @var{alignment}) -Define this macro to be the value 1 if memory accesses described by the -@var{mode} and @var{alignment} parameters have a cost many times greater -than aligned accesses, for example if they are emulated in a trap -handler. - -When this macro is nonzero, the compiler will act as if -@code{STRICT_ALIGNMENT} were nonzero when generating code for block -moves. This can cause significantly more instructions to be produced. -Therefore, do not set this macro nonzero if unaligned accesses only add a -cycle or two to the time for a memory access. - -If the value of this macro is always zero, it need not be defined. If -this macro is defined, it should produce a nonzero value when -@code{STRICT_ALIGNMENT} is nonzero. -@end defmac - @defmac MOVE_RATIO (@var{speed}) The threshold of number of scalar memory-to-memory move insns, @emph{below} which a sequence of insns should be generated instead of a diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in index 187122e..c7e2457 100644 --- a/gcc/doc/tm.texi.in +++ b/gcc/doc/tm.texi.in @@ -5718,6 +5718,32 @@ mode returned by @code{TARGET_VECTORIZE_PREFERRED_SIMD_MODE}. The default is zero which means to not iterate over other vector sizes. @end deftypefn +@hook TARGET_SLOW_UNALIGNED_ACCESS +This hook should return true if memory accesses in mode @var{mode} to data +aligned by @var{align} bits have a cost many times greater than aligned +accesses, for example if they are emulated in a trap handler. + +When this hook returns true, the compiler will act as if +@code{STRICT_ALIGNMENT} were nonzero when generating code for block +moves. This can cause significantly more instructions to be produced. +Therefore, the hook should not return true if unaligned accesses only add a +cycle or two to the time for a memory access. + +If the current compilation options ask for faster code, the hook can also +be used to prevent access to unaligned data in some set of modes even if +the processor can perform the access without trapping. + +By default the hook returns the value of the @code{SLOW_UNALIGNED_ACCESS} +macro if it is defined, and @code{STRICT_ALIGNMENT} otherwise. +@end deftypefn + +@hook TARGET_PROMOTE_RTX_FOR_MEMSET +This hook returns an rtx of mode @var{mode} holding the promoted value @var{val}, or NULL. +The hook generates the instructions that are needed to promote +@var{val} to mode @var{mode}.
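As a standalone illustration of the value this hook is expected to produce (mirroring the example documented for ix86_promote_rtx_for_memset earlier in this patch, where a const_int 0xAB promoted in SImode becomes 0xABABABAB), here is the byte replication in plain C; promote_byte_32 and promote_byte_64 are illustrative names, not GCC functions.

#include <assert.h>
#include <stdint.h>

static uint32_t
promote_byte_32 (uint32_t val)
{
  return (val & 0xFF) * 0x01010101u;            /* 0xAB -> 0xABABABAB */
}

static uint64_t
promote_byte_64 (uint64_t val)
{
  return (val & 0xFF) * 0x0101010101010101ull;  /* 0xAB -> 0xABAB...AB */
}

int
main (void)
{
  assert (promote_byte_32 (0xAB) == 0xABABABABu);
  assert (promote_byte_64 (0xAB) == 0xABABABABABABABABull);
  return 0;
}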
+If generation of instructions for promotion failed, the hook returns NULL. +@end deftypefn + @node Anchored Addresses @section Anchored Addresses @cindex anchored addresses @@ -6190,23 +6216,6 @@ may eliminate subsequent memory access if subsequent accesses occur to other fields in the same word of the structure, but to different bytes. @end defmac -@defmac SLOW_UNALIGNED_ACCESS (@var{mode}, @var{alignment}) -Define this macro to be the value 1 if memory accesses described by the -@var{mode} and @var{alignment} parameters have a cost many times greater -than aligned accesses, for example if they are emulated in a trap -handler. - -When this macro is nonzero, the compiler will act as if -@code{STRICT_ALIGNMENT} were nonzero when generating code for block -moves. This can cause significantly more instructions to be produced. -Therefore, do not set this macro nonzero if unaligned accesses only add a -cycle or two to the time for a memory access. - -If the value of this macro is always zero, it need not be defined. If -this macro is defined, it should produce a nonzero value when -@code{STRICT_ALIGNMENT} is nonzero. -@end defmac - @defmac MOVE_RATIO (@var{speed}) The threshold of number of scalar memory-to-memory move insns, @emph{below} which a sequence of insns should be generated instead of a diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c index 8465237..ff568b1 100644 --- a/gcc/emit-rtl.c +++ b/gcc/emit-rtl.c @@ -1495,6 +1495,12 @@ get_mem_align_offset (rtx mem, unsigned int align) if (TYPE_ALIGN (TREE_TYPE (expr)) < (unsigned int) align) return -1; } + else if (TREE_CODE (expr) == MEM_REF) + { + if (get_object_alignment_1 (expr, &offset) < align) + return -1; + offset /= BITS_PER_UNIT; + } else if (TREE_CODE (expr) == COMPONENT_REF) { while (1) @@ -2058,7 +2064,6 @@ adjust_address_1 (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset, enum machine_mode address_mode; int pbits; struct mem_attrs attrs, *defattrs; - unsigned HOST_WIDE_INT max_align; attrs = *get_mem_attrs (memref); @@ -2115,8 +2120,12 @@ adjust_address_1 (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset, if zero. */ if (offset != 0) { - max_align = (offset & -offset) * BITS_PER_UNIT; - attrs.align = MIN (attrs.align, max_align); + int old_offset = get_mem_align_offset (memref, MOVE_MAX*BITS_PER_UNIT); + if (old_offset >= 0) + attrs.align = compute_align_by_offset (old_offset + attrs.offset); + else + attrs.align = MIN (attrs.align, + (unsigned HOST_WIDE_INT) (offset & -offset) * BITS_PER_UNIT); } /* We can compute the size in a number of ways. 
*/ diff --git a/gcc/expr.c b/gcc/expr.c index b020978..83bc789 100644 --- a/gcc/expr.c +++ b/gcc/expr.c @@ -126,15 +126,18 @@ struct store_by_pieces_d static unsigned HOST_WIDE_INT move_by_pieces_ninsns (unsigned HOST_WIDE_INT, unsigned int, unsigned int); -static void move_by_pieces_1 (rtx (*) (rtx, ...), enum machine_mode, - struct move_by_pieces_d *); +static void move_by_pieces_insn (rtx (*) (rtx, ...), enum machine_mode, + struct move_by_pieces_d *); static bool block_move_libcall_safe_for_call_parm (void); static bool emit_block_move_via_movmem (rtx, rtx, rtx, unsigned, unsigned, HOST_WIDE_INT); static tree emit_block_move_libcall_fn (int); static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned); static rtx clear_by_pieces_1 (void *, HOST_WIDE_INT, enum machine_mode); static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int); +static void set_by_pieces_1 (struct store_by_pieces_d *, unsigned int); static void store_by_pieces_1 (struct store_by_pieces_d *, unsigned int); +static void set_by_pieces_2 (rtx (*) (rtx, ...), enum machine_mode, + struct store_by_pieces_d *, rtx); static void store_by_pieces_2 (rtx (*) (rtx, ...), enum machine_mode, struct store_by_pieces_d *); static tree clear_storage_libcall_fn (int); @@ -163,6 +166,12 @@ static void do_tablejump (rtx, enum machine_mode, rtx, rtx, rtx); static rtx const_vector_from_tree (tree); static void write_complex_part (rtx, rtx, bool); +static enum machine_mode widest_mode_for_unaligned_mov (unsigned HOST_WIDE_INT); +static enum machine_mode widest_mode_for_aligned_mov (unsigned HOST_WIDE_INT, + unsigned int); +static enum machine_mode generate_move_with_mode (struct store_by_pieces_d *, + enum machine_mode, rtx *, rtx *); + /* This macro is used to determine whether move_by_pieces should be called to perform a structure copy. */ #ifndef MOVE_BY_PIECES_P @@ -811,7 +820,7 @@ alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align) tmode != VOIDmode; xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode)) if (GET_MODE_SIZE (tmode) > max_pieces - || SLOW_UNALIGNED_ACCESS (tmode, align)) + || targetm.slow_unaligned_access (tmode, align)) break; align = MAX (align, GET_MODE_ALIGNMENT (xmode)); @@ -820,11 +829,66 @@ alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align) return align; } +/* Given an offset from align border, + compute the maximal alignment of offsetted data. */ +unsigned int +compute_align_by_offset (int offset) +{ + return (offset==0) ? + BIGGEST_ALIGNMENT : + MIN (BIGGEST_ALIGNMENT, (offset & -offset) * BITS_PER_UNIT); +} + +/* Estimate cost of move for given size and offset. Offset is used for + determining max alignment. */ +static int +compute_aligned_cost (unsigned HOST_WIDE_INT size, int offset) +{ + unsigned HOST_WIDE_INT cost = 0; + int cur_off = offset; + + while (size > 0) + { + enum machine_mode mode = widest_mode_for_aligned_mov (size, + compute_align_by_offset (cur_off)); + int cur_mode_cost; + enum vect_cost_for_stmt type_of_cost = vector_load; + if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD + && (SCALAR_INT_MODE_P (mode) || SCALAR_FLOAT_MODE_P (mode))) + type_of_cost = scalar_load; + cur_mode_cost = + targetm.vectorize.builtin_vectorization_cost (type_of_cost, NULL, 0); + size -= GET_MODE_SIZE (mode); + cur_off += GET_MODE_SIZE (mode); + cost += cur_mode_cost; + } + return cost; +} + +/* Estimate cost of move for given size. It's assumed, that + alignment is unknown, so we need to use unaligned movs. 
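/* Editorial sketch, not part of the patch: what compute_align_by_offset above
   computes.  (offset & -offset) isolates the lowest set bit of the offset,
   i.e. the largest power of two dividing it, so data OFFSET bytes past a
   maximally aligned boundary can still be assumed aligned to that many bytes.
   The 128-bit cap stands in for BIGGEST_ALIGNMENT on an SSE target;
   align_by_offset is an illustrative name.  */

#include <assert.h>

static unsigned
align_by_offset (int offset)
{
  const unsigned biggest_alignment = 128;  /* bits */
  unsigned align;

  if (offset == 0)
    return biggest_alignment;
  align = (unsigned) (offset & -offset) * 8;
  return align < biggest_alignment ? align : biggest_alignment;
}

int
main (void)
{
  assert (align_by_offset (0) == 128);  /* on the boundary itself */
  assert (align_by_offset (1) == 8);    /* only byte alignment is left */
  assert (align_by_offset (4) == 32);
  assert (align_by_offset (6) == 16);   /* 6 = 2 * 3, so 2-byte alignment */
  return 0;
}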
*/ +static int +compute_unaligned_cost (unsigned HOST_WIDE_INT size) +{ + unsigned HOST_WIDE_INT cost = 0; + while (size > 0) + { + enum machine_mode mode = widest_mode_for_unaligned_mov (size); + unsigned HOST_WIDE_INT n_insns = size/GET_MODE_SIZE (mode); + int cur_mode_cost = + targetm.vectorize.builtin_vectorization_cost (unaligned_load, NULL, 0); + + cost += n_insns*cur_mode_cost; + size %= GET_MODE_SIZE (mode); + } + return cost; +} + /* Return the widest integer mode no wider than SIZE. If no such mode can be found, return VOIDmode. */ static enum machine_mode -widest_int_mode_for_size (unsigned int size) +widest_int_mode_for_size (unsigned HOST_WIDE_INT size) { enum machine_mode tmode, mode = VOIDmode; @@ -836,6 +900,170 @@ widest_int_mode_for_size (unsigned int size) return mode; } +/* If mode is a scalar mode, find corresponding preferred vector mode. + If such mode can't be found, return vector mode, corresponding to Pmode + (a kind of default vector mode). + For vector modes return the mode itself. */ + +static enum machine_mode +vector_mode_for_mode (enum machine_mode mode) +{ + enum machine_mode xmode; + if (VECTOR_MODE_P (mode)) + return mode; + xmode = targetm.vectorize.preferred_simd_mode (mode); + if (VECTOR_MODE_P (xmode)) + return xmode; + + return targetm.vectorize.preferred_simd_mode (Pmode); +} + +/* The routine checks if vector instructions are required for operating + with mode specified. + For vector modes it checks, if the corresponding vector extension is + supported. + Operations with scalar mode will use vector extensions if this scalar + mode is wider than default scalar mode (Pmode) and vector extension + for parent vector mode is available. */ + +bool vector_extensions_used_for_mode (enum machine_mode mode) +{ + enum machine_mode vector_mode = vector_mode_for_mode (mode); + + if (VECTOR_MODE_P (mode)) + return targetm.vector_mode_supported_p (mode); + + /* mode is a scalar mode. */ + if (VECTOR_MODE_P (vector_mode) + && targetm.vector_mode_supported_p (vector_mode) + && (GET_MODE_SIZE (mode) > GET_MODE_SIZE (Pmode))) + return true; + + return false; +} + +/* Find the widest move mode for the given size if alignment is unknown. */ +static enum machine_mode +widest_mode_for_unaligned_mov (unsigned HOST_WIDE_INT size) +{ + enum machine_mode mode; + enum machine_mode tmode, xmode; + enum machine_mode best_simd_mode = targetm.vectorize.preferred_simd_mode ( + mode_for_size (UNITS_PER_WORD*BITS_PER_UNIT, MODE_INT, 0)); + + /* Find the widest integer mode. Here we can find modes wider than Pmode. */ + for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = VOIDmode; + tmode != VOIDmode; + tmode = GET_MODE_WIDER_MODE (tmode)) + { + if (GET_MODE_SIZE (tmode) > size + || targetm.slow_unaligned_access (tmode, BITS_PER_UNIT)) + break; + if (optab_handler (mov_optab, tmode) != CODE_FOR_nothing + && targetm.scalar_mode_supported_p (tmode)) + xmode = tmode; + } + mode = xmode; + + /* Find the widest vector mode. */ + for (tmode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_INT), xmode = VOIDmode; + tmode != VOIDmode; + tmode = GET_MODE_WIDER_MODE (tmode)) + { + if (GET_MODE_SIZE (tmode) > size + || targetm.slow_unaligned_access (tmode, BITS_PER_UNIT)) + break; + if (GET_MODE_SIZE (GET_MODE_INNER (tmode)) == UNITS_PER_WORD + && optab_handler (mov_optab, tmode) != CODE_FOR_nothing + && targetm.vector_mode_supported_p (tmode)) + xmode = tmode; + } + + /* Choose between integer and vector modes. 
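/* Editorial sketch, not part of the patch: the decision that the two cost
   routines above feed into.  move_by_pieces and set_by_pieces_1 later compare
   compute_aligned_cost against compute_unaligned_cost (and, for memcpy, also
   require source and destination to share the same misalignment) before
   choosing a strategy.  This standalone model counts one unit per move with a
   16-byte widest mode; the real code weights each move with the target's
   vectorizer cost hook.  All names below are illustrative.  */

#include <assert.h>
#include <stddef.h>

/* Largest power-of-two chunk not exceeding SIZE or LIMIT.  */
static size_t
chunk_size (size_t size, size_t limit)
{
  size_t chunk = 1;
  while (chunk * 2 <= size && chunk * 2 <= limit)
    chunk *= 2;
  return chunk;
}

/* Unaligned strategy: always use the widest available move.  */
static size_t
unaligned_moves (size_t size)
{
  size_t n = 0;
  while (size > 0)
    {
      size_t chunk = chunk_size (size, 16);
      n += size / chunk;
      size %= chunk;
    }
  return n;
}

/* Aligned strategy: each move is limited by the alignment implied by the
   current offset from a 16-byte boundary, so a misaligned start pays for a
   prologue of narrow moves before wide moves become legal.  */
static size_t
aligned_moves (size_t size, size_t offset)
{
  size_t n = 0;
  while (size > 0)
    {
      size_t align = offset ? (offset & -offset) : 16;
      size_t chunk = chunk_size (size, align < 16 ? align : 16);
      size -= chunk;
      offset = (offset + chunk) % 16;
      n++;
    }
  return n;
}

int
main (void)
{
  /* 64 aligned bytes: both strategies use four 16-byte moves.  */
  assert (aligned_moves (64, 0) == 4 && unaligned_moves (64) == 4);
  /* Starting one byte past the boundary, the aligned strategy needs a
     1+2+4+8 byte prologue, so plain unaligned moves win on count.  */
  assert (unaligned_moves (64) < aligned_moves (64, 1));
  return 0;
}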
*/ + if (xmode != VOIDmode && GET_MODE_SIZE (xmode) > GET_MODE_SIZE (mode)) + mode = xmode; + + /* If found vector and scalar modes have the same sizes, and vector mode is + best_simd_mode, then prefer vector mode to scalar mode. */ + if (xmode != VOIDmode + && GET_MODE_SIZE (xmode) == GET_MODE_SIZE (mode) + && xmode == best_simd_mode) + mode = xmode; + + /* If we failed to find a mode that might use vector extensions, try to + find widest ordinary integer mode. */ + if (mode == VOIDmode) + mode = widest_int_mode_for_size (MIN (MOVE_MAX_PIECES, size) + 1); + + /* If found mode won't use vector extensions, then there is no need to use + mode wider then Pmode. */ + if (!vector_extensions_used_for_mode (mode) + && GET_MODE_SIZE (mode) > MOVE_MAX_PIECES) + mode = widest_int_mode_for_size (MIN (MOVE_MAX_PIECES, size) + 1); + + return mode; +} + +/* Find the widest move mode for the given size and alignment. */ +static enum machine_mode +widest_mode_for_aligned_mov (unsigned HOST_WIDE_INT size, unsigned int align) +{ + enum machine_mode mode; + enum machine_mode tmode, xmode; + enum machine_mode best_simd_mode = targetm.vectorize.preferred_simd_mode ( + mode_for_size (UNITS_PER_WORD*BITS_PER_UNIT, MODE_INT, 0)); + + /* Find the widest integer mode. */ + for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = VOIDmode; + tmode != VOIDmode; + tmode = GET_MODE_WIDER_MODE (tmode)) + { + if (GET_MODE_SIZE (tmode) > size || GET_MODE_ALIGNMENT (tmode) > align) + break; + if (optab_handler (mov_optab, tmode) != CODE_FOR_nothing + && targetm.scalar_mode_supported_p (tmode)) + xmode = tmode; + } + mode = xmode; + + /* Find the widest vector mode. */ + for (tmode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_INT), xmode = VOIDmode; + tmode != VOIDmode; + tmode = GET_MODE_WIDER_MODE (tmode)) + { + if (GET_MODE_SIZE (tmode) > size || GET_MODE_ALIGNMENT (tmode) > align) + break; + if (GET_MODE_SIZE (GET_MODE_INNER (tmode)) == UNITS_PER_WORD && + optab_handler (mov_optab, tmode) != CODE_FOR_nothing && + targetm.vector_mode_supported_p (tmode)) + xmode = tmode; + } + + /* Choose between integer and vector modes. */ + if (xmode != VOIDmode && GET_MODE_SIZE (xmode) > GET_MODE_SIZE (mode)) + mode = xmode; + + /* If found vector and scalar modes have the same sizes, and vector mode is + best_simd_mode, then prefer vector mode to scalar mode. */ + if (xmode != VOIDmode + && GET_MODE_SIZE (xmode) == GET_MODE_SIZE (mode) + && xmode == best_simd_mode) + mode = xmode; + + /* If we failed to find a mode that might use vector extensions, try to + find widest ordinary integer mode. */ + if (mode == VOIDmode) + mode = widest_int_mode_for_size (MIN (MOVE_MAX_PIECES, size) + 1); + + /* If found mode won't use vector extensions, then there is no need to use + mode wider then Pmode. */ + if (!vector_extensions_used_for_mode (mode) + && GET_MODE_SIZE (mode) > MOVE_MAX_PIECES) + mode = widest_int_mode_for_size (MIN (MOVE_MAX_PIECES, size) + 1); + + return mode; +} + /* STORE_MAX_PIECES is the number of bytes at a time that we can store efficiently. Due to internal GCC limitations, this is MOVE_MAX_PIECES limited by the number of bytes GCC can represent @@ -876,6 +1104,7 @@ move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len, rtx to_addr, from_addr = XEXP (from, 0); unsigned int max_size = MOVE_MAX_PIECES + 1; enum insn_code icode; + int dst_offset, src_offset; align = MIN (to ? 
MEM_ALIGN (to) : align, MEM_ALIGN (from)); @@ -960,23 +1189,37 @@ move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len, data.to_addr = copy_to_mode_reg (to_addr_mode, to_addr); } - align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align); - - /* First move what we can in the largest integer mode, then go to - successively smaller modes. */ - - while (max_size > 1) + src_offset = get_mem_align_offset (from, MOVE_MAX*BITS_PER_UNIT); + dst_offset = get_mem_align_offset (to, MOVE_MAX*BITS_PER_UNIT); + if (src_offset < 0 + || dst_offset < 0 + || src_offset != dst_offset + || compute_aligned_cost (data.len, src_offset) >= + compute_unaligned_cost (data.len)) { - enum machine_mode mode = widest_int_mode_for_size (max_size); - - if (mode == VOIDmode) - break; + while (data.len > 0) + { + enum machine_mode mode = widest_mode_for_unaligned_mov (data.len); - icode = optab_handler (mov_optab, mode); - if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode)) - move_by_pieces_1 (GEN_FCN (icode), mode, &data); + icode = optab_handler (mov_optab, mode); + gcc_assert (icode != CODE_FOR_nothing); + move_by_pieces_insn (GEN_FCN (icode), mode, &data); + } + } + else + { + while (data.len > 0) + { + enum machine_mode mode; + mode = widest_mode_for_aligned_mov (data.len, + compute_align_by_offset (src_offset)); - max_size = GET_MODE_SIZE (mode); + icode = optab_handler (mov_optab, mode); + gcc_assert (icode != CODE_FOR_nothing && + compute_align_by_offset (src_offset) >= GET_MODE_ALIGNMENT (mode)); + move_by_pieces_insn (GEN_FCN (icode), mode, &data); + src_offset += GET_MODE_SIZE (mode); + } } /* The code above should have handled everything. */ @@ -1014,35 +1257,47 @@ move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len, } /* Return number of insns required to move L bytes by pieces. - ALIGN (in bits) is maximum alignment we can assume. */ + ALIGN (in bits) is maximum alignment we can assume. + This is just an estimation, so the actual number of instructions might + differ from it (there are several options of expanding memmove). */ static unsigned HOST_WIDE_INT move_by_pieces_ninsns (unsigned HOST_WIDE_INT l, unsigned int align, - unsigned int max_size) + unsigned int max_size ATTRIBUTE_UNUSED) { unsigned HOST_WIDE_INT n_insns = 0; - - align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align); - - while (max_size > 1) + unsigned HOST_WIDE_INT n_insns_u = 0; + enum machine_mode mode; + unsigned HOST_WIDE_INT len = l; + while (len > 0) { - enum machine_mode mode; - enum insn_code icode; - - mode = widest_int_mode_for_size (max_size); - - if (mode == VOIDmode) - break; + mode = widest_mode_for_aligned_mov (len, align); + if (GET_MODE_SIZE (mode) < MOVE_MAX) + { + align += GET_MODE_ALIGNMENT (mode); + len -= GET_MODE_SIZE (mode); + n_insns ++; + } + else + { + /* We are using the widest mode. */ + n_insns += len/GET_MODE_SIZE (mode); + len = len%GET_MODE_SIZE (mode); + } + } + gcc_assert (!len); - icode = optab_handler (mov_optab, mode); - if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode)) - n_insns += l / GET_MODE_SIZE (mode), l %= GET_MODE_SIZE (mode); + len = l; + while (len > 0) + { + mode = widest_mode_for_unaligned_mov (len); + n_insns_u += len/GET_MODE_SIZE (mode); + len = len%GET_MODE_SIZE (mode); - max_size = GET_MODE_SIZE (mode); } - gcc_assert (!l); - return n_insns; + gcc_assert (!len); + return MIN (n_insns, n_insns_u); } /* Subroutine of move_by_pieces. 
Move as many bytes as appropriate @@ -1050,60 +1305,57 @@ move_by_pieces_ninsns (unsigned HOST_WIDE_INT l, unsigned int align, to make a move insn for that mode. DATA has all the other info. */ static void -move_by_pieces_1 (rtx (*genfun) (rtx, ...), enum machine_mode mode, +move_by_pieces_insn (rtx (*genfun) (rtx, ...), enum machine_mode mode, struct move_by_pieces_d *data) { unsigned int size = GET_MODE_SIZE (mode); rtx to1 = NULL_RTX, from1; - while (data->len >= size) - { - if (data->reverse) - data->offset -= size; - - if (data->to) - { - if (data->autinc_to) - to1 = adjust_automodify_address (data->to, mode, data->to_addr, - data->offset); - else - to1 = adjust_address (data->to, mode, data->offset); - } + if (data->reverse) + data->offset -= size; - if (data->autinc_from) - from1 = adjust_automodify_address (data->from, mode, data->from_addr, - data->offset); + if (data->to) + { + if (data->autinc_to) + to1 = adjust_automodify_address (data->to, mode, data->to_addr, + data->offset); else - from1 = adjust_address (data->from, mode, data->offset); + to1 = adjust_address (data->to, mode, data->offset); + } - if (HAVE_PRE_DECREMENT && data->explicit_inc_to < 0) - emit_insn (gen_add2_insn (data->to_addr, - GEN_INT (-(HOST_WIDE_INT)size))); - if (HAVE_PRE_DECREMENT && data->explicit_inc_from < 0) - emit_insn (gen_add2_insn (data->from_addr, - GEN_INT (-(HOST_WIDE_INT)size))); + if (data->autinc_from) + from1 = adjust_automodify_address (data->from, mode, data->from_addr, + data->offset); + else + from1 = adjust_address (data->from, mode, data->offset); - if (data->to) - emit_insn ((*genfun) (to1, from1)); - else - { + if (HAVE_PRE_DECREMENT && data->explicit_inc_to < 0) + emit_insn (gen_add2_insn (data->to_addr, + GEN_INT (-(HOST_WIDE_INT)size))); + if (HAVE_PRE_DECREMENT && data->explicit_inc_from < 0) + emit_insn (gen_add2_insn (data->from_addr, + GEN_INT (-(HOST_WIDE_INT)size))); + + if (data->to) + emit_insn ((*genfun) (to1, from1)); + else + { #ifdef PUSH_ROUNDING - emit_single_push_insn (mode, from1, NULL); + emit_single_push_insn (mode, from1, NULL); #else - gcc_unreachable (); + gcc_unreachable (); #endif - } + } - if (HAVE_POST_INCREMENT && data->explicit_inc_to > 0) - emit_insn (gen_add2_insn (data->to_addr, GEN_INT (size))); - if (HAVE_POST_INCREMENT && data->explicit_inc_from > 0) - emit_insn (gen_add2_insn (data->from_addr, GEN_INT (size))); + if (HAVE_POST_INCREMENT && data->explicit_inc_to > 0) + emit_insn (gen_add2_insn (data->to_addr, GEN_INT (size))); + if (HAVE_POST_INCREMENT && data->explicit_inc_from > 0) + emit_insn (gen_add2_insn (data->from_addr, GEN_INT (size))); - if (! data->reverse) - data->offset += size; + if (! data->reverse) + data->offset += size; - data->len -= size; - } + data->len -= size; } /* Emit code to move a block Y to a block X. This may be done with @@ -1680,7 +1932,7 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize) /* Optimize the access just a bit. */ if (MEM_P (src) - && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (src)) + && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src)) || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode)) && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0 && bytelen == GET_MODE_SIZE (mode)) @@ -2070,7 +2322,7 @@ emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize) /* Optimize the access just a bit. */ if (MEM_P (dest) - && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (dest)) + && (! 
targetm.slow_unaligned_access (mode, MEM_ALIGN (dest)) || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode)) && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0 && bytelen == GET_MODE_SIZE (mode)) @@ -2464,7 +2716,10 @@ store_by_pieces (rtx to, unsigned HOST_WIDE_INT len, data.constfundata = constfundata; data.len = len; data.to = to; - store_by_pieces_1 (&data, align); + if (memsetp) + set_by_pieces_1 (&data, align); + else + store_by_pieces_1 (&data, align); if (endp) { rtx to1; @@ -2508,10 +2763,10 @@ clear_by_pieces (rtx to, unsigned HOST_WIDE_INT len, unsigned int align) return; data.constfun = clear_by_pieces_1; - data.constfundata = NULL; + data.constfundata = CONST0_RTX (QImode); data.len = len; data.to = to; - store_by_pieces_1 (&data, align); + set_by_pieces_1 (&data, align); } /* Callback routine for clear_by_pieces. @@ -2525,13 +2780,126 @@ clear_by_pieces_1 (void *data ATTRIBUTE_UNUSED, return const0_rtx; } -/* Subroutine of clear_by_pieces and store_by_pieces. +/* Helper function for set by pieces - generates move with the given mode. + Returns a mode used for in generated move (it could differ from requested, + if the requested mode isn't supported. */ +static enum machine_mode generate_move_with_mode ( + struct store_by_pieces_d *data, + enum machine_mode mode, + rtx *promoted_to_vector_value_ptr, + rtx *promoted_value_ptr) +{ + enum insn_code icode; + rtx rhs = NULL_RTX; + + gcc_assert (promoted_to_vector_value_ptr && promoted_value_ptr); + + if (vector_extensions_used_for_mode (mode)) + { + enum machine_mode vec_mode = vector_mode_for_mode (mode); + if (!(*promoted_to_vector_value_ptr)) + *promoted_to_vector_value_ptr + = targetm.promote_rtx_for_memset (vec_mode, (rtx)data->constfundata); + + if (*promoted_to_vector_value_ptr) + { + enum machine_mode promoted_mode = GET_MODE (*promoted_to_vector_value_ptr); + if (GET_MODE_SIZE (promoted_mode) < GET_MODE_SIZE (mode)) + return generate_move_with_mode (data, promoted_mode, + promoted_to_vector_value_ptr, + promoted_value_ptr); + rhs = convert_to_mode (vec_mode, *promoted_to_vector_value_ptr, 1); + } + } + else + { + if (CONST_INT_P ((rtx)data->constfundata)) + { + /* We don't need to load the constant to a register, if it could be + encoded as an immediate operand. */ + rtx imm_const; + switch (mode) + { + case DImode: + imm_const + = gen_int_mode ((UINTVAL ((rtx)data->constfundata) & 0xFF) + * 0x0101010101010101, DImode); + break; + case SImode: + imm_const + = gen_int_mode ((UINTVAL ((rtx)data->constfundata) & 0xFF) + * 0x01010101, SImode); + break; + case HImode: + imm_const + = gen_int_mode ((UINTVAL ((rtx)data->constfundata) & 0xFF) + * 0x00000101, HImode); + break; + case QImode: + imm_const + = gen_int_mode ((UINTVAL ((rtx)data->constfundata) & 0xFF) + * 0x00000001, QImode); + break; + default: + gcc_unreachable (); + break; + } + rhs = imm_const; + } + else /* data->constfundata isn't const. */ + { + if (!(*promoted_value_ptr)) + { + rtx coeff; + enum machine_mode promoted_value_mode; + /* Choose mode for promoted value. It shouldn't be narrower, than + Pmode. 
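/* Editorial sketch, not part of the patch: a scalar model of what the promoted
   value is used for in the memset expansion.  The byte is widened once (the
   same 0x0101010101010101 multiply used above) and then stored through the
   buffer in word-sized chunks with a byte tail; the patch does the analogous
   thing with SSE registers and vector stores.  set_by_words is an illustrative
   name.  */

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void
set_by_words (unsigned char *dst, size_t len, unsigned char val)
{
  uint64_t pattern = (uint64_t) val * 0x0101010101010101ull;

  while (len >= sizeof pattern)
    {
      memcpy (dst, &pattern, sizeof pattern);  /* one 8-byte store */
      dst += sizeof pattern;
      len -= sizeof pattern;
    }
  while (len--)
    *dst++ = val;                              /* byte-sized tail */
}

int
main (void)
{
  unsigned char buf[37];
  size_t i;

  set_by_words (buf, sizeof buf, 0xAB);
  for (i = 0; i < sizeof buf; i++)
    assert (buf[i] == 0xAB);
  return 0;
}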
*/ + if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (Pmode)) + promoted_value_mode = mode; + else + promoted_value_mode = Pmode; + + switch (promoted_value_mode) + { + case DImode: + coeff = gen_int_mode (0x0101010101010101, DImode); + break; + case SImode: + coeff = gen_int_mode (0x01010101, SImode); + break; + default: + gcc_unreachable (); + break; + } + *promoted_value_ptr = convert_to_mode (promoted_value_mode, + (rtx)data->constfundata, + 1); + *promoted_value_ptr = expand_mult (promoted_value_mode, + *promoted_value_ptr, coeff, + NULL_RTX, 1); + } + rhs = convert_to_mode (mode, *promoted_value_ptr, 1); + } + } + /* If RHS is null, then the requested mode isn't supported and can't be used. + Use Pmode instead. */ + if (!rhs) + return generate_move_with_mode (data, Pmode, promoted_to_vector_value_ptr, + promoted_value_ptr); + + gcc_assert (rhs); + icode = optab_handler (mov_optab, mode); + gcc_assert (icode != CODE_FOR_nothing); + set_by_pieces_2 (GEN_FCN (icode), mode, data, rhs); + return mode; +} + +/* Subroutine of store_by_pieces. Generate several move instructions to store LEN bytes of block TO. (A MEM rtx with BLKmode). ALIGN is maximum alignment we can assume. */ static void -store_by_pieces_1 (struct store_by_pieces_d *data ATTRIBUTE_UNUSED, - unsigned int align ATTRIBUTE_UNUSED) +store_by_pieces_1 (struct store_by_pieces_d *data, unsigned int align) { enum machine_mode to_addr_mode = targetm.addr_space.address_mode (MEM_ADDR_SPACE (data->to)); @@ -2606,6 +2974,134 @@ store_by_pieces_1 (struct store_by_pieces_d *data ATTRIBUTE_UNUSED, gcc_assert (!data->len); } +/* Subroutine of clear_by_pieces and store_by_pieces. + Generate several move instructions to store LEN bytes of block TO. (A MEM + rtx with BLKmode). ALIGN is maximum alignment we can assume. + As opposed to store_by_pieces_1, this routine always generates code for + memset. (store_by_pieces_1 is sometimes used to generate code for memcpy + rather than for memset). */ + +static void +set_by_pieces_1 (struct store_by_pieces_d *data, unsigned int align) +{ + enum machine_mode to_addr_mode + = targetm.addr_space.address_mode (MEM_ADDR_SPACE (data->to)); + rtx to_addr = XEXP (data->to, 0); + unsigned int max_size = STORE_MAX_PIECES + 1; + int dst_offset; + rtx promoted_to_vector_value = NULL_RTX; + rtx promoted_value = NULL_RTX; + + data->offset = 0; + data->to_addr = to_addr; + data->autinc_to + = (GET_CODE (to_addr) == PRE_INC || GET_CODE (to_addr) == PRE_DEC + || GET_CODE (to_addr) == POST_INC || GET_CODE (to_addr) == POST_DEC); + + data->explicit_inc_to = 0; + data->reverse + = (GET_CODE (to_addr) == PRE_DEC || GET_CODE (to_addr) == POST_DEC); + if (data->reverse) + data->offset = data->len; + + /* If storing requires more than two move insns, + copy addresses to registers (to make displacements shorter) + and use post-increment if available. */ + if (!data->autinc_to + && move_by_pieces_ninsns (data->len, align, max_size) > 2) + { + /* Determine the main mode we'll be using. + MODE might not be used depending on the definitions of the + USE_* macros below. */ + enum machine_mode mode ATTRIBUTE_UNUSED + = widest_int_mode_for_size (max_size); + + if (USE_STORE_PRE_DECREMENT (mode) && data->reverse && ! data->autinc_to) + { + data->to_addr = copy_to_mode_reg (to_addr_mode, + plus_constant (to_addr, data->len)); + data->autinc_to = 1; + data->explicit_inc_to = -1; + } + + if (USE_STORE_POST_INCREMENT (mode) && ! data->reverse + && ! 
data->autinc_to) + { + data->to_addr = copy_to_mode_reg (to_addr_mode, to_addr); + data->autinc_to = 1; + data->explicit_inc_to = 1; + } + + if ( !data->autinc_to && CONSTANT_P (to_addr)) + data->to_addr = copy_to_mode_reg (to_addr_mode, to_addr); + } + + dst_offset = get_mem_align_offset (data->to, MOVE_MAX*BITS_PER_UNIT); + if (dst_offset < 0 + || compute_aligned_cost (data->len, dst_offset) >= + compute_unaligned_cost (data->len)) + { + while (data->len > 0) + { + enum machine_mode mode = widest_mode_for_unaligned_mov (data->len); + generate_move_with_mode (data, mode, &promoted_to_vector_value, + &promoted_value); + } + } + else + { + while (data->len > 0) + { + enum machine_mode mode; + mode = widest_mode_for_aligned_mov (data->len, + compute_align_by_offset (dst_offset)); + mode = generate_move_with_mode (data, mode, &promoted_to_vector_value, + &promoted_value); + dst_offset += GET_MODE_SIZE (mode); + } + } + + /* The code above should have handled everything. */ + gcc_assert (!data->len); +} + +/* Subroutine of set_by_pieces_1. Emit move instruction with mode MODE. + DATA has info about destination, RHS is source, GENFUN is the gen_... + function to make a move insn for that mode. */ + +static void +set_by_pieces_2 (rtx (*genfun) (rtx, ...), enum machine_mode mode, + struct store_by_pieces_d *data, rtx rhs) +{ + unsigned int size = GET_MODE_SIZE (mode); + rtx to1; + + if (data->reverse) + data->offset -= size; + + if (data->autinc_to) + to1 = adjust_automodify_address (data->to, mode, data->to_addr, + data->offset); + else + to1 = adjust_address (data->to, mode, data->offset); + + if (HAVE_PRE_DECREMENT && data->explicit_inc_to < 0) + emit_insn (gen_add2_insn (data->to_addr, + GEN_INT (-(HOST_WIDE_INT) size))); + + gcc_assert (rhs); + + emit_insn ((*genfun) (to1, rhs)); + + if (HAVE_POST_INCREMENT && data->explicit_inc_to > 0) + emit_insn (gen_add2_insn (data->to_addr, GEN_INT (size))); + + if (! data->reverse) + data->offset += size; + + data->len -= size; +} + /* Subroutine of store_by_pieces_1. Store as many bytes as appropriate with move instructions for mode MODE. GENFUN is the gen_... function to make a move insn for that mode. DATA has all the other info. */ @@ -4034,7 +4530,7 @@ emit_push_insn (rtx x, enum machine_mode mode, tree type, rtx size, /* Here we avoid the case of a structure whose weak alignment forces many pushes of a small amount of data, and such small pushes do rounding that causes trouble. */ - && ((! SLOW_UNALIGNED_ACCESS (word_mode, align)) + && ((! targetm.slow_unaligned_access (word_mode, align)) || align >= BIGGEST_ALIGNMENT || (PUSH_ROUNDING (align / BITS_PER_UNIT) == (align / BITS_PER_UNIT))) @@ -6325,7 +6821,7 @@ store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos, || (mode != BLKmode && ((((MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode)) || bitpos % GET_MODE_ALIGNMENT (mode)) - && SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (target))) + && targetm.slow_unaligned_access (mode, MEM_ALIGN (target))) || (bitpos % BITS_PER_UNIT != 0))) /* If the RHS and field are a constant size and the size of the RHS isn't the same size as the bitfield, we must use bitfield @@ -9738,7 +10234,7 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode, && ((modifier == EXPAND_CONST_ADDRESS || modifier == EXPAND_INITIALIZER) ? 
STRICT_ALIGNMENT - : SLOW_UNALIGNED_ACCESS (mode1, MEM_ALIGN (op0)))) + : targetm.slow_unaligned_access (mode1, MEM_ALIGN (op0)))) || (bitpos % BITS_PER_UNIT != 0))) /* If the type and the field are a constant size and the size of the type isn't the same size as the bitfield, diff --git a/gcc/expr.h b/gcc/expr.h index 1bf1369..6f697d7 100644 --- a/gcc/expr.h +++ b/gcc/expr.h @@ -706,4 +706,8 @@ extern tree build_libfunc_function (const char *); /* Get the personality libfunc for a function decl. */ rtx get_personality_function (tree); +/* Given offset from maximum alignment boundary, compute maximum alignment, + that can be assumed. */ +unsigned int compute_align_by_offset (int); + #endif /* GCC_EXPR_H */ diff --git a/gcc/fwprop.c b/gcc/fwprop.c index 5368d18..cbbb75a 100644 --- a/gcc/fwprop.c +++ b/gcc/fwprop.c @@ -1273,6 +1273,10 @@ forward_propagate_and_simplify (df_ref use, rtx def_insn, rtx def_set) return false; } + /* Don't propagate vector-constants. */ + if (vector_extensions_used_for_mode (GET_MODE (reg)) && CONSTANT_P (src)) + return false; + if (asm_use >= 0) return forward_propagate_asm (use, def_insn, def_set, reg); diff --git a/gcc/rtl.h b/gcc/rtl.h index f13485e..4ec67c7 100644 --- a/gcc/rtl.h +++ b/gcc/rtl.h @@ -2513,6 +2513,9 @@ extern void emit_jump (rtx); /* In expr.c */ extern rtx move_by_pieces (rtx, rtx, unsigned HOST_WIDE_INT, unsigned int, int); +/* Check if vector instructions are required for operating with mode + specified. */ +bool vector_extensions_used_for_mode (enum machine_mode); extern HOST_WIDE_INT find_args_size_adjust (rtx); extern int fixup_args_size_notes (rtx, rtx, int); diff --git a/gcc/target.def b/gcc/target.def index c3bec0e..a74bb7b 100644 --- a/gcc/target.def +++ b/gcc/target.def @@ -1498,6 +1498,22 @@ DEFHOOK bool, (struct ao_ref_s *ref), default_ref_may_alias_errno) +/* True if access to unaligned data in given mode is too slow or + prohibited. */ +DEFHOOK +(slow_unaligned_access, + "", + bool, (enum machine_mode mode, unsigned int align), + default_slow_unaligned_access) + +/* Target hook. Returns rtx of mode MODE with promoted value VAL or NULL. + VAL is supposed to represent one byte. */ +DEFHOOK +(promote_rtx_for_memset, + "", + rtx, (enum machine_mode mode, rtx val), + default_promote_rtx_for_memset) + /* Support for named address spaces. */ #undef HOOK_PREFIX #define HOOK_PREFIX "TARGET_ADDR_SPACE_" diff --git a/gcc/targhooks.c b/gcc/targhooks.c index 81fd12f..f02a9e8 100644 --- a/gcc/targhooks.c +++ b/gcc/targhooks.c @@ -1442,4 +1442,24 @@ default_pch_valid_p (const void *data_p, size_t len) return NULL; } +bool +default_slow_unaligned_access (enum machine_mode mode ATTRIBUTE_UNUSED, + unsigned int align ATTRIBUTE_UNUSED) +{ +#ifdef SLOW_UNALIGNED_ACCESS + return SLOW_UNALIGNED_ACCESS (mode, align); +#else + return STRICT_ALIGNMENT; +#endif +} + +/* Target hook. Returns rtx of mode MODE with promoted value VAL or NULL. + VAL is supposed to represent one byte. 
*/ +rtx +default_promote_rtx_for_memset (enum machine_mode mode ATTRIBUTE_UNUSED, + rtx val ATTRIBUTE_UNUSED) +{ + return NULL_RTX; +} + #include "gt-targhooks.h" diff --git a/gcc/targhooks.h b/gcc/targhooks.h index f19fb50..8d23747 100644 --- a/gcc/targhooks.h +++ b/gcc/targhooks.h @@ -175,3 +175,6 @@ extern enum machine_mode default_get_reg_raw_mode(int); extern void *default_get_pch_validity (size_t *); extern const char *default_pch_valid_p (const void *, size_t); +extern bool default_slow_unaligned_access (enum machine_mode mode, + unsigned int align); +extern rtx default_promote_rtx_for_memset (enum machine_mode mode, rtx val); diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s16-a1-1.c b/gcc/testsuite/gcc.target/i386/memcpy-s16-a1-1.c new file mode 100644 index 0000000..39c8ef0 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s16-a1-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 16 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "movq" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s16-a1-2.c b/gcc/testsuite/gcc.target/i386/memcpy-s16-a1-2.c new file mode 100644 index 0000000..439694b --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s16-a1-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 16 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "movq" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s16-a1-3.c b/gcc/testsuite/gcc.target/i386/memcpy-s16-a1-3.c new file mode 100644 index 0000000..51f4c3b --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s16-a1-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 16 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s16-a1-4.c b/gcc/testsuite/gcc.target/i386/memcpy-s16-a1-4.c new file mode 100644 index 0000000..bca8680 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s16-a1-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! 
ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 16 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s3072-a1-1.c b/gcc/testsuite/gcc.target/i386/memcpy-s3072-a1-1.c new file mode 100644 index 0000000..5bc8e74 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s3072-a1-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 3072 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s3072-a1-2.c b/gcc/testsuite/gcc.target/i386/memcpy-s3072-a1-2.c new file mode 100644 index 0000000..b7dff27 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s3072-a1-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 3072 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s3072-au-1.c b/gcc/testsuite/gcc.target/i386/memcpy-s3072-au-1.c new file mode 100644 index 0000000..bee85fe --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s3072-au-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 3072 +#define OFFSET 1 +char *dst, *src; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "\tmemcpy" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s3072-au-2.c b/gcc/testsuite/gcc.target/i386/memcpy-s3072-au-2.c new file mode 100644 index 0000000..1160beb --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s3072-au-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 3072 +#define OFFSET 1 +char *dst, *src; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "\tmemcpy" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s512-a0-1.c b/gcc/testsuite/gcc.target/i386/memcpy-s512-a0-1.c new file mode 100644 index 0000000..b1c78ec --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s512-a0-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. 
*/ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s512-a0-2.c b/gcc/testsuite/gcc.target/i386/memcpy-s512-a0-2.c new file mode 100644 index 0000000..a15a0f7 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s512-a0-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s512-a0-3.c b/gcc/testsuite/gcc.target/i386/memcpy-s512-a0-3.c new file mode 100644 index 0000000..2789660 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s512-a0-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s512-a0-4.c b/gcc/testsuite/gcc.target/i386/memcpy-s512-a0-4.c new file mode 100644 index 0000000..17e0342 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s512-a0-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s512-a1-1.c b/gcc/testsuite/gcc.target/i386/memcpy-s512-a1-1.c new file mode 100644 index 0000000..e437378 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s512-a1-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s512-a1-2.c b/gcc/testsuite/gcc.target/i386/memcpy-s512-a1-2.c new file mode 100644 index 0000000..ba716df --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s512-a1-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! 
ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s512-a1-3.c b/gcc/testsuite/gcc.target/i386/memcpy-s512-a1-3.c new file mode 100644 index 0000000..1845e95 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s512-a1-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s512-a1-4.c b/gcc/testsuite/gcc.target/i386/memcpy-s512-a1-4.c new file mode 100644 index 0000000..2b23751 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s512-a1-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s512-au-1.c b/gcc/testsuite/gcc.target/i386/memcpy-s512-au-1.c new file mode 100644 index 0000000..e751192 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s512-au-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char *dst, *src; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "\tmemcpy" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s512-au-2.c b/gcc/testsuite/gcc.target/i386/memcpy-s512-au-2.c new file mode 100644 index 0000000..7defe7e --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s512-au-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char *dst, *src; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "movq" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s512-au-3.c b/gcc/testsuite/gcc.target/i386/memcpy-s512-au-3.c new file mode 100644 index 0000000..ea27378 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s512-au-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. 
*/ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char *dst, *src; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "movq" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s64-a0-1.c b/gcc/testsuite/gcc.target/i386/memcpy-s64-a0-1.c new file mode 100644 index 0000000..de2a557 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s64-a0-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s64-a0-2.c b/gcc/testsuite/gcc.target/i386/memcpy-s64-a0-2.c new file mode 100644 index 0000000..1f82258 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s64-a0-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s64-a0-3.c b/gcc/testsuite/gcc.target/i386/memcpy-s64-a0-3.c new file mode 100644 index 0000000..7f60806 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s64-a0-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s64-a0-4.c b/gcc/testsuite/gcc.target/i386/memcpy-s64-a0-4.c new file mode 100644 index 0000000..94f0864 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s64-a0-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s64-a1-1.c b/gcc/testsuite/gcc.target/i386/memcpy-s64-a1-1.c new file mode 100644 index 0000000..20545c8 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s64-a1-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. 
*/ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s64-a1-2.c b/gcc/testsuite/gcc.target/i386/memcpy-s64-a1-2.c new file mode 100644 index 0000000..52dab8e --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s64-a1-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s64-a1-3.c b/gcc/testsuite/gcc.target/i386/memcpy-s64-a1-3.c new file mode 100644 index 0000000..c662480 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s64-a1-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s64-a1-4.c b/gcc/testsuite/gcc.target/i386/memcpy-s64-a1-4.c new file mode 100644 index 0000000..9e8e152 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s64-a1-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}, src[SIZE + OFFSET] = {}; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s64-au-1.c b/gcc/testsuite/gcc.target/i386/memcpy-s64-au-1.c new file mode 100644 index 0000000..662fc20 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s64-au-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char *dst, *src; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "movq" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s64-au-2.c b/gcc/testsuite/gcc.target/i386/memcpy-s64-au-2.c new file mode 100644 index 0000000..c90e852 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s64-au-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! 
ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char *dst, *src; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "movq" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s64-au-3.c b/gcc/testsuite/gcc.target/i386/memcpy-s64-au-3.c new file mode 100644 index 0000000..5a41f82 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s64-au-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char *dst, *src; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memcpy-s64-au-4.c b/gcc/testsuite/gcc.target/i386/memcpy-s64-au-4.c new file mode 100644 index 0000000..ec2dfff --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memcpy-s64-au-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memcpy where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memcpy (void *, void *, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char *dst, *src; + +void +do_copy () +{ + memcpy (dst+OFFSET, src+OFFSET, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s16-a1-1.c b/gcc/testsuite/gcc.target/i386/memset-s16-a1-1.c new file mode 100644 index 0000000..d6b2cd5 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s16-a1-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 16 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "movq" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s16-a1-2.c b/gcc/testsuite/gcc.target/i386/memset-s16-a1-2.c new file mode 100644 index 0000000..9cd89e9 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s16-a1-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 16 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "movq" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s16-a1-3.c b/gcc/testsuite/gcc.target/i386/memset-s16-a1-3.c new file mode 100644 index 0000000..ddf25fd --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s16-a1-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. 
*/ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 16 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s16-a1-4.c b/gcc/testsuite/gcc.target/i386/memset-s16-a1-4.c new file mode 100644 index 0000000..fde4f5d --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s16-a1-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 16 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s3072-a1-1.c b/gcc/testsuite/gcc.target/i386/memset-s3072-a1-1.c new file mode 100644 index 0000000..4fe2d36 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s3072-a1-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 3072 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s3072-a1-2.c b/gcc/testsuite/gcc.target/i386/memset-s3072-a1-2.c new file mode 100644 index 0000000..2209563 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s3072-a1-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 3072 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s3072-au-1.c b/gcc/testsuite/gcc.target/i386/memset-s3072-au-1.c new file mode 100644 index 0000000..8d99dde --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s3072-au-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 3072 +#define OFFSET 1 +char *dst; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "\tmemset" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s3072-au-2.c b/gcc/testsuite/gcc.target/i386/memset-s3072-au-2.c new file mode 100644 index 0000000..e0ad04a --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s3072-au-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! 
ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 3072 +#define OFFSET 1 +char *dst; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "\tmemset" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s512-a0-1.c b/gcc/testsuite/gcc.target/i386/memset-s512-a0-1.c new file mode 100644 index 0000000..404d04e --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s512-a0-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s512-a0-2.c b/gcc/testsuite/gcc.target/i386/memset-s512-a0-2.c new file mode 100644 index 0000000..1df9db0 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s512-a0-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s512-a0-3.c b/gcc/testsuite/gcc.target/i386/memset-s512-a0-3.c new file mode 100644 index 0000000..beb005c --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s512-a0-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s512-a0-4.c b/gcc/testsuite/gcc.target/i386/memset-s512-a0-4.c new file mode 100644 index 0000000..29f5ea3 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s512-a0-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s512-a1-1.c b/gcc/testsuite/gcc.target/i386/memset-s512-a1-1.c new file mode 100644 index 0000000..2504333 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s512-a1-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. 
*/ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s512-a1-2.c b/gcc/testsuite/gcc.target/i386/memset-s512-a1-2.c new file mode 100644 index 0000000..b0aaada --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s512-a1-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s512-a1-3.c b/gcc/testsuite/gcc.target/i386/memset-s512-a1-3.c new file mode 100644 index 0000000..3e250d0 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s512-a1-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s512-a1-4.c b/gcc/testsuite/gcc.target/i386/memset-s512-a1-4.c new file mode 100644 index 0000000..c13edd7 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s512-a1-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s512-au-1.c b/gcc/testsuite/gcc.target/i386/memset-s512-au-1.c new file mode 100644 index 0000000..17d9525 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s512-au-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char *dst; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s512-au-2.c b/gcc/testsuite/gcc.target/i386/memset-s512-au-2.c new file mode 100644 index 0000000..8125e9d --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s512-au-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! 
ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char *dst; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s512-au-3.c b/gcc/testsuite/gcc.target/i386/memset-s512-au-3.c new file mode 100644 index 0000000..ff74811 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s512-au-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char *dst; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s512-au-4.c b/gcc/testsuite/gcc.target/i386/memset-s512-au-4.c new file mode 100644 index 0000000..d7e0c3d --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s512-au-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 512 +#define OFFSET 1 +char *dst; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a0-1.c b/gcc/testsuite/gcc.target/i386/memset-s64-a0-1.c new file mode 100644 index 0000000..ea7b439 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a0-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 5, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a0-10.c b/gcc/testsuite/gcc.target/i386/memset-s64-a0-10.c new file mode 100644 index 0000000..5ef250d --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a0-10.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 5, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a0-11.c b/gcc/testsuite/gcc.target/i386/memset-s64-a0-11.c new file mode 100644 index 0000000..846a807 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a0-11.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! 
ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set (char c) +{ + memset (dst+OFFSET, c, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a0-12.c b/gcc/testsuite/gcc.target/i386/memset-s64-a0-12.c new file mode 100644 index 0000000..a8f7c3b --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a0-12.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a0-2.c b/gcc/testsuite/gcc.target/i386/memset-s64-a0-2.c new file mode 100644 index 0000000..ae05e93 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a0-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set (char c) +{ + memset (dst+OFFSET, c, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a0-3.c b/gcc/testsuite/gcc.target/i386/memset-s64-a0-3.c new file mode 100644 index 0000000..96462bd --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a0-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a0-4.c b/gcc/testsuite/gcc.target/i386/memset-s64-a0-4.c new file mode 100644 index 0000000..6aee01e --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a0-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 5, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a0-5.c b/gcc/testsuite/gcc.target/i386/memset-s64-a0-5.c new file mode 100644 index 0000000..bbad9b9 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a0-5.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! 
ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set (char c) +{ + memset (dst+OFFSET, c, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a0-6.c b/gcc/testsuite/gcc.target/i386/memset-s64-a0-6.c new file mode 100644 index 0000000..8e90d72 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a0-6.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a0-7.c b/gcc/testsuite/gcc.target/i386/memset-s64-a0-7.c new file mode 100644 index 0000000..26d0b42 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a0-7.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 5, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a0-8.c b/gcc/testsuite/gcc.target/i386/memset-s64-a0-8.c new file mode 100644 index 0000000..84ec749 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a0-8.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set (char c) +{ + memset (dst+OFFSET, c, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a0-9.c b/gcc/testsuite/gcc.target/i386/memset-s64-a0-9.c new file mode 100644 index 0000000..ef15265 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a0-9.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a1-1.c b/gcc/testsuite/gcc.target/i386/memset-s64-a1-1.c new file mode 100644 index 0000000..444a8de --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a1-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. 
*/ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a1-2.c b/gcc/testsuite/gcc.target/i386/memset-s64-a1-2.c new file mode 100644 index 0000000..9154fb9 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a1-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a1-3.c b/gcc/testsuite/gcc.target/i386/memset-s64-a1-3.c new file mode 100644 index 0000000..9b7dac1 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a1-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-a1-4.c b/gcc/testsuite/gcc.target/i386/memset-s64-a1-4.c new file mode 100644 index 0000000..713c8a8 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-a1-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-au-1.c b/gcc/testsuite/gcc.target/i386/memset-s64-au-1.c new file mode 100644 index 0000000..8c700c0 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-au-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char *dst; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "movq" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-au-2.c b/gcc/testsuite/gcc.target/i386/memset-s64-au-2.c new file mode 100644 index 0000000..c344fd0 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-au-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! 
ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char *dst; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "movq" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-au-3.c b/gcc/testsuite/gcc.target/i386/memset-s64-au-3.c new file mode 100644 index 0000000..125de2f --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-au-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char *dst; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s64-au-4.c b/gcc/testsuite/gcc.target/i386/memset-s64-au-4.c new file mode 100644 index 0000000..b50de1b --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s64-au-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 64 +#define OFFSET 1 +char *dst; + +void +do_set () +{ + memset (dst+OFFSET, 0, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s768-a0-1.c b/gcc/testsuite/gcc.target/i386/memset-s768-a0-1.c new file mode 100644 index 0000000..c6fd271 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s768-a0-1.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 768 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 5, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s768-a0-2.c b/gcc/testsuite/gcc.target/i386/memset-s768-a0-2.c new file mode 100644 index 0000000..32972e6 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s768-a0-2.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 768 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set (char c) +{ + memset (dst+OFFSET, c, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s768-a0-3.c b/gcc/testsuite/gcc.target/i386/memset-s768-a0-3.c new file mode 100644 index 0000000..ac615e8 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s768-a0-3.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! 
ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 768 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 5, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s768-a0-4.c b/gcc/testsuite/gcc.target/i386/memset-s768-a0-4.c new file mode 100644 index 0000000..8458cfd --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s768-a0-4.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=atom" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 768 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set (char c) +{ + memset (dst+OFFSET, c, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s768-a0-5.c b/gcc/testsuite/gcc.target/i386/memset-s768-a0-5.c new file mode 100644 index 0000000..210946d --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s768-a0-5.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 768 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 5, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s768-a0-6.c b/gcc/testsuite/gcc.target/i386/memset-s768-a0-6.c new file mode 100644 index 0000000..e63feae --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s768-a0-6.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 768 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set (char c) +{ + memset (dst+OFFSET, c, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s768-a0-7.c b/gcc/testsuite/gcc.target/i386/memset-s768-a0-7.c new file mode 100644 index 0000000..72b2ba0 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s768-a0-7.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 768 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set () +{ + memset (dst+OFFSET, 5, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/memset-s768-a0-8.c b/gcc/testsuite/gcc.target/i386/memset-s768-a0-8.c new file mode 100644 index 0000000..cb5dc85 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/memset-s768-a0-8.c @@ -0,0 +1,15 @@ +/* Ensure that we use SSE-moves for memset where it's needed. */ +/* { dg-do compile { target { ! 
ia32 } } } */ +/* { dg-options "-O2 -march=corei7 -mstringop-strategy=unrolled_loop" } */ +extern void *memset (void *, int, __SIZE_TYPE__); +#define SIZE 768 +#define OFFSET 0 +char dst[SIZE + OFFSET] = {}; + +void +do_set (char c) +{ + memset (dst+OFFSET, c, sizeof (dst[0]) * SIZE); +} + +/* { dg-final { scan-assembler "%xmm" } } */ diff --git a/gcc/testsuite/gcc.target/i386/sw-1.c b/gcc/testsuite/gcc.target/i386/sw-1.c index 483d117..e3d3b91 100644 --- a/gcc/testsuite/gcc.target/i386/sw-1.c +++ b/gcc/testsuite/gcc.target/i386/sw-1.c @@ -1,5 +1,5 @@ /* { dg-do compile } */ -/* { dg-options "-O2 -fshrink-wrap -fdump-rtl-pro_and_epilogue" } */ +/* { dg-options "-O2 -fshrink-wrap -fdump-rtl-pro_and_epilogue -mstringop-strategy=rep_byte" } */ #include
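All of the new tests above share one naming scheme and one skeleton: the `sN` part of the file name encodes the copy/set size in bytes, `a0`/`a1` mark a compile-time-known alignment (global arrays addressed at a 0- or 1-byte offset), `au` marks unknown alignment (bare pointers), and the trailing number selects the -march / strategy / fill-value variant. The sketch below is an illustrative aside rather than part of the patch; the directives and values are taken from the tests above, and the inline comments are editorial.

```c
/* Skeleton shared by the new tests (illustrative; values are examples).  */
/* { dg-do compile { target { ! ia32 } } } */   /* compile-only, 64-bit variant */
/* { dg-options "-O2 -march=corei7" } */        /* CPU whose stringop tables are under test */
extern void *memset (void *, int, __SIZE_TYPE__);
#define SIZE 64     /* the "s64" part of the file name */
#define OFFSET 1    /* "a1": known 1-byte misalignment; "au" tests use bare pointers instead */
char dst[SIZE + OFFSET] = {};

void
do_set (void)
{
  memset (dst + OFFSET, 0, sizeof (dst[0]) * SIZE);
}

/* The dg-final scan encodes the expected expansion strategy:
   "%xmm" when SSE moves should be generated, "movq" when 8-byte moves
   are expected instead, and a literal call to memset/memcpy when the
   libcall strategy should win.  */
/* { dg-final { scan-assembler "%xmm" } } */
```

Assuming a built GCC tree, an individual test of this form can usually be exercised in isolation with something along the lines of `make check-gcc RUNTESTFLAGS="i386.exp=memset-s64-a1-2.c"`, though the exact invocation depends on the local build setup.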