Make use of clrmem if VAL is zero. */
void
-s390_expand_setmem (rtx dst, rtx len, rtx val)
+s390_expand_setmem (rtx dst, rtx len, rtx val, rtx min_len_rtx, rtx max_len_rtx)
{
- if (GET_CODE (len) == CONST_INT && INTVAL (len) <= 0)
+ /* Exit early in case nothing has to be done. */
+ if (CONST_INT_P (len) && UINTVAL (len) == 0)
return;
gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
+ unsigned HOST_WIDE_INT min_len = UINTVAL (min_len_rtx);
+ unsigned HOST_WIDE_INT max_len
+ = max_len_rtx ? UINTVAL (max_len_rtx) : HOST_WIDE_INT_M1U;
+
+ /* Vectorize memset with a constant length:
+ - if 0 < LEN < 16, then emit a vstl based solution;
+ - if 16 <= LEN <= 64, then emit a vst based solution
+ where the last two vector stores may overlap in case LEN % 16 != 0.
+ The price paid for such an overlap is negligible compared to the extra
+ GPR which would be required for vstl. */
+ if (CONST_INT_P (len) && UINTVAL (len) <= 64 && val != const0_rtx
+ && TARGET_VX)
+ {
+ rtx val_vec = gen_reg_rtx (V16QImode);
+ emit_move_insn (val_vec, gen_rtx_VEC_DUPLICATE (V16QImode, val));
+
+ if (UINTVAL (len) < 16)
+ {
+ rtx len_reg = gen_reg_rtx (SImode);
+ emit_move_insn (len_reg, GEN_INT (UINTVAL (len) - 1));
+ emit_insn (gen_vstlv16qi (val_vec, len_reg, dst));
+ }
+ else
+ {
+ unsigned HOST_WIDE_INT l = UINTVAL (len) / 16;
+ unsigned HOST_WIDE_INT r = UINTVAL (len) % 16;
+ unsigned HOST_WIDE_INT o = 0;
+ for (unsigned HOST_WIDE_INT i = 0; i < l; ++i)
+ {
+ rtx newdst = adjust_address (dst, V16QImode, o);
+ emit_move_insn (newdst, val_vec);
+ o += 16;
+ }
+ if (r != 0)
+ {
+ rtx newdst = adjust_address (dst, V16QImode, (o - 16) + r);
+ emit_move_insn (newdst, val_vec);
+ }
+ }
+ }
+
/* Expand setmem/clrmem for a constant length operand without a
loop if it will be shorter that way.
clrmem loop (with PFD) is 30 bytes -> 5 * xc
clrmem loop (without PFD) is 24 bytes -> 4 * xc
setmem loop (with PFD) is 38 bytes -> ~4 * (mvi/stc + mvc)
setmem loop (without PFD) is 32 bytes -> ~4 * (mvi/stc + mvc) */
- if (GET_CODE (len) == CONST_INT
+ else if (GET_CODE (len) == CONST_INT
&& ((val == const0_rtx
&& (INTVAL (len) <= 256 * 4
|| (INTVAL (len) <= 256 * 5 && TARGET_SETMEM_PFD(val,len))))
val));
}
+ /* Non-constant length and no loop required. */
+ else if (!CONST_INT_P (len) && max_len <= 256)
+ {
+ rtx_code_label *end_label;
+
+ if (min_len == 0)
+ {
+ end_label = gen_label_rtx ();
+ emit_cmp_and_jump_insns (len, const0_rtx, EQ, NULL_RTX,
+ GET_MODE (len), 1, end_label,
+ profile_probability::very_unlikely ());
+ }
+
+ rtx lenm1 = expand_binop (GET_MODE (len), add_optab, len, constm1_rtx,
+ NULL_RTX, 1, OPTAB_DIRECT);
+
+ /* Prefer a vectorized implementation over one which makes use of an
+ execute instruction since the former is faster (although it increases
+ register pressure). */
+ if (max_len <= 16 && TARGET_VX)
+ {
+ rtx val_vec = gen_reg_rtx (V16QImode);
+ if (val == const0_rtx)
+ emit_move_insn (val_vec, CONST0_RTX (V16QImode));
+ else
+ emit_move_insn (val_vec, gen_rtx_VEC_DUPLICATE (V16QImode, val));
+
+ lenm1 = convert_to_mode (SImode, lenm1, 1);
+ emit_insn (gen_vstlv16qi (val_vec, lenm1, dst));
+ }
+ else
+ {
+ if (val == const0_rtx)
+ emit_insn (
+ gen_clrmem_short (dst, convert_to_mode (Pmode, lenm1, 1)));
+ else
+ {
+ emit_move_insn (adjust_address (dst, QImode, 0), val);
+
+ rtx_code_label *onebyte_end_label;
+ if (min_len <= 1)
+ {
+ onebyte_end_label = gen_label_rtx ();
+ emit_cmp_and_jump_insns (
+ len, const1_rtx, EQ, NULL_RTX, GET_MODE (len), 1,
+ onebyte_end_label, profile_probability::very_unlikely ());
+ }
+
+ rtx dstp1 = adjust_address (dst, VOIDmode, 1);
+ rtx lenm2
+ = expand_binop (GET_MODE (len), add_optab, len, GEN_INT (-2),
+ NULL_RTX, 1, OPTAB_DIRECT);
+ lenm2 = convert_to_mode (Pmode, lenm2, 1);
+ emit_insn (gen_cpymem_short (dstp1, dst, lenm2));
+
+ if (min_len <= 1)
+ emit_label (onebyte_end_label);
+ }
+ }
+
+ if (min_len == 0)
+ emit_label (end_label);
+ }
+
else
{
rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
blocks = gen_reg_rtx (mode);
convert_move (count, len, 1);
- emit_cmp_and_jump_insns (count, const0_rtx,
- EQ, NULL_RTX, mode, 1, zerobyte_end_label,
- profile_probability::very_unlikely ());
+ if (min_len == 0)
+ emit_cmp_and_jump_insns (count, const0_rtx, EQ, NULL_RTX, mode, 1,
+ zerobyte_end_label,
+ profile_probability::very_unlikely ());
/* We need to make a copy of the target address since memset is
supposed to return it unmodified. We have to make it here
the mvc reading this value). */
set_mem_size (dst, 1);
dstp1 = adjust_address (dst, VOIDmode, 1);
- emit_cmp_and_jump_insns (count,
- const1_rtx, EQ, NULL_RTX, mode, 1,
- onebyte_end_label,
- profile_probability::very_unlikely ());
+ if (min_len <= 1)
+ emit_cmp_and_jump_insns (count, const1_rtx, EQ, NULL_RTX, mode, 1,
+ onebyte_end_label,
+ profile_probability::very_unlikely ());
}
/* There is one unconditional (mvi+mvc)/xc after the loop
emit_jump (loop_start_label);
- if (val != const0_rtx)
+ if (val != const0_rtx && min_len <= 1)
{
/* The 1 byte != 0 special case. Not handled efficiently
since we require two jumps for that. However, this