vectorize 32-bit integer multiply for x86
- From: Richard Henderson <rth@redhat.com>
- To: gcc-patches@gcc.gnu.org
- Date: Mon, 16 May 2005 21:35:27 -0700
- Subject: vectorize 32-bit integer multiply for x86
Tested on pentium4-linux.
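Background for the patch below: SSE2 has no packed 32-bit multiply, only pmuludq (unsigned 32x32->64 on elements 2 and 0). The expander therefore builds mulv4si3 from two pmuludq operations plus shifts and shuffles. A minimal C intrinsics sketch of the same expansion (the helper name is mine, purely illustrative):

#include <emmintrin.h>

/* Sketch of the mulv4si3 expansion; the function name is illustrative,
   not part of the patch.  */
static __m128i
mulv4si3_sketch (__m128i op1, __m128i op2)
{
  /* Multiply elements 2 and 0 (pmuludq); the low 32 bits of each
     product land in dwords 2 and 0.  */
  __m128i even = _mm_mul_epu32 (op1, op2);

  /* Shift both inputs down one element (psrldq $4) so that elements
     3 and 1 sit in the slots pmuludq reads, then multiply those.  */
  __m128i odd = _mm_mul_epu32 (_mm_srli_si128 (op1, 4),
                               _mm_srli_si128 (op2, 4));

  /* Move the dword-2 results down to dword 1 (pshufd); dwords 2 and 3
     are don't-cares.  */
  even = _mm_shuffle_epi32 (even, _MM_SHUFFLE (0, 0, 2, 0));
  odd = _mm_shuffle_epi32 (odd, _MM_SHUFFLE (0, 0, 2, 0));

  /* Interleave the low dwords (punpckldq) to restore lane order.  */
  return _mm_unpacklo_epi32 (even, odd);
}

The pshufd selector (0,0,2,0) mirrors the const0_rtx/const2_rtx arguments in the expander; lanes 2 and 3 of the shuffled temporaries never matter, since punpckldq reads only the low two dwords of each operand.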
r~
* config/i386/sse.md (mulv4si3): New.
* lib/target-supports.exp (check_effective_target_vect_int_mult): Add
i?86 and x86_64.
Index: gcc/config/i386/sse.md
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/i386/sse.md,v
retrieving revision 1.11
diff -u -p -r1.11 sse.md
--- gcc/config/i386/sse.md 22 Apr 2005 08:42:25 -0000 1.11
+++ gcc/config/i386/sse.md 17 May 2005 02:10:32 -0000
@@ -2490,6 +2490,53 @@
[(set_attr "type" "sseiadd")
(set_attr "mode" "TI")])
+(define_expand "mulv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "")
+ (mult:V4SI (match_operand:V4SI 1 "nonimmediate_operand" "")
+ (match_operand:V4SI 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+{
+ rtx t1, t2, t3, t4, t5, t6, thirtytwo;
+ rtx op0, op1, op2;
+ rtvec v;
+
+ op0 = operands[0];
+ op1 = operands[1];
+ op2 = operands[2];
+ t1 = gen_reg_rtx (V4SImode);
+ t2 = gen_reg_rtx (V4SImode);
+ t3 = gen_reg_rtx (V4SImode);
+ t4 = gen_reg_rtx (V4SImode);
+ t5 = gen_reg_rtx (V4SImode);
+ t6 = gen_reg_rtx (V4SImode);
+ thirtytwo = GEN_INT (32);
+
+ /* Multiply elements 2 and 0. */
+ emit_insn (gen_sse2_umulv2siv2di3 (gen_lowpart (V2DImode, t1), op1, op2));
+
+ /* Shift both input vectors down one element, so that elements 3 and 1
+ are now in the slots for elements 2 and 0. For K8, at least, this is
+ faster than using a shuffle. */
+ emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, t2),
+ gen_lowpart (TImode, op1), thirtytwo));
+ emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, t3),
+ gen_lowpart (TImode, op2), thirtytwo));
+
+ /* Multiply elements 3 and 1. */
+ emit_insn (gen_sse2_umulv2siv2di3 (gen_lowpart (V2DImode, t4), t2, t3));
+
+ /* Move the results in element 2 down to element 1; we don't care what
+ goes in elements 2 and 3. */
+ emit_insn (gen_sse2_pshufd_1 (t5, t1, const0_rtx, const2_rtx,
+ const0_rtx, const0_rtx));
+ emit_insn (gen_sse2_pshufd_1 (t6, t4, const0_rtx, const2_rtx,
+ const0_rtx, const0_rtx));
+
+ /* Merge the parts back together. */
+ emit_insn (gen_sse2_punpckldq (op0, t5, t6));
+ DONE;
+})
+
(define_insn "ashr<mode>3"
[(set (match_operand:SSEMODE24 0 "register_operand" "=x")
(ashiftrt:SSEMODE24
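One point worth spelling out (my gloss, not from the patch): pmuludq is an unsigned multiply, yet mulv4si3 must serve signed multiplication too. That works because only the low 32 bits of each product are kept, and those bits are the same for signed and unsigned operands. A quick check:

#include <stdint.h>
#include <assert.h>

int
main (void)
{
  int32_t a = -3, b = 7;
  /* Widen as unsigned, exactly as pmuludq does.  */
  uint64_t u = (uint64_t) (uint32_t) a * (uint32_t) b;
  /* The low 32 bits still equal the truncated signed product.  */
  assert ((int32_t) u == a * b);
  return 0;
}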
Index: gcc/testsuite/lib/target-supports.exp
===================================================================
RCS file: /cvs/gcc/gcc/gcc/testsuite/lib/target-supports.exp,v
retrieving revision 1.57
diff -u -p -r1.57 target-supports.exp
--- gcc/testsuite/lib/target-supports.exp 16 May 2005 00:51:37 -0000 1.57
+++ gcc/testsuite/lib/target-supports.exp 17 May 2005 02:10:32 -0000
@@ -955,7 +955,9 @@ proc check_effective_target_vect_int_mul
verbose "check_effective_target_vect_int_mult: using cached result" 2
} else {
set et_vect_int_mult_saved 0
- if { [istarget powerpc*-*-*] } {
+ if { [istarget powerpc*-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*] } {
set et_vect_int_mult_saved 1
}
}
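For completeness: vect_int_mult is the effective-target keyword the vectorizer tests key off, so with this change those tests start exercising the new pattern on x86. A hedged sketch of such a test (body and directives are illustrative, not taken from the testsuite):

/* { dg-require-effective-target vect_int_mult } */

#define N 16
int a[N], b[N], c[N];

void
mult (void)
{
  int i;
  for (i = 0; i < N; i++)
    a[i] = b[i] * c[i];
}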