This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



[PATCH v3 1/6] [MIPS] Split Loongson (MMI) from loongson3a


From e9d36eb4d4a841486ac82037497a2671481f8a27 Mon Sep 17 00:00:00 2001
From: Chenghua Xu <paul.hua.gm@gmail.com>
Date: Sun, 14 Oct 2018 11:11:00 +0800
Subject: [PATCH 1/6] Add support for Loongson MMI instructions.

gcc/
        * config.gcc (extra_headers): Add loongson-mmiintrin.h.
        * config/mips/loongson.md: Move to ...
        * config/mips/loongson-mmi.md: ... here; adjust.
        * config/mips/loongson.h: Move contents to ...
        State as deprecated.  Include loongson-mmiintrin.h for backward
        compatibility and warn about the deprecation.
        * config/mips/loongson-mmiintrin.h: ... here.
        * config/mips/mips.c (mips_hard_regno_mode_ok_uncached,
        mips_vector_mode_supported_p, AVAIL_NON_MIPS16): Use
        TARGET_LOONGSON_MMI instead of TARGET_LOONGSON_VECTORS.
        (mips_option_override): Make sure MMI uses hard float; enable
        MMI by default on Loongson 2E/2F/3A.
        (mips_shift_truncation_mask, mips_expand_vpc_loongson_even_odd,
        mips_expand_vpc_loongson_pshufh, mips_expand_vpc_loongson_bcast,
        mips_expand_vector_init): Use TARGET_LOONGSON_MMI instead of
        TARGET_LOONGSON_VECTORS.
        * config/mips/mips.h (TARGET_LOONGSON_VECTORS): Delete.
        (TARGET_CPU_CPP_BUILTINS): Add __mips_loongson_mmi.
        (SHIFT_COUNT_TRUNCATED): Use TARGET_LOONGSON_MMI instead of
        TARGET_LOONGSON_VECTORS.
        * config/mips/mips.md (MOVE64, MOVE128): Use
        TARGET_LOONGSON_MMI instead of TARGET_LOONGSON_VECTORS.
        (Loongson MMI patterns): Include loongson-mmi.md instead of
        loongson.md.
        * config/mips/mips.opt (-mloongson-mmi): New option.
        * doc/invoke.texi (-mloongson-mmi): Document.

gcc/testsuite/
        * gcc.target/mips/loongson-shift-count-truncated-1.c
        (dg-options): Run with the -mloongson-mmi option.
        Include loongson-mmiintrin.h instead of loongson.h.
        * gcc.target/mips/loongson-simd.c: Likewise.
        * gcc.target/mips/mips.exp (mips_option_groups): Add
        -mloongson-mmi option.
        (mips-dg-init): Add -mloongson-mmi option.
        * gcc.target/mips/umips-store16-1.c (dg-options): Add
        forbid_cpu=loongson3a.
        * lib/target-supports.exp: Rename check_mips_loongson_hw_available
        to check_mips_loongson_mmi_hw_available.
        Rename check_effective_target_mips_loongson_runtime to
        check_effective_target_mips_loongson_mmi_runtime.
        (check_effective_target_vect_int): Use mips_loongson_mmi instead
        of mips_loongson when checking et-is-effective-target.
        (add_options_for_mips_loongson_mmi): New proc.
        Rename check_effective_target_mips_loongson to
        check_effective_target_mips_loongson_mmi.
        (check_effective_target_vect_shift,
        check_effective_target_whole_vector_shift,
        check_effective_target_vect_no_int_min_max,
        check_effective_target_vect_no_align,
        check_effective_target_vect_short_mult,
        check_vect_support_and_set_flags): Use mips_loongson_mmi instead
        of mips_loongson when checking et-is-effective-target.
---
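For context, here is a minimal sketch of how the new option, builtin macro
and header fit together.  The test program below is hypothetical (the file
name, compile command and cross-compiler triplet are only illustrative,
e.g. "mips64el-linux-gnu-gcc -mloongson-mmi -O2 mmi-test.c"); it only uses
intrinsics and types that appear in loongson-mmiintrin.h in this patch:

    #include <stdio.h>

    #ifdef __mips_loongson_mmi          /* defined when MMI is enabled */
    # include <loongson-mmiintrin.h>    /* header added by this patch */
    #endif

    int
    main (void)
    {
    #ifdef __mips_loongson_mmi
      int16x4_t a = { 1, 2, 3, 4 };
      int16x4_t b = { 10, 20, 30, 40 };
      /* Wraparound addition of signed halfwords; expands to one paddh.  */
      int16x4_t c = paddh_s (a, b);

      printf ("%d %d %d %d\n", c[0], c[1], c[2], c[3]);
    #else
      puts ("Loongson MMI not enabled; compile with -mloongson-mmi.");
    #endif
      return 0;
    }

Existing code that includes the old loongson.h should still build: per the
ChangeLog above, that header now forwards to loongson-mmiintrin.h and emits
a deprecation warning.
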
 gcc/config.gcc                                     |   2 +-
 gcc/config/mips/{loongson.md => loongson-mmi.md}   | 241 ++++---
 gcc/config/mips/loongson-mmiintrin.h               | 691 +++++++++++++++++++++
 gcc/config/mips/loongson.h                         | 669 +-------------------
 gcc/config/mips/mips.c                             |  34 +-
 gcc/config/mips/mips.h                             |  21 +-
 gcc/config/mips/mips.md                            |  16 +-
 gcc/config/mips/mips.opt                           |   4 +
 gcc/doc/invoke.texi                                |   7 +
 .../mips/loongson-shift-count-truncated-1.c        |   6 +-
 gcc/testsuite/gcc.target/mips/loongson-simd.c      |   4 +-
 gcc/testsuite/gcc.target/mips/mips.exp             |   7 +
 gcc/testsuite/gcc.target/mips/umips-store16-1.c    |   2 +-
 gcc/testsuite/lib/target-supports.exp              |  47 +-
 14 files changed, 913 insertions(+), 838 deletions(-)
 rename gcc/config/mips/{loongson.md => loongson-mmi.md} (79%)
 create mode 100644 gcc/config/mips/loongson-mmiintrin.h

diff --git a/gcc/config.gcc b/gcc/config.gcc
index 8521f7d556e..7871700db13 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -441,7 +441,7 @@ microblaze*-*-*)
         ;;
 mips*-*-*)
 	cpu_type=mips
-	extra_headers="loongson.h msa.h"
+	extra_headers="loongson.h loongson-mmiintrin.h msa.h"
 	extra_objs="frame-header-opt.o"
 	extra_options="${extra_options} g.opt fused-madd.opt mips/mips-tables.opt"
 	;;
diff --git a/gcc/config/mips/loongson.md b/gcc/config/mips/loongson-mmi.md
similarity index 79%
rename from gcc/config/mips/loongson.md
rename to gcc/config/mips/loongson-mmi.md
index 14794d3671f..ad23f676581 100644
--- a/gcc/config/mips/loongson.md
+++ b/gcc/config/mips/loongson-mmi.md
@@ -1,5 +1,4 @@
-;; Machine description for Loongson-specific patterns, such as
-;; ST Microelectronics Loongson-2E/2F etc.
+;; Machine description for Loongson MultiMedia extensions Instructions (MMI).
 ;; Copyright (C) 2008-2018 Free Software Foundation, Inc.
 ;; Contributed by CodeSourcery.
 ;;
@@ -102,7 +101,7 @@
 (define_expand "mov<mode>"
   [(set (match_operand:VWHB 0)
 	(match_operand:VWHB 1))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   if (mips_legitimize_move (<MODE>mode, operands[0], operands[1]))
     DONE;
@@ -111,8 +110,8 @@
 ;; Handle legitimized moves between values of vector modes.
 (define_insn "mov<mode>_internal"
   [(set (match_operand:VWHB 0 "nonimmediate_operand" "=m,f,d,f,  d,  m,  d")
-	(match_operand:VWHB 1 "move_operand"          "f,m,f,dYG,dYG,dYG,m"))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+	(match_operand:VWHB 1 "move_operand"	     "f,m,f,dYG,dYG,dYG,m"))]
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   { return mips_output_move (operands[0], operands[1]); }
   [(set_attr "move_type" "fpstore,fpload,mfc,mtc,move,store,load")
    (set_attr "mode" "DI")])
@@ -122,7 +121,7 @@
 (define_expand "vec_init<mode><unitmode>"
   [(set (match_operand:VWHB 0 "register_operand")
 	(match_operand 1 ""))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   mips_expand_vector_init (operands[0], operands[1]);
   DONE;
@@ -135,7 +134,7 @@
 	(unspec:VHB [(truncate:<V_inner>
 		       (match_operand:DI 1 "reg_or_0_operand" "Jd"))]
 		    UNSPEC_LOONGSON_VINIT))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "dmtc1\t%z1,%0"
   [(set_attr "move_type" "mtc")
    (set_attr "mode" "DI")])
@@ -146,7 +145,7 @@
 	(vec_concat:V2SI
 	  (match_operand:SI 1 "register_operand" "f")
 	  (match_operand:SI 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpcklwd\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
@@ -155,33 +154,33 @@
 ;; Pack with signed saturation.
 (define_insn "vec_pack_ssat_<mode>"
   [(set (match_operand:<V_squash_double> 0 "register_operand" "=f")
-        (vec_concat:<V_squash_double>
+	(vec_concat:<V_squash_double>
 	 (ss_truncate:<V_squash>
 	  (match_operand:VWH 1 "register_operand" "f"))
 	 (ss_truncate:<V_squash>
 	  (match_operand:VWH 2 "register_operand" "f"))))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "packss<V_squash_double_suffix>\t%0,%1,%2"
   [(set_attr "type" "fmul")])
 
 ;; Pack with unsigned saturation.
 (define_insn "vec_pack_usat_<mode>"
   [(set (match_operand:<V_squash_double> 0 "register_operand" "=f")
-        (vec_concat:<V_squash_double>
+	(vec_concat:<V_squash_double>
 	 (us_truncate:<V_squash>
 	  (match_operand:VH 1 "register_operand" "f"))
 	 (us_truncate:<V_squash>
 	  (match_operand:VH 2 "register_operand" "f"))))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "packus<V_squash_double_suffix>\t%0,%1,%2"
   [(set_attr "type" "fmul")])
 
 ;; Addition, treating overflow by wraparound.
 (define_insn "add<mode>3"
   [(set (match_operand:VWHB 0 "register_operand" "=f")
-        (plus:VWHB (match_operand:VWHB 1 "register_operand" "f")
+	(plus:VWHB (match_operand:VWHB 1 "register_operand" "f")
 		   (match_operand:VWHB 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "padd<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
@@ -193,38 +192,38 @@
 ;; GPRs instead of FPRs.
 (define_insn "loongson_paddd"
   [(set (match_operand:DI 0 "register_operand" "=f")
-        (unspec:DI [(match_operand:DI 1 "register_operand" "f")
+	(unspec:DI [(match_operand:DI 1 "register_operand" "f")
 		    (match_operand:DI 2 "register_operand" "f")]
 		   UNSPEC_LOONGSON_PADDD))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "paddd\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
 ;; Addition, treating overflow by signed saturation.
 (define_insn "ssadd<mode>3"
   [(set (match_operand:VHB 0 "register_operand" "=f")
-        (ss_plus:VHB (match_operand:VHB 1 "register_operand" "f")
+	(ss_plus:VHB (match_operand:VHB 1 "register_operand" "f")
 		     (match_operand:VHB 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "padds<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
 ;; Addition, treating overflow by unsigned saturation.
 (define_insn "usadd<mode>3"
   [(set (match_operand:VHB 0 "register_operand" "=f")
-        (us_plus:VHB (match_operand:VHB 1 "register_operand" "f")
+	(us_plus:VHB (match_operand:VHB 1 "register_operand" "f")
 		     (match_operand:VHB 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "paddus<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
 ;; Logical AND NOT.
 (define_insn "loongson_pandn_<V_suffix>"
   [(set (match_operand:VWHBDI 0 "register_operand" "=f")
-        (and:VWHBDI
+	(and:VWHBDI
 	 (not:VWHBDI (match_operand:VWHBDI 1 "register_operand" "f"))
 	 (match_operand:VWHBDI 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pandn\t%0,%1,%2"
   [(set_attr "type" "fmul")])
 
@@ -233,7 +232,7 @@
   [(set (match_operand:VWHB 0 "register_operand" "=f")
 	(and:VWHB (match_operand:VWHB 1 "register_operand" "f")
 		  (match_operand:VWHB 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "and\t%0,%1,%2"
   [(set_attr "type" "fmul")])
 
@@ -242,7 +241,7 @@
   [(set (match_operand:VWHB 0 "register_operand" "=f")
 	(ior:VWHB (match_operand:VWHB 1 "register_operand" "f")
 		  (match_operand:VWHB 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "or\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
@@ -251,7 +250,7 @@
   [(set (match_operand:VWHB 0 "register_operand" "=f")
 	(xor:VWHB (match_operand:VWHB 1 "register_operand" "f")
 		  (match_operand:VWHB 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "xor\t%0,%1,%2"
   [(set_attr "type" "fmul")])
 
@@ -261,7 +260,7 @@
 	(and:VWHB
 	  (not:VWHB (match_operand:VWHB 1 "register_operand" "f"))
 	  (not:VWHB (match_operand:VWHB 2 "register_operand" "f"))))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "nor\t%0,%1,%2"
   [(set_attr "type" "fmul")])
 
@@ -269,47 +268,47 @@
 (define_insn "one_cmpl<mode>2"
   [(set (match_operand:VWHB 0 "register_operand" "=f")
 	(not:VWHB (match_operand:VWHB 1 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "nor\t%0,%1,%1"
   [(set_attr "type" "fmul")])
 
 ;; Average.
 (define_insn "loongson_pavg<V_suffix>"
   [(set (match_operand:VHB 0 "register_operand" "=f")
-        (unspec:VHB [(match_operand:VHB 1 "register_operand" "f")
+	(unspec:VHB [(match_operand:VHB 1 "register_operand" "f")
 		     (match_operand:VHB 2 "register_operand" "f")]
 		    UNSPEC_LOONGSON_PAVG))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pavg<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
 ;; Equality test.
 (define_insn "loongson_pcmpeq<V_suffix>"
   [(set (match_operand:VWHB 0 "register_operand" "=f")
-        (unspec:VWHB [(match_operand:VWHB 1 "register_operand" "f")
+	(unspec:VWHB [(match_operand:VWHB 1 "register_operand" "f")
 		      (match_operand:VWHB 2 "register_operand" "f")]
 		     UNSPEC_LOONGSON_PCMPEQ))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pcmpeq<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
 ;; Greater-than test.
 (define_insn "loongson_pcmpgt<V_suffix>"
   [(set (match_operand:VWHB 0 "register_operand" "=f")
-        (unspec:VWHB [(match_operand:VWHB 1 "register_operand" "f")
+	(unspec:VWHB [(match_operand:VWHB 1 "register_operand" "f")
 		      (match_operand:VWHB 2 "register_operand" "f")]
 		     UNSPEC_LOONGSON_PCMPGT))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pcmpgt<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
 ;; Extract halfword.
 (define_insn "loongson_pextrh"
   [(set (match_operand:V4HI 0 "register_operand" "=f")
-        (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "f")
+	(unspec:V4HI [(match_operand:V4HI 1 "register_operand" "f")
 		      (match_operand:SI 2 "register_operand" "f")]
 		   UNSPEC_LOONGSON_PEXTR))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pextrh\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
@@ -322,7 +321,7 @@
 	    (match_operand:V4HI 2 "register_operand" "f"))
 	  (parallel [(const_int 4) (const_int 1)
 		     (const_int 2) (const_int 3)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pinsrh_0\t%0,%1,%2"
   [(set_attr "type" "fdiv")])
 
@@ -334,7 +333,7 @@
 	    (match_operand:V4HI 2 "register_operand" "f"))
 	  (parallel [(const_int 0) (const_int 4)
 		     (const_int 2) (const_int 3)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pinsrh_1\t%0,%1,%2"
   [(set_attr "type" "fdiv")])
 
@@ -346,7 +345,7 @@
 	    (match_operand:V4HI 2 "register_operand" "f"))
 	  (parallel [(const_int 0) (const_int 1)
 		     (const_int 4) (const_int 3)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pinsrh_2\t%0,%1,%2"
   [(set_attr "type" "fdiv")])
 
@@ -358,7 +357,7 @@
 	    (match_operand:V4HI 2 "register_operand" "f"))
 	  (parallel [(const_int 0) (const_int 1)
 		     (const_int 2) (const_int 4)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pinsrh_3\t%0,%1,%2"
   [(set_attr "type" "fdiv")])
 
@@ -368,7 +367,7 @@
 		      (match_operand:SI 2 "register_operand" "f")
 		      (match_operand:SI 3 "const_0_to_3_operand" "")]
 		     UNSPEC_LOONGSON_PINSRH))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pinsrh_%3\t%0,%1,%2"
   [(set_attr "type" "fdiv")])
 
@@ -378,7 +377,7 @@
 		      (match_operand:HI 2 "register_operand" "f")
 		      (match_operand:SI 3 "const_0_to_3_operand" "")]
 		     UNSPEC_LOONGSON_PINSRH))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   rtx ext = gen_reg_rtx (SImode);
   emit_move_insn (ext, gen_lowpart (SImode, operands[2]));
@@ -388,10 +387,10 @@
 ;; Multiply and add packed integers.
 (define_insn "loongson_pmaddhw"
   [(set (match_operand:V2SI 0 "register_operand" "=f")
-        (unspec:V2SI [(match_operand:V4HI 1 "register_operand" "f")
+	(unspec:V2SI [(match_operand:V4HI 1 "register_operand" "f")
 		      (match_operand:V4HI 2 "register_operand" "f")]
 		     UNSPEC_LOONGSON_PMADD))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pmaddhw\t%0,%1,%2"
   [(set_attr "type" "fmul")])
 
@@ -400,7 +399,7 @@
    (match_operand:V4HI 1 "register_operand" "")
    (match_operand:V4HI 2 "register_operand" "")
    (match_operand:V2SI 3 "register_operand" "")]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   rtx t = gen_reg_rtx (V2SImode);
   emit_insn (gen_loongson_pmaddhw (t, operands[1], operands[2]));
@@ -411,9 +410,9 @@
 ;; Maximum of signed halfwords.
 (define_insn "smaxv4hi3"
   [(set (match_operand:V4HI 0 "register_operand" "=f")
-        (smax:V4HI (match_operand:V4HI 1 "register_operand" "f")
+	(smax:V4HI (match_operand:V4HI 1 "register_operand" "f")
 		   (match_operand:V4HI 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pmaxsh\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
@@ -421,7 +420,7 @@
   [(match_operand:VWB 0 "register_operand" "")
    (match_operand:VWB 1 "register_operand" "")
    (match_operand:VWB 2 "register_operand" "")]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   mips_expand_vec_minmax (operands[0], operands[1], operands[2],
 			  gen_loongson_pcmpgt<V_suffix>, false);
@@ -431,18 +430,18 @@
 ;; Maximum of unsigned bytes.
 (define_insn "umaxv8qi3"
   [(set (match_operand:V8QI 0 "register_operand" "=f")
-        (umax:V8QI (match_operand:V8QI 1 "register_operand" "f")
+	(umax:V8QI (match_operand:V8QI 1 "register_operand" "f")
 		   (match_operand:V8QI 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pmaxub\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
 ;; Minimum of signed halfwords.
 (define_insn "sminv4hi3"
   [(set (match_operand:V4HI 0 "register_operand" "=f")
-        (smin:V4HI (match_operand:V4HI 1 "register_operand" "f")
+	(smin:V4HI (match_operand:V4HI 1 "register_operand" "f")
 		   (match_operand:V4HI 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pminsh\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
@@ -450,7 +449,7 @@
   [(match_operand:VWB 0 "register_operand" "")
    (match_operand:VWB 1 "register_operand" "")
    (match_operand:VWB 2 "register_operand" "")]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   mips_expand_vec_minmax (operands[0], operands[1], operands[2],
 			  gen_loongson_pcmpgt<V_suffix>, true);
@@ -460,76 +459,76 @@
 ;; Minimum of unsigned bytes.
 (define_insn "uminv8qi3"
   [(set (match_operand:V8QI 0 "register_operand" "=f")
-        (umin:V8QI (match_operand:V8QI 1 "register_operand" "f")
+	(umin:V8QI (match_operand:V8QI 1 "register_operand" "f")
 		   (match_operand:V8QI 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pminub\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
 ;; Move byte mask.
 (define_insn "loongson_pmovmsk<V_suffix>"
   [(set (match_operand:VB 0 "register_operand" "=f")
-        (unspec:VB [(match_operand:VB 1 "register_operand" "f")]
+	(unspec:VB [(match_operand:VB 1 "register_operand" "f")]
 		   UNSPEC_LOONGSON_PMOVMSK))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pmovmsk<V_suffix>\t%0,%1"
   [(set_attr "type" "fabs")])
 
 ;; Multiply unsigned integers and store high result.
 (define_insn "umul<mode>3_highpart"
   [(set (match_operand:VH 0 "register_operand" "=f")
-        (unspec:VH [(match_operand:VH 1 "register_operand" "f")
+	(unspec:VH [(match_operand:VH 1 "register_operand" "f")
 		    (match_operand:VH 2 "register_operand" "f")]
 		   UNSPEC_LOONGSON_PMULHU))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pmulhu<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fmul")])
 
 ;; Multiply signed integers and store high result.
 (define_insn "smul<mode>3_highpart"
   [(set (match_operand:VH 0 "register_operand" "=f")
-        (unspec:VH [(match_operand:VH 1 "register_operand" "f")
+	(unspec:VH [(match_operand:VH 1 "register_operand" "f")
 		    (match_operand:VH 2 "register_operand" "f")]
 		   UNSPEC_LOONGSON_PMULH))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pmulh<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fmul")])
 
 ;; Multiply signed integers and store low result.
 (define_insn "mul<mode>3"
   [(set (match_operand:VH 0 "register_operand" "=f")
-        (mult:VH (match_operand:VH 1 "register_operand" "f")
-                 (match_operand:VH 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+	(mult:VH (match_operand:VH 1 "register_operand" "f")
+		 (match_operand:VH 2 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pmull<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fmul")])
 
 ;; Multiply unsigned word integers.
 (define_insn "loongson_pmulu<V_suffix>"
   [(set (match_operand:DI 0 "register_operand" "=f")
-        (unspec:DI [(match_operand:VW 1 "register_operand" "f")
+	(unspec:DI [(match_operand:VW 1 "register_operand" "f")
 		    (match_operand:VW 2 "register_operand" "f")]
 		   UNSPEC_LOONGSON_PMULU))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pmulu<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fmul")])
 
 ;; Absolute difference.
 (define_insn "loongson_pasubub"
   [(set (match_operand:VB 0 "register_operand" "=f")
-        (unspec:VB [(match_operand:VB 1 "register_operand" "f")
+	(unspec:VB [(match_operand:VB 1 "register_operand" "f")
 		    (match_operand:VB 2 "register_operand" "f")]
 		   UNSPEC_LOONGSON_PASUBUB))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pasubub\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
 ;; Sum of unsigned byte integers.
 (define_insn "loongson_biadd"
   [(set (match_operand:<V_stretch_half> 0 "register_operand" "=f")
-        (unspec:<V_stretch_half> [(match_operand:VB 1 "register_operand" "f")]
+	(unspec:<V_stretch_half> [(match_operand:VB 1 "register_operand" "f")]
 				 UNSPEC_LOONGSON_BIADD))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "biadd\t%0,%1"
   [(set_attr "type" "fabs")])
 
@@ -537,63 +536,63 @@
   [(set (match_operand:V8QI 0 "register_operand" "=f")
 	(unspec:V8QI [(match_operand:V8QI 1 "register_operand" "f")]
 		     UNSPEC_LOONGSON_BIADD))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "biadd\t%0,%1"
   [(set_attr "type" "fabs")])
 
 ;; Sum of absolute differences.
 (define_insn "loongson_psadbh"
   [(set (match_operand:<V_stretch_half> 0 "register_operand" "=f")
-        (unspec:<V_stretch_half> [(match_operand:VB 1 "register_operand" "f")
+	(unspec:<V_stretch_half> [(match_operand:VB 1 "register_operand" "f")
 				  (match_operand:VB 2 "register_operand" "f")]
 				 UNSPEC_LOONGSON_PSADBH))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pasubub\t%0,%1,%2;biadd\t%0,%0"
   [(set_attr "type" "fadd")])
 
 ;; Shuffle halfwords.
 (define_insn "loongson_pshufh"
   [(set (match_operand:VH 0 "register_operand" "=f")
-        (unspec:VH [(match_operand:VH 1 "register_operand" "f")
+	(unspec:VH [(match_operand:VH 1 "register_operand" "f")
 		    (match_operand:SI 2 "register_operand" "f")]
 		   UNSPEC_LOONGSON_PSHUFH))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "pshufh\t%0,%1,%2"
   [(set_attr "type" "fmul")])
 
 ;; Shift left logical.
 (define_insn "ashl<mode>3"
   [(set (match_operand:VWH 0 "register_operand" "=f")
-        (ashift:VWH (match_operand:VWH 1 "register_operand" "f")
+	(ashift:VWH (match_operand:VWH 1 "register_operand" "f")
 		    (match_operand:SI 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "psll<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
 ;; Shift right arithmetic.
 (define_insn "ashr<mode>3"
   [(set (match_operand:VWH 0 "register_operand" "=f")
-        (ashiftrt:VWH (match_operand:VWH 1 "register_operand" "f")
+	(ashiftrt:VWH (match_operand:VWH 1 "register_operand" "f")
 		      (match_operand:SI 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "psra<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
 ;; Shift right logical.
 (define_insn "lshr<mode>3"
   [(set (match_operand:VWH 0 "register_operand" "=f")
-        (lshiftrt:VWH (match_operand:VWH 1 "register_operand" "f")
+	(lshiftrt:VWH (match_operand:VWH 1 "register_operand" "f")
 		      (match_operand:SI 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "psrl<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
 ;; Subtraction, treating overflow by wraparound.
 (define_insn "sub<mode>3"
   [(set (match_operand:VWHB 0 "register_operand" "=f")
-        (minus:VWHB (match_operand:VWHB 1 "register_operand" "f")
+	(minus:VWHB (match_operand:VWHB 1 "register_operand" "f")
 		    (match_operand:VWHB 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "psub<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
@@ -603,28 +602,28 @@
 ;; 'minus' here.
 (define_insn "loongson_psubd"
   [(set (match_operand:DI 0 "register_operand" "=f")
-        (unspec:DI [(match_operand:DI 1 "register_operand" "f")
+	(unspec:DI [(match_operand:DI 1 "register_operand" "f")
 		    (match_operand:DI 2 "register_operand" "f")]
 		   UNSPEC_LOONGSON_PSUBD))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "psubd\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
 ;; Subtraction, treating overflow by signed saturation.
 (define_insn "sssub<mode>3"
   [(set (match_operand:VHB 0 "register_operand" "=f")
-        (ss_minus:VHB (match_operand:VHB 1 "register_operand" "f")
+	(ss_minus:VHB (match_operand:VHB 1 "register_operand" "f")
 		      (match_operand:VHB 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "psubs<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
 ;; Subtraction, treating overflow by unsigned saturation.
 (define_insn "ussub<mode>3"
   [(set (match_operand:VHB 0 "register_operand" "=f")
-        (us_minus:VHB (match_operand:VHB 1 "register_operand" "f")
+	(us_minus:VHB (match_operand:VHB 1 "register_operand" "f")
 		      (match_operand:VHB 2 "register_operand" "f")))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "psubus<V_suffix>\t%0,%1,%2"
   [(set_attr "type" "fadd")])
 
@@ -639,7 +638,7 @@
 		     (const_int 5) (const_int 13)
 		     (const_int 6) (const_int 14)
 		     (const_int 7) (const_int 15)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpckhbh\t%0,%1,%2"
   [(set_attr "type" "fdiv")])
 
@@ -651,7 +650,7 @@
 	    (match_operand:V4HI 2 "register_operand" "f"))
 	  (parallel [(const_int 2) (const_int 6)
 		     (const_int 3) (const_int 7)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpckhhw\t%0,%1,%2"
   [(set_attr "type" "fdiv")])
 
@@ -665,7 +664,7 @@
 		     (const_int 12) (const_int 13)
 		     (const_int 6)  (const_int 7)
 		     (const_int 14) (const_int 15)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpckhhw\t%0,%1,%2"
   [(set_attr "type" "fdiv")])
 
@@ -676,7 +675,7 @@
 	    (match_operand:V2SI 1 "register_operand" "f")
 	    (match_operand:V2SI 2 "register_operand" "f"))
 	  (parallel [(const_int 1) (const_int 3)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpckhwd\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
@@ -690,7 +689,7 @@
 		     (const_int 6) (const_int 7)
 		     (const_int 12) (const_int 13)
 		     (const_int 14) (const_int 15)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpckhwd\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
@@ -702,7 +701,7 @@
 	    (match_operand:V4HI 2 "register_operand" "f"))
 	  (parallel [(const_int 2) (const_int 3)
 		     (const_int 6) (const_int 7)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpckhwd\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
@@ -717,7 +716,7 @@
 		     (const_int 1) (const_int 9)
 		     (const_int 2) (const_int 10)
 		     (const_int 3) (const_int 11)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpcklbh\t%0,%1,%2"
   [(set_attr "type" "fdiv")])
 
@@ -729,7 +728,7 @@
 	    (match_operand:V4HI 2 "register_operand" "f"))
 	  (parallel [(const_int 0) (const_int 4)
 		     (const_int 1) (const_int 5)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpcklhw\t%0,%1,%2"
   [(set_attr "type" "fdiv")])
 
@@ -743,7 +742,7 @@
 		     (const_int 8)  (const_int 9)
 		     (const_int 2)  (const_int 3)
 		     (const_int 10) (const_int 11)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpcklhw\t%0,%1,%2"
   [(set_attr "type" "fdiv")])
 
@@ -754,7 +753,7 @@
 	    (match_operand:V2SI 1 "register_operand" "f")
 	    (match_operand:V2SI 2 "register_operand" "f"))
 	  (parallel [(const_int 0) (const_int 2)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpcklwd\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
@@ -768,7 +767,7 @@
 		     (const_int 2) (const_int 3)
 		     (const_int 8) (const_int 9)
 		     (const_int 10) (const_int 11)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpcklwd\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
@@ -780,14 +779,14 @@
 	    (match_operand:V4HI 2 "register_operand" "f"))
 	  (parallel [(const_int 0) (const_int 1)
 		     (const_int 4) (const_int 5)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "punpcklwd\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
 (define_expand "vec_unpacks_lo_<mode>"
   [(match_operand:<V_stretch_half> 0 "register_operand" "")
    (match_operand:VHB 1 "register_operand" "")]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   mips_expand_vec_unpack (operands, false, false);
   DONE;
@@ -796,7 +795,7 @@
 (define_expand "vec_unpacks_hi_<mode>"
   [(match_operand:<V_stretch_half> 0 "register_operand" "")
    (match_operand:VHB 1 "register_operand" "")]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   mips_expand_vec_unpack (operands, false, true);
   DONE;
@@ -805,7 +804,7 @@
 (define_expand "vec_unpacku_lo_<mode>"
   [(match_operand:<V_stretch_half> 0 "register_operand" "")
    (match_operand:VHB 1 "register_operand" "")]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   mips_expand_vec_unpack (operands, true, false);
   DONE;
@@ -814,7 +813,7 @@
 (define_expand "vec_unpacku_hi_<mode>"
   [(match_operand:<V_stretch_half> 0 "register_operand" "")
    (match_operand:VHB 1 "register_operand" "")]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   mips_expand_vec_unpack (operands, true, true);
   DONE;
@@ -823,35 +822,35 @@
 ;; Whole vector shifts, used for reduction epilogues.
 (define_insn "vec_shl_<mode>"
   [(set (match_operand:VWHBDI 0 "register_operand" "=f")
-        (unspec:VWHBDI [(match_operand:VWHBDI 1 "register_operand" "f")
-                        (match_operand:SI 2 "register_operand" "f")]
-                       UNSPEC_LOONGSON_DSLL))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+	(unspec:VWHBDI [(match_operand:VWHBDI 1 "register_operand" "f")
+			(match_operand:SI 2 "register_operand" "f")]
+		       UNSPEC_LOONGSON_DSLL))]
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "dsll\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
 (define_insn "vec_shr_<mode>"
   [(set (match_operand:VWHBDI 0 "register_operand" "=f")
-        (unspec:VWHBDI [(match_operand:VWHBDI 1 "register_operand" "f")
-                        (match_operand:SI 2 "register_operand" "f")]
-                       UNSPEC_LOONGSON_DSRL))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+	(unspec:VWHBDI [(match_operand:VWHBDI 1 "register_operand" "f")
+			(match_operand:SI 2 "register_operand" "f")]
+		       UNSPEC_LOONGSON_DSRL))]
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "dsrl\t%0,%1,%2"
   [(set_attr "type" "fcvt")])
 
 (define_insn "vec_loongson_extract_lo_<mode>"
   [(set (match_operand:<V_inner> 0 "register_operand" "=r")
-        (vec_select:<V_inner>
-          (match_operand:VWHB 1 "register_operand" "f")
-          (parallel [(const_int 0)])))]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+	(vec_select:<V_inner>
+	  (match_operand:VWHB 1 "register_operand" "f")
+	  (parallel [(const_int 0)])))]
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
   "mfc1\t%0,%1"
   [(set_attr "type" "mfc")])
 
 (define_expand "reduc_plus_scal_<mode>"
   [(match_operand:<V_inner> 0 "register_operand" "")
    (match_operand:VWHB 1 "register_operand" "")]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
   mips_expand_vec_reduc (tmp, operands[1], gen_add<mode>3);
@@ -862,7 +861,7 @@
 (define_expand "reduc_smax_scal_<mode>"
   [(match_operand:<V_inner> 0 "register_operand" "")
    (match_operand:VWHB 1 "register_operand" "")]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
   mips_expand_vec_reduc (tmp, operands[1], gen_smax<mode>3);
@@ -873,7 +872,7 @@
 (define_expand "reduc_smin_scal_<mode>"
   [(match_operand:<V_inner> 0 "register_operand" "")
    (match_operand:VWHB 1 "register_operand" "")]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
   mips_expand_vec_reduc (tmp, operands[1], gen_smin<mode>3);
@@ -884,7 +883,7 @@
 (define_expand "reduc_umax_scal_<mode>"
   [(match_operand:<V_inner> 0 "register_operand" "")
    (match_operand:VB 1 "register_operand" "")]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
   mips_expand_vec_reduc (tmp, operands[1], gen_umax<mode>3);
@@ -895,7 +894,7 @@
 (define_expand "reduc_umin_scal_<mode>"
   [(match_operand:<V_inner> 0 "register_operand" "")
    (match_operand:VB 1 "register_operand" "")]
-  "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+  "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
 {
   rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
   mips_expand_vec_reduc (tmp, operands[1], gen_umin<mode>3);
diff --git a/gcc/config/mips/loongson-mmiintrin.h b/gcc/config/mips/loongson-mmiintrin.h
new file mode 100644
index 00000000000..6f35fb5b842
--- /dev/null
+++ b/gcc/config/mips/loongson-mmiintrin.h
@@ -0,0 +1,691 @@
+/* Intrinsics for Loongson MultiMedia extension Instructions operations.
+
+   Copyright (C) 2008-2018 Free Software Foundation, Inc.
+   Contributed by CodeSourcery.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published
+   by the Free Software Foundation; either version 3, or (at your
+   option) any later version.
+
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _GCC_LOONGSON_MMIINTRIN_H
+#define _GCC_LOONGSON_MMIINTRIN_H
+
+#if !defined(__mips_loongson_mmi)
+# error "You must select -mloongson-mmi or -march=loongson2e/2f/3a to use
+	 loongson-mmiintrin.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/* Vectors of unsigned bytes, halfwords and words.  */
+typedef uint8_t uint8x8_t __attribute__((vector_size (8)));
+typedef uint16_t uint16x4_t __attribute__((vector_size (8)));
+typedef uint32_t uint32x2_t __attribute__((vector_size (8)));
+
+/* Vectors of signed bytes, halfwords and words.  */
+typedef int8_t int8x8_t __attribute__((vector_size (8)));
+typedef int16_t int16x4_t __attribute__((vector_size (8)));
+typedef int32_t int32x2_t __attribute__((vector_size (8)));
+
+/* SIMD intrinsics.
+   Unless otherwise noted, calls to the functions below will expand into
+   precisely one machine instruction, modulo any moves required to
+   satisfy register allocation constraints.  */
+
+/* Pack with signed saturation.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+packsswh (int32x2_t s, int32x2_t t)
+{
+  return __builtin_loongson_packsswh (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+packsshb (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_packsshb (s, t);
+}
+
+/* Pack with unsigned saturation.  */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+packushb (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_packushb (s, t);
+}
+
+/* Vector addition, treating overflow by wraparound.  */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+paddw_u (uint32x2_t s, uint32x2_t t)
+{
+  return __builtin_loongson_paddw_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+paddh_u (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_paddh_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+paddb_u (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_paddb_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+paddw_s (int32x2_t s, int32x2_t t)
+{
+  return __builtin_loongson_paddw_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+paddh_s (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_paddh_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+paddb_s (int8x8_t s, int8x8_t t)
+{
+  return __builtin_loongson_paddb_s (s, t);
+}
+
+/* Addition of doubleword integers, treating overflow by wraparound.  */
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+paddd_u (uint64_t s, uint64_t t)
+{
+  return __builtin_loongson_paddd_u (s, t);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+paddd_s (int64_t s, int64_t t)
+{
+  return __builtin_loongson_paddd_s (s, t);
+}
+
+/* Vector addition, treating overflow by signed saturation.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+paddsh (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_paddsh (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+paddsb (int8x8_t s, int8x8_t t)
+{
+  return __builtin_loongson_paddsb (s, t);
+}
+
+/* Vector addition, treating overflow by unsigned saturation.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+paddush (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_paddush (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+paddusb (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_paddusb (s, t);
+}
+
+/* Logical AND NOT.  */
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+pandn_ud (uint64_t s, uint64_t t)
+{
+  return __builtin_loongson_pandn_ud (s, t);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+pandn_uw (uint32x2_t s, uint32x2_t t)
+{
+  return __builtin_loongson_pandn_uw (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pandn_uh (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_pandn_uh (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pandn_ub (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_pandn_ub (s, t);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+pandn_sd (int64_t s, int64_t t)
+{
+  return __builtin_loongson_pandn_sd (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+pandn_sw (int32x2_t s, int32x2_t t)
+{
+  return __builtin_loongson_pandn_sw (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pandn_sh (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_pandn_sh (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+pandn_sb (int8x8_t s, int8x8_t t)
+{
+  return __builtin_loongson_pandn_sb (s, t);
+}
+
+/* Average.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pavgh (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_pavgh (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pavgb (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_pavgb (s, t);
+}
+
+/* Equality test.  */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+pcmpeqw_u (uint32x2_t s, uint32x2_t t)
+{
+  return __builtin_loongson_pcmpeqw_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pcmpeqh_u (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_pcmpeqh_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pcmpeqb_u (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_pcmpeqb_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+pcmpeqw_s (int32x2_t s, int32x2_t t)
+{
+  return __builtin_loongson_pcmpeqw_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pcmpeqh_s (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_pcmpeqh_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+pcmpeqb_s (int8x8_t s, int8x8_t t)
+{
+  return __builtin_loongson_pcmpeqb_s (s, t);
+}
+
+/* Greater-than test.  */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+pcmpgtw_u (uint32x2_t s, uint32x2_t t)
+{
+  return __builtin_loongson_pcmpgtw_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pcmpgth_u (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_pcmpgth_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pcmpgtb_u (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_pcmpgtb_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+pcmpgtw_s (int32x2_t s, int32x2_t t)
+{
+  return __builtin_loongson_pcmpgtw_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pcmpgth_s (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_pcmpgth_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+pcmpgtb_s (int8x8_t s, int8x8_t t)
+{
+  return __builtin_loongson_pcmpgtb_s (s, t);
+}
+
+/* Extract halfword.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pextrh_u (uint16x4_t s, int field /* 0--3.  */)
+{
+  return __builtin_loongson_pextrh_u (s, field);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pextrh_s (int16x4_t s, int field /* 0--3.  */)
+{
+  return __builtin_loongson_pextrh_s (s, field);
+}
+
+/* Insert halfword.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pinsrh_0_u (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_pinsrh_0_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pinsrh_1_u (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_pinsrh_1_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pinsrh_2_u (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_pinsrh_2_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pinsrh_3_u (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_pinsrh_3_u (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pinsrh_0_s (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_pinsrh_0_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pinsrh_1_s (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_pinsrh_1_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pinsrh_2_s (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_pinsrh_2_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pinsrh_3_s (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_pinsrh_3_s (s, t);
+}
+
+/* Multiply and add.  */
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+pmaddhw (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_pmaddhw (s, t);
+}
+
+/* Maximum of signed halfwords.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pmaxsh (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_pmaxsh (s, t);
+}
+
+/* Maximum of unsigned bytes.  */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pmaxub (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_pmaxub (s, t);
+}
+
+/* Minimum of signed halfwords.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pminsh (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_pminsh (s, t);
+}
+
+/* Minimum of unsigned bytes.  */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pminub (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_pminub (s, t);
+}
+
+/* Move byte mask.  */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pmovmskb_u (uint8x8_t s)
+{
+  return __builtin_loongson_pmovmskb_u (s);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+pmovmskb_s (int8x8_t s)
+{
+  return __builtin_loongson_pmovmskb_s (s);
+}
+
+/* Multiply unsigned integers and store high result.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pmulhuh (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_pmulhuh (s, t);
+}
+
+/* Multiply signed integers and store high result.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pmulhh (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_pmulhh (s, t);
+}
+
+/* Multiply signed integers and store low result.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pmullh (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_pmullh (s, t);
+}
+
+/* Multiply unsigned word integers.  */
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+pmuluw (uint32x2_t s, uint32x2_t t)
+{
+  return __builtin_loongson_pmuluw (s, t);
+}
+
+/* Absolute difference.  */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pasubub (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_pasubub (s, t);
+}
+
+/* Sum of unsigned byte integers.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+biadd (uint8x8_t s)
+{
+  return __builtin_loongson_biadd (s);
+}
+
+/* Sum of absolute differences.
+   Note that this intrinsic expands into two machine instructions:
+   PASUBUB followed by BIADD.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psadbh (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_psadbh (s, t);
+}
+
+/* Shuffle halfwords.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pshufh_u (uint16x4_t dest, uint16x4_t s, uint8_t order)
+{
+  return __builtin_loongson_pshufh_u (s, order);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pshufh_s (int16x4_t dest, int16x4_t s, uint8_t order)
+{
+  return __builtin_loongson_pshufh_s (s, order);
+}
+
+/* Shift left logical.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psllh_u (uint16x4_t s, uint8_t amount)
+{
+  return __builtin_loongson_psllh_u (s, amount);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psllh_s (int16x4_t s, uint8_t amount)
+{
+  return __builtin_loongson_psllh_s (s, amount);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+psllw_u (uint32x2_t s, uint8_t amount)
+{
+  return __builtin_loongson_psllw_u (s, amount);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+psllw_s (int32x2_t s, uint8_t amount)
+{
+  return __builtin_loongson_psllw_s (s, amount);
+}
+
+/* Shift right logical.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psrlh_u (uint16x4_t s, uint8_t amount)
+{
+  return __builtin_loongson_psrlh_u (s, amount);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psrlh_s (int16x4_t s, uint8_t amount)
+{
+  return __builtin_loongson_psrlh_s (s, amount);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+psrlw_u (uint32x2_t s, uint8_t amount)
+{
+  return __builtin_loongson_psrlw_u (s, amount);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+psrlw_s (int32x2_t s, uint8_t amount)
+{
+  return __builtin_loongson_psrlw_s (s, amount);
+}
+
+/* Shift right arithmetic.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psrah_u (uint16x4_t s, uint8_t amount)
+{
+  return __builtin_loongson_psrah_u (s, amount);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psrah_s (int16x4_t s, uint8_t amount)
+{
+  return __builtin_loongson_psrah_s (s, amount);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+psraw_u (uint32x2_t s, uint8_t amount)
+{
+  return __builtin_loongson_psraw_u (s, amount);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+psraw_s (int32x2_t s, uint8_t amount)
+{
+  return __builtin_loongson_psraw_s (s, amount);
+}
+
+/* Vector subtraction, treating overflow by wraparound.  */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+psubw_u (uint32x2_t s, uint32x2_t t)
+{
+  return __builtin_loongson_psubw_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psubh_u (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_psubh_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+psubb_u (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_psubb_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+psubw_s (int32x2_t s, int32x2_t t)
+{
+  return __builtin_loongson_psubw_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psubh_s (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_psubh_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+psubb_s (int8x8_t s, int8x8_t t)
+{
+  return __builtin_loongson_psubb_s (s, t);
+}
+
+/* Subtraction of doubleword integers, treating overflow by wraparound.  */
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+psubd_u (uint64_t s, uint64_t t)
+{
+  return __builtin_loongson_psubd_u (s, t);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+psubd_s (int64_t s, int64_t t)
+{
+  return __builtin_loongson_psubd_s (s, t);
+}
+
+/* Vector subtraction, treating overflow by signed saturation.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psubsh (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_psubsh (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+psubsb (int8x8_t s, int8x8_t t)
+{
+  return __builtin_loongson_psubsb (s, t);
+}
+
+/* Vector subtraction, treating overflow by unsigned saturation.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psubush (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_psubush (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+psubusb (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_psubusb (s, t);
+}
+
+/* Unpack high data.  */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+punpckhwd_u (uint32x2_t s, uint32x2_t t)
+{
+  return __builtin_loongson_punpckhwd_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+punpckhhw_u (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_punpckhhw_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+punpckhbh_u (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_punpckhbh_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+punpckhwd_s (int32x2_t s, int32x2_t t)
+{
+  return __builtin_loongson_punpckhwd_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+punpckhhw_s (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_punpckhhw_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+punpckhbh_s (int8x8_t s, int8x8_t t)
+{
+  return __builtin_loongson_punpckhbh_s (s, t);
+}
+
+/* Unpack low data.  */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+punpcklwd_u (uint32x2_t s, uint32x2_t t)
+{
+  return __builtin_loongson_punpcklwd_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+punpcklhw_u (uint16x4_t s, uint16x4_t t)
+{
+  return __builtin_loongson_punpcklhw_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+punpcklbh_u (uint8x8_t s, uint8x8_t t)
+{
+  return __builtin_loongson_punpcklbh_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+punpcklwd_s (int32x2_t s, int32x2_t t)
+{
+  return __builtin_loongson_punpcklwd_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+punpcklhw_s (int16x4_t s, int16x4_t t)
+{
+  return __builtin_loongson_punpcklhw_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+punpcklbh_s (int8x8_t s, int8x8_t t)
+{
+  return __builtin_loongson_punpcklbh_s (s, t);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
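
As a usage sketch (not part of the patch itself): a translation unit built with the new option can call the wrappers above directly.  The target triple and file name in the example are placeholders.

/* Example build:
   mips64el-linux-gnu-gcc -mloongson-mmi -mhard-float -mno-mips16 -O2 -c mmi-demo.c  */
#include <loongson-mmiintrin.h>

/* Saturating halfword subtraction; expands to a single psubsh instruction.  */
int16x4_t
sub_sat (int16x4_t s, int16x4_t t)
{
  return psubsh (s, t);
}

/* Interleave the low bytes of two byte vectors (punpcklbh).  */
uint8x8_t
interleave_low (uint8x8_t s, uint8x8_t t)
{
  return punpcklbh_u (s, t);
}
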
diff --git a/gcc/config/mips/loongson.h b/gcc/config/mips/loongson.h
index b4a26027c24..3880e4cb77f 100644
--- a/gcc/config/mips/loongson.h
+++ b/gcc/config/mips/loongson.h
@@ -1,4 +1,4 @@
-/* Intrinsics for ST Microelectronics Loongson-2E/2F SIMD operations.
+/* Intrinsics for Loongson MultiMedia extensions Instructions (MMI) operations.
 
    Copyright (C) 2008-2018 Free Software Foundation, Inc.
    Contributed by CodeSourcery.
@@ -24,667 +24,10 @@
    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
    <http://www.gnu.org/licenses/>.  */
 
-#ifndef _GCC_LOONGSON_H
-#define _GCC_LOONGSON_H
-
-#if !defined(__mips_loongson_vector_rev)
-# error "You must select -march=loongson2e or -march=loongson2f to use loongson.h"
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-
-/* Vectors of unsigned bytes, halfwords and words.  */
-typedef uint8_t uint8x8_t __attribute__((vector_size (8)));
-typedef uint16_t uint16x4_t __attribute__((vector_size (8)));
-typedef uint32_t uint32x2_t __attribute__((vector_size (8)));
-
-/* Vectors of signed bytes, halfwords and words.  */
-typedef int8_t int8x8_t __attribute__((vector_size (8)));
-typedef int16_t int16x4_t __attribute__((vector_size (8)));
-typedef int32_t int32x2_t __attribute__((vector_size (8)));
-
-/* SIMD intrinsics.
-   Unless otherwise noted, calls to the functions below will expand into
-   precisely one machine instruction, modulo any moves required to
-   satisfy register allocation constraints.  */
-
-/* Pack with signed saturation.  */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-packsswh (int32x2_t s, int32x2_t t)
-{
-  return __builtin_loongson_packsswh (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-packsshb (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_packsshb (s, t);
-}
-
-/* Pack with unsigned saturation.  */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-packushb (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_packushb (s, t);
-}
-
-/* Vector addition, treating overflow by wraparound.  */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-paddw_u (uint32x2_t s, uint32x2_t t)
-{
-  return __builtin_loongson_paddw_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-paddh_u (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_paddh_u (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-paddb_u (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_paddb_u (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-paddw_s (int32x2_t s, int32x2_t t)
-{
-  return __builtin_loongson_paddw_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-paddh_s (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_paddh_s (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-paddb_s (int8x8_t s, int8x8_t t)
-{
-  return __builtin_loongson_paddb_s (s, t);
-}
-
-/* Addition of doubleword integers, treating overflow by wraparound.  */
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
-paddd_u (uint64_t s, uint64_t t)
-{
-  return __builtin_loongson_paddd_u (s, t);
-}
-
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-paddd_s (int64_t s, int64_t t)
-{
-  return __builtin_loongson_paddd_s (s, t);
-}
-
-/* Vector addition, treating overflow by signed saturation.  */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-paddsh (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_paddsh (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-paddsb (int8x8_t s, int8x8_t t)
-{
-  return __builtin_loongson_paddsb (s, t);
-}
-
-/* Vector addition, treating overflow by unsigned saturation.  */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-paddush (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_paddush (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-paddusb (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_paddusb (s, t);
-}
-
-/* Logical AND NOT.  */
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
-pandn_ud (uint64_t s, uint64_t t)
-{
-  return __builtin_loongson_pandn_ud (s, t);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-pandn_uw (uint32x2_t s, uint32x2_t t)
-{
-  return __builtin_loongson_pandn_uw (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pandn_uh (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_pandn_uh (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pandn_ub (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_pandn_ub (s, t);
-}
-
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-pandn_sd (int64_t s, int64_t t)
-{
-  return __builtin_loongson_pandn_sd (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-pandn_sw (int32x2_t s, int32x2_t t)
-{
-  return __builtin_loongson_pandn_sw (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pandn_sh (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_pandn_sh (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-pandn_sb (int8x8_t s, int8x8_t t)
-{
-  return __builtin_loongson_pandn_sb (s, t);
-}
-
-/* Average.  */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pavgh (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_pavgh (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pavgb (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_pavgb (s, t);
-}
-
-/* Equality test.  */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-pcmpeqw_u (uint32x2_t s, uint32x2_t t)
-{
-  return __builtin_loongson_pcmpeqw_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pcmpeqh_u (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_pcmpeqh_u (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pcmpeqb_u (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_pcmpeqb_u (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-pcmpeqw_s (int32x2_t s, int32x2_t t)
-{
-  return __builtin_loongson_pcmpeqw_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pcmpeqh_s (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_pcmpeqh_s (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-pcmpeqb_s (int8x8_t s, int8x8_t t)
-{
-  return __builtin_loongson_pcmpeqb_s (s, t);
-}
-
-/* Greater-than test.  */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-pcmpgtw_u (uint32x2_t s, uint32x2_t t)
-{
-  return __builtin_loongson_pcmpgtw_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pcmpgth_u (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_pcmpgth_u (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pcmpgtb_u (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_pcmpgtb_u (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-pcmpgtw_s (int32x2_t s, int32x2_t t)
-{
-  return __builtin_loongson_pcmpgtw_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pcmpgth_s (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_pcmpgth_s (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-pcmpgtb_s (int8x8_t s, int8x8_t t)
-{
-  return __builtin_loongson_pcmpgtb_s (s, t);
-}
-
-/* Extract halfword.  */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pextrh_u (uint16x4_t s, int field /* 0--3 */)
-{
-  return __builtin_loongson_pextrh_u (s, field);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pextrh_s (int16x4_t s, int field /* 0--3 */)
-{
-  return __builtin_loongson_pextrh_s (s, field);
-}
-
-/* Insert halfword.  */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pinsrh_0_u (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_pinsrh_0_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pinsrh_1_u (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_pinsrh_1_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pinsrh_2_u (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_pinsrh_2_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pinsrh_3_u (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_pinsrh_3_u (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pinsrh_0_s (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_pinsrh_0_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pinsrh_1_s (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_pinsrh_1_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pinsrh_2_s (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_pinsrh_2_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pinsrh_3_s (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_pinsrh_3_s (s, t);
-}
-
-/* Multiply and add.  */
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-pmaddhw (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_pmaddhw (s, t);
-}
-
-/* Maximum of signed halfwords.  */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pmaxsh (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_pmaxsh (s, t);
-}
-
-/* Maximum of unsigned bytes.  */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pmaxub (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_pmaxub (s, t);
-}
-
-/* Minimum of signed halfwords.  */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pminsh (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_pminsh (s, t);
-}
-
-/* Minimum of unsigned bytes.  */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pminub (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_pminub (s, t);
-}
-
-/* Move byte mask.  */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pmovmskb_u (uint8x8_t s)
-{
-  return __builtin_loongson_pmovmskb_u (s);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-pmovmskb_s (int8x8_t s)
-{
-  return __builtin_loongson_pmovmskb_s (s);
-}
-
-/* Multiply unsigned integers and store high result.  */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pmulhuh (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_pmulhuh (s, t);
-}
-
-/* Multiply signed integers and store high result.  */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pmulhh (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_pmulhh (s, t);
-}
-
-/* Multiply signed integers and store low result.  */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pmullh (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_pmullh (s, t);
-}
-
-/* Multiply unsigned word integers.  */
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-pmuluw (uint32x2_t s, uint32x2_t t)
-{
-  return __builtin_loongson_pmuluw (s, t);
-}
-
-/* Absolute difference.  */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pasubub (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_pasubub (s, t);
-}
-
-/* Sum of unsigned byte integers.  */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-biadd (uint8x8_t s)
-{
-  return __builtin_loongson_biadd (s);
-}
-
-/* Sum of absolute differences.
-   Note that this intrinsic expands into two machine instructions:
-   PASUBUB followed by BIADD.  */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-psadbh (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_psadbh (s, t);
-}
-
-/* Shuffle halfwords.  */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pshufh_u (uint16x4_t dest, uint16x4_t s, uint8_t order)
-{
-  return __builtin_loongson_pshufh_u (s, order);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pshufh_s (int16x4_t dest, int16x4_t s, uint8_t order)
-{
-  return __builtin_loongson_pshufh_s (s, order);
-}
-
-/* Shift left logical.  */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-psllh_u (uint16x4_t s, uint8_t amount)
-{
-  return __builtin_loongson_psllh_u (s, amount);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-psllh_s (int16x4_t s, uint8_t amount)
-{
-  return __builtin_loongson_psllh_s (s, amount);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-psllw_u (uint32x2_t s, uint8_t amount)
-{
-  return __builtin_loongson_psllw_u (s, amount);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-psllw_s (int32x2_t s, uint8_t amount)
-{
-  return __builtin_loongson_psllw_s (s, amount);
-}
-
-/* Shift right logical.  */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-psrlh_u (uint16x4_t s, uint8_t amount)
-{
-  return __builtin_loongson_psrlh_u (s, amount);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-psrlh_s (int16x4_t s, uint8_t amount)
-{
-  return __builtin_loongson_psrlh_s (s, amount);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-psrlw_u (uint32x2_t s, uint8_t amount)
-{
-  return __builtin_loongson_psrlw_u (s, amount);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-psrlw_s (int32x2_t s, uint8_t amount)
-{
-  return __builtin_loongson_psrlw_s (s, amount);
-}
-
-/* Shift right arithmetic.  */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-psrah_u (uint16x4_t s, uint8_t amount)
-{
-  return __builtin_loongson_psrah_u (s, amount);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-psrah_s (int16x4_t s, uint8_t amount)
-{
-  return __builtin_loongson_psrah_s (s, amount);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-psraw_u (uint32x2_t s, uint8_t amount)
-{
-  return __builtin_loongson_psraw_u (s, amount);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-psraw_s (int32x2_t s, uint8_t amount)
-{
-  return __builtin_loongson_psraw_s (s, amount);
-}
-
-/* Vector subtraction, treating overflow by wraparound.  */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-psubw_u (uint32x2_t s, uint32x2_t t)
-{
-  return __builtin_loongson_psubw_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-psubh_u (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_psubh_u (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-psubb_u (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_psubb_u (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-psubw_s (int32x2_t s, int32x2_t t)
-{
-  return __builtin_loongson_psubw_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-psubh_s (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_psubh_s (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-psubb_s (int8x8_t s, int8x8_t t)
-{
-  return __builtin_loongson_psubb_s (s, t);
-}
-
-/* Subtraction of doubleword integers, treating overflow by wraparound.  */
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
-psubd_u (uint64_t s, uint64_t t)
-{
-  return __builtin_loongson_psubd_u (s, t);
-}
-
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-psubd_s (int64_t s, int64_t t)
-{
-  return __builtin_loongson_psubd_s (s, t);
-}
-
-/* Vector subtraction, treating overflow by signed saturation.  */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-psubsh (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_psubsh (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-psubsb (int8x8_t s, int8x8_t t)
-{
-  return __builtin_loongson_psubsb (s, t);
-}
-
-/* Vector subtraction, treating overflow by unsigned saturation.  */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-psubush (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_psubush (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-psubusb (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_psubusb (s, t);
-}
-
-/* Unpack high data.  */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-punpckhwd_u (uint32x2_t s, uint32x2_t t)
-{
-  return __builtin_loongson_punpckhwd_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-punpckhhw_u (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_punpckhhw_u (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-punpckhbh_u (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_punpckhbh_u (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-punpckhwd_s (int32x2_t s, int32x2_t t)
-{
-  return __builtin_loongson_punpckhwd_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-punpckhhw_s (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_punpckhhw_s (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-punpckhbh_s (int8x8_t s, int8x8_t t)
-{
-  return __builtin_loongson_punpckhbh_s (s, t);
-}
-
-/* Unpack low data.  */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-punpcklwd_u (uint32x2_t s, uint32x2_t t)
-{
-  return __builtin_loongson_punpcklwd_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-punpcklhw_u (uint16x4_t s, uint16x4_t t)
-{
-  return __builtin_loongson_punpcklhw_u (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-punpcklbh_u (uint8x8_t s, uint8x8_t t)
-{
-  return __builtin_loongson_punpcklbh_u (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-punpcklwd_s (int32x2_t s, int32x2_t t)
-{
-  return __builtin_loongson_punpcklwd_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-punpcklhw_s (int16x4_t s, int16x4_t t)
-{
-  return __builtin_loongson_punpcklhw_s (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-punpcklbh_s (int8x8_t s, int8x8_t t)
-{
-  return __builtin_loongson_punpcklbh_s (s, t);
-}
-
-#ifdef __cplusplus
-}
-#endif
 
+#if !defined(_GCC_LOONGSON_MMIINTRIN_H)
+#warning \
+  loongson.h is deprecated and may be removed without further notice. \
+  Please use loongson-mmiintrin.h instead.
+#include "loongson-mmiintrin.h"
 #endif
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index ea2fae1d6db..a804f7030db 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -12797,8 +12797,9 @@ mips_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode)
       if (mode == CCFmode)
 	return !(TARGET_FLOATXX && (regno & 1) != 0);
 
-      /* Allow 64-bit vector modes for Loongson-2E/2F.  */
-      if (TARGET_LOONGSON_VECTORS
+      /* Allow 64-bit vector modes for Loongson MultiMedia extensions
+	 Instructions (MMI).  */
+      if (TARGET_LOONGSON_MMI
 	  && (mode == V2SImode
 	      || mode == V4HImode
 	      || mode == V8QImode
@@ -13368,7 +13369,7 @@ mips_vector_mode_supported_p (machine_mode mode)
     case E_V2SImode:
     case E_V4HImode:
     case E_V8QImode:
-      return TARGET_LOONGSON_VECTORS;
+      return TARGET_LOONGSON_MMI;
 
     default:
       return MSA_SUPPORTED_MODE_P (mode);
@@ -15203,7 +15204,7 @@ AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
 AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
 AVAIL_NON_MIPS16 (dsp_64, TARGET_64BIT && TARGET_DSP)
 AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
-AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
+AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_MMI)
 AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
 AVAIL_NON_MIPS16 (msa, TARGET_MSA)
 
@@ -20164,6 +20165,19 @@ mips_option_override (void)
       TARGET_DSPR2 = false;
     }
 
+  /* Make sure that when TARGET_LOONGSON_MMI is true, TARGET_HARD_FLOAT_ABI
+     is also true.  In o32, pairs of floating-point registers provide 64-bit
+     values.  */
+  if (TARGET_LOONGSON_MMI && !TARGET_HARD_FLOAT_ABI)
+    error ("%<-mloongson-mmi%> must be used with %<-mhard-float%>");
+
+  /* Enable Loongson MMI by default on Loongson 2E, 2F and 3A targets.  */
+  if ((target_flags_explicit & MASK_LOONGSON_MMI) == 0
+      && ((strcmp (mips_arch_info->name, "loongson2e") == 0)
+	  || (strcmp (mips_arch_info->name, "loongson2f") == 0)
+	  || (strcmp (mips_arch_info->name, "loongson3a") == 0)))
+    target_flags |= MASK_LOONGSON_MMI;
+
   /* .eh_frame addresses should be the same width as a C pointer.
      Most MIPS ABIs support only one pointer size, so the assembler
      will usually know exactly how big an .eh_frame address is.
@@ -21149,12 +21163,12 @@ void mips_function_profiler (FILE *file)
 
 /* Implement TARGET_SHIFT_TRUNCATION_MASK.  We want to keep the default
    behavior of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even
-   when TARGET_LOONGSON_VECTORS is true.  */
+   when TARGET_LOONGSON_MMI is true.  */
 
 static unsigned HOST_WIDE_INT
 mips_shift_truncation_mask (machine_mode mode)
 {
-  if (TARGET_LOONGSON_VECTORS && VECTOR_MODE_P (mode))
+  if (TARGET_LOONGSON_MMI && VECTOR_MODE_P (mode))
     return 0;
 
   return GET_MODE_BITSIZE (mode) - 1;
@@ -21255,7 +21269,7 @@ mips_expand_vpc_loongson_even_odd (struct expand_vec_perm_d *d)
   unsigned i, odd, nelt = d->nelt;
   rtx t0, t1, t2, t3;
 
-  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
+  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI))
     return false;
   /* Even-odd for V2SI/V2SFmode is matched by interleave directly.  */
   if (nelt < 4)
@@ -21312,7 +21326,7 @@ mips_expand_vpc_loongson_pshufh (struct expand_vec_perm_d *d)
   unsigned i, mask;
   rtx rmask;
 
-  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
+  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI))
     return false;
   if (d->vmode != V4HImode)
     return false;
@@ -21364,7 +21378,7 @@ mips_expand_vpc_loongson_bcast (struct expand_vec_perm_d *d)
   unsigned i, elt;
   rtx t0, t1;
 
-  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
+  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI))
     return false;
   /* Note that we've already matched V2SI via punpck and V4HI via pshufh.  */
   if (d->vmode != V8QImode)
@@ -21958,7 +21972,7 @@ mips_expand_vector_init (rtx target, rtx vals)
     }
 
   /* Loongson is the only cpu with vectors with more elements.  */
-  gcc_assert (TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS);
+  gcc_assert (TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI);
 
   /* If all values are identical, broadcast the value.  */
   if (all_same)
diff --git a/gcc/config/mips/mips.h b/gcc/config/mips/mips.h
index 6804b792ff1..3563c1d78fe 100644
--- a/gcc/config/mips/mips.h
+++ b/gcc/config/mips/mips.h
@@ -319,13 +319,6 @@ struct mips_cpu_info {
 #define TUNE_I6400                  (mips_tune == PROCESSOR_I6400)
 #define TUNE_P6600                  (mips_tune == PROCESSOR_P6600)
 
-/* Whether vector modes and intrinsics for ST Microelectronics
-   Loongson-2E/2F processors should be enabled.  In o32 pairs of
-   floating-point registers provide 64-bit values.  */
-#define TARGET_LOONGSON_VECTORS	    (TARGET_HARD_FLOAT_ABI		\
-				     && (TARGET_LOONGSON_2EF		\
-					 || TARGET_LOONGSON_3A))
-
 /* True if the pre-reload scheduler should try to create chains of
    multiply-add or multiply-subtract instructions.  For example,
    suppose we have:
@@ -596,9 +589,12 @@ struct mips_cpu_info {
       if (TARGET_ABICALLS)						\
 	builtin_define ("__mips_abicalls");				\
 									\
-      /* Whether Loongson vector modes are enabled.  */                 \
-      if (TARGET_LOONGSON_VECTORS)					\
-        builtin_define ("__mips_loongson_vector_rev");                  \
+      /* Whether Loongson vector modes are enabled.  */			\
+      if (TARGET_LOONGSON_MMI)						\
+	{								\
+	  builtin_define ("__mips_loongson_vector_rev");		\
+	  builtin_define ("__mips_loongson_mmi");			\
+	}								\
 									\
       /* Historical Octeon macro.  */					\
       if (TARGET_OCTEON)						\
@@ -1358,6 +1354,7 @@ struct mips_cpu_info {
 %{mcrc} %{mno-crc} \
 %{mginv} %{mno-ginv} \
 %{mmsa} %{mno-msa} \
+%{mloongson-mmi} %{mno-loongson-mmi} \
 %{msmartmips} %{mno-smartmips} \
 %{mmt} %{mno-mt} \
 %{mfix-rm7000} %{mno-fix-rm7000} \
@@ -2635,9 +2632,9 @@ typedef struct mips_args {
 #define SLOW_BYTE_ACCESS (!TARGET_MIPS16)
 
 /* Standard MIPS integer shifts truncate the shift amount to the
-   width of the shifted operand.  However, Loongson vector shifts
+   width of the shifted operand.  However, Loongson MMI shifts
    do not truncate the shift amount at all.  */
-#define SHIFT_COUNT_TRUNCATED (!TARGET_LOONGSON_VECTORS)
+#define SHIFT_COUNT_TRUNCATED (!TARGET_LOONGSON_MMI)
 
 
 /* Specify the machine mode that pointers have.
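
A side note on the SHIFT_COUNT_TRUNCATED hunk above (a sketch, not part of the patch): scalar MIPS shifts use only the low bits of the shift amount, while the MMI vector shifts use the full count, which is why the macro must be 0 once MMI is enabled.

#include <loongson-mmiintrin.h>
#include <stdint.h>

uint16x4_t
shift_halfwords (uint16x4_t v, uint8_t n)
{
  /* With a truncating shifter this would behave like n % 16; on MMI a
     count of 16 or more is expected to clear every element, so the
     compiler must not assume the count has been masked.  */
  return psllh_u (v, n);
}
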
diff --git a/gcc/config/mips/mips.md b/gcc/config/mips/mips.md
index ea5a23be1f7..a88c1c53134 100644
--- a/gcc/config/mips/mips.md
+++ b/gcc/config/mips/mips.md
@@ -834,9 +834,9 @@
 (define_mode_iterator MOVE64
   [DI DF
    (V2SF "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT")
-   (V2SI "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS")
-   (V4HI "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS")
-   (V8QI "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS")])
+   (V2SI "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI")
+   (V4HI "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI")
+   (V8QI "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI")])
 
 ;; 128-bit modes for which we provide move patterns on 64-bit targets.
 (define_mode_iterator MOVE128 [TI TF])
@@ -863,9 +863,9 @@
   [(DF "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
    (DI "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
    (V2SF "!TARGET_64BIT && TARGET_PAIRED_SINGLE_FLOAT")
-   (V2SI "!TARGET_64BIT && TARGET_LOONGSON_VECTORS")
-   (V4HI "!TARGET_64BIT && TARGET_LOONGSON_VECTORS")
-   (V8QI "!TARGET_64BIT && TARGET_LOONGSON_VECTORS")
+   (V2SI "!TARGET_64BIT && TARGET_LOONGSON_MMI")
+   (V4HI "!TARGET_64BIT && TARGET_LOONGSON_MMI")
+   (V8QI "!TARGET_64BIT && TARGET_LOONGSON_MMI")
    (TF "TARGET_64BIT && TARGET_FLOAT64")])
 
 ;; In GPR templates, a string like "<d>subu" will expand to "subu" in the
@@ -7690,8 +7690,8 @@
 ; microMIPS patterns.
 (include "micromips.md")
 
-; ST-Microelectronics Loongson-2E/2F-specific patterns.
-(include "loongson.md")
+; Loongson MultiMedia extensions Instructions (MMI) patterns.
+(include "loongson-mmi.md")
 
 ; The MIPS MSA Instructions.
 (include "mips-msa.md")
diff --git a/gcc/config/mips/mips.opt b/gcc/config/mips/mips.opt
index 5a9f255fe20..6767c47fa65 100644
--- a/gcc/config/mips/mips.opt
+++ b/gcc/config/mips/mips.opt
@@ -459,3 +459,7 @@ Enum(mips_cb_setting) String(optimal) Value(MIPS_CB_OPTIMAL)
 
 EnumValue
 Enum(mips_cb_setting) String(always) Value(MIPS_CB_ALWAYS)
+
+mloongson-mmi
+Target Report Mask(LOONGSON_MMI)
+Use Loongson MultiMedia extensions Instructions (MMI).
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 802cc642453..79544e2c2b6 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -919,6 +919,7 @@ Objective-C and Objective-C++ Dialects}.
 -mginv -mno-ginv @gol
 -mmicromips  -mno-micromips @gol
 -mmsa  -mno-msa @gol
+-mloongson-mmi  -mno-loongson-mmi @gol
 -mfpu=@var{fpu-type} @gol
 -msmartmips  -mno-smartmips @gol
 -mpaired-single  -mno-paired-single  -mdmx  -mno-mdmx @gol
@@ -21256,6 +21257,12 @@ Use (do not use) the MIPS Cyclic Redundancy Check (CRC) instructions.
 @opindex mno-ginv
 Use (do not use) the MIPS Global INValidate (GINV) instructions.
 
+@item -mloongson-mmi
+@itemx -mno-loongson-mmi
+@opindex mloongson-mmi
+@opindex mno-loongson-mmi
+Use (do not use) the MIPS Loongson MultiMedia extensions Instructions (MMI).
+
 @item -mlong64
 @opindex mlong64
 Force @code{long} types to be 64 bits wide.  See @option{-mlong32} for
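
As a usage sketch for the documentation above (not part of the patch; the target triple is an example): -march=loongson3a now implies -mloongson-mmi unless -mno-loongson-mmi is given, and the __mips_loongson_mmi predefine lets sources guard MMI-specific code.

/* mips64el-linux-gnu-gcc -march=loongson3a -O2 -c diff_sat.c  */
#ifdef __mips_loongson_mmi
#include <loongson-mmiintrin.h>

/* Unsigned saturating byte subtraction; a single psubusb instruction.  */
uint8x8_t
diff_sat (uint8x8_t a, uint8x8_t b)
{
  return psubusb (a, b);
}
#endif
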
diff --git a/gcc/testsuite/gcc.target/mips/loongson-shift-count-truncated-1.c b/gcc/testsuite/gcc.target/mips/loongson-shift-count-truncated-1.c
index baed48cf5d5..6e22c0e110b 100644
--- a/gcc/testsuite/gcc.target/mips/loongson-shift-count-truncated-1.c
+++ b/gcc/testsuite/gcc.target/mips/loongson-shift-count-truncated-1.c
@@ -4,11 +4,11 @@
 /* loongson.h does not handle or check for MIPS16ness.  There doesn't
    seem any good reason for it to, given that the Loongson processors
    do not support MIPS16.  */
-/* { dg-options "isa=loongson -mhard-float -mno-mips16 (REQUIRES_STDLIB)" } */
+/* { dg-options "-mloongson-mmi -mhard-float -mno-mips16 (REQUIRES_STDLIB)" } */
 /* See PR 52155.  */
-/* { dg-options "isa=loongson -mhard-float -mno-mips16 -mlong64" { mips*-*-elf* && ilp32 } } */
+/* { dg-options "-mloongson-mmi -mhard-float -mno-mips16 -mlong64" { mips*-*-elf* && ilp32 } } */
 
-#include "loongson.h"
+#include "loongson-mmiintrin.h"
 #include <assert.h>
 
 typedef union { int32x2_t v; int32_t a[2]; } int32x2_encap_t;
diff --git a/gcc/testsuite/gcc.target/mips/loongson-simd.c b/gcc/testsuite/gcc.target/mips/loongson-simd.c
index f263b4393e9..34fdcecc6dc 100644
--- a/gcc/testsuite/gcc.target/mips/loongson-simd.c
+++ b/gcc/testsuite/gcc.target/mips/loongson-simd.c
@@ -26,9 +26,9 @@ along with GCC; see the file COPYING3.  If not see
    because inclusion of some system headers e.g. stdint.h will fail due to not
    finding stubs-o32_hard.h.  */
 /* { dg-require-effective-target mips_nanlegacy } */
-/* { dg-options "isa=loongson -mhard-float -mno-micromips -mno-mips16 -flax-vector-conversions (REQUIRES_STDLIB)" } */
+/* { dg-options "-mloongson-mmi -mhard-float -mno-micromips -mno-mips16 -flax-vector-conversions (REQUIRES_STDLIB)" } */
 
-#include "loongson.h"
+#include "loongson-mmiintrin.h"
 #include <stdio.h>
 #include <stdint.h>
 #include <assert.h>
diff --git a/gcc/testsuite/gcc.target/mips/mips.exp b/gcc/testsuite/gcc.target/mips/mips.exp
index 9db4fbe29ce..4045c593a6c 100644
--- a/gcc/testsuite/gcc.target/mips/mips.exp
+++ b/gcc/testsuite/gcc.target/mips/mips.exp
@@ -296,6 +296,7 @@ foreach option {
     mcount-ra-address
     odd-spreg
     msa
+    loongson-mmi
 } {
     lappend mips_option_groups $option "-m(no-|)$option"
 }
@@ -883,6 +884,12 @@ proc mips-dg-init {} {
 	    "-mno-msa"
 	    #endif
 
+	    #ifdef __mips_loongson_mmi
+	    "-mloongson-mmi"
+	    #else
+	    "-mno-loongson-mmi"
+	    #endif
+
 	    0
 	};
     } 0]
diff --git a/gcc/testsuite/gcc.target/mips/umips-store16-1.c b/gcc/testsuite/gcc.target/mips/umips-store16-1.c
index 6377e8569d6..f82c837d475 100644
--- a/gcc/testsuite/gcc.target/mips/umips-store16-1.c
+++ b/gcc/testsuite/gcc.target/mips/umips-store16-1.c
@@ -1,4 +1,4 @@
-/* { dg-options "(-mmicromips)" } */
+/* { dg-options "(-mmicromips) forbid_cpu=loongson3a" } */
 /* { dg-do assemble } */
 
 register unsigned int global asm ("$16");
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index fd74c04d092..47c623ebab8 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -1896,20 +1896,20 @@ proc check_mpaired_single_hw_available { } {
 # Return 1 if the target supports executing Loongson vector instructions,
 # 0 otherwise.  Cache the result.
 
-proc check_mips_loongson_hw_available { } {
-    return [check_cached_effective_target mips_loongson_hw_available {
+proc check_mips_loongson_mmi_hw_available { } {
+    return [check_cached_effective_target mips_loongson_mmi_hw_available {
 	# If this is not the right target then we can skip the test.
 	if { !([istarget mips*-*-*]) } {
 	    expr 0
 	} else {
-	    check_runtime_nocache mips_loongson_hw_available {
-	      #include <loongson.h>
+	    check_runtime_nocache mips_loongson_mmi_hw_available {
+	      #include <loongson-mmiintrin.h>
 	      int main()
 	      {
 		asm volatile ("paddw $f2,$f4,$f6");
 		return 0;
 	      }
-	    } ""
+	    } "-mloongson-mmi"
 	}
     }]
 }
@@ -1963,9 +1963,9 @@ proc check_effective_target_mpaired_single_runtime { } {
 
 # Return 1 if the target supports running Loongson executables, 0 otherwise.
 
-proc check_effective_target_mips_loongson_runtime { } {
-    if { [check_effective_target_mips_loongson]
-	 && [check_mips_loongson_hw_available] } {
+proc check_effective_target_mips_loongson_mmi_runtime { } {
+    if { [check_effective_target_mips_loongson_mmi]
+	 && [check_mips_loongson_mmi_hw_available] } {
 	return 1
     }
     return 0
@@ -3085,7 +3085,7 @@ proc check_effective_target_vect_int { } {
 	 || [istarget aarch64*-*-*]
 	 || [is-effective-target arm_neon]
 	 || ([istarget mips*-*-*]
-	     && ([et-is-effective-target mips_loongson]
+	     && ([et-is-effective-target mips_loongson_mmi]
 	     || [et-is-effective-target mips_msa]))
 	 || ([istarget s390*-*-*]
 	     && [check_effective_target_s390_vx])
@@ -4708,11 +4708,24 @@ proc add_options_for_mips_msa { flags } {
   return "$flags -mmsa"
 }
 
+# Add the options needed for the MIPS Loongson MMI architecture.
+
+proc add_options_for_mips_loongson_mmi { flags } {
+  if { ! [check_effective_target_mips_loongson_mmi] } {
+    return "$flags"
+  }
+  return "$flags -mloongson-mmi"
+}
+
+
 # Return 1 if this is a Loongson-2E or -2F target using an ABI that supports
 # the Loongson vector modes.
 
-proc check_effective_target_mips_loongson { } {
+proc check_effective_target_mips_loongson_mmi { } {
     return [check_no_compiler_messages loongson assembly {
+	#if !defined(__mips_loongson_mmi)
+	#error !__mips_loongson_mmi
+	#endif
 	#if !defined(__mips_loongson_vector_rev)
 	#error !__mips_loongson_vector_rev
 	#endif
@@ -5311,7 +5324,7 @@ proc check_effective_target_vect_shift { } {
 	     || [is-effective-target arm_neon]
 	     || ([istarget mips*-*-*]
 		 && ([et-is-effective-target mips_msa]
-		     || [et-is-effective-target mips_loongson]))
+		     || [et-is-effective-target mips_loongson_mmi]))
 	     || ([istarget s390*-*-*]
 		 && [check_effective_target_s390_vx]) }}]
 }
@@ -5324,7 +5337,7 @@ proc check_effective_target_whole_vector_shift { } {
 	 || ([is-effective-target arm_neon]
 	     && [check_effective_target_arm_little_endian])
 	 || ([istarget mips*-*-*]
-	     && [et-is-effective-target mips_loongson])
+	     && [et-is-effective-target mips_loongson_mmi])
 	 || ([istarget s390*-*-*]
 	     && [check_effective_target_s390_vx]) } {
 	set answer 1
@@ -5464,7 +5477,7 @@ proc check_effective_target_vect_no_int_min_max { } {
 	     || [istarget spu-*-*]
 	     || [istarget alpha*-*-*]
 	     || ([istarget mips*-*-*]
-		 && [et-is-effective-target mips_loongson]) }}]
+		 && [et-is-effective-target mips_loongson_mmi]) }}]
 }
 
 # Return 1 if the target plus current options does not support a vector
@@ -5933,7 +5946,7 @@ proc check_effective_target_vect_no_align { } {
 	     || [check_effective_target_arm_vect_no_misalign]
 	     || ([istarget powerpc*-*-*] && [check_p8vector_hw_available])
 	     || ([istarget mips*-*-*]
-		 && [et-is-effective-target mips_loongson]) }}]
+		 && [et-is-effective-target mips_loongson_mmi]) }}]
 }
 
 # Return 1 if the target supports a vector misalign access, 0 otherwise.
@@ -6167,7 +6180,7 @@ proc check_effective_target_vect_short_mult { } {
 	     || [check_effective_target_arm32]
 	     || ([istarget mips*-*-*]
 		 && ([et-is-effective-target mips_msa]
-		     || [et-is-effective-target mips_loongson]))
+		     || [et-is-effective-target mips_loongson_mmi]))
 	     || ([istarget s390*-*-*]
 		 && [check_effective_target_s390_vx]) }}]
 }
@@ -8155,8 +8168,8 @@ proc check_vect_support_and_set_flags { } {
 	if { [check_effective_target_mpaired_single] } {
 	    lappend EFFECTIVE_TARGETS mpaired_single
 	}
-	if { [check_effective_target_mips_loongson] } {
-	    lappend EFFECTIVE_TARGETS mips_loongson
+	if { [check_effective_target_mips_loongson_mmi] } {
+	    lappend EFFECTIVE_TARGETS mips_loongson_mmi
 	}
 	if { [check_effective_target_mips_msa] } {
 	    lappend EFFECTIVE_TARGETS mips_msa
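
For illustration only (a hypothetical test, not part of this patch): a runtime test would typically combine the renamed effective-target keywords with dg-add-options, which picks up add_options_for_mips_loongson_mmi defined above.

/* { dg-do run } */
/* { dg-require-effective-target mips_loongson_mmi_runtime } */
/* { dg-add-options mips_loongson_mmi } */

#include <loongson-mmiintrin.h>

int
main (void)
{
  uint16x4_t a = { 1, 2, 3, 4 };
  uint16x4_t b = { 4, 3, 2, 1 };
  uint16x4_t c = psubush (a, b);	/* saturates to 0 where b > a */
  return (c[0] == 0 && c[3] == 3) ? 0 : 1;
}
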
-- 
2.11.0

