[PATH 4/6] RISC-V: Add vector modes and pattern

戎杰杰(无音) jiejie.rjj@alibaba-inc.com
Fri Jun 12 14:35:31 GMT 2020


gcc/ChangeLog:

        * config/riscv/constraints.md (Qmv): New.
        * config/riscv/iterators.md: New file.
        * config/riscv/predicates.md (const_K_operand): New.
        (const_M_operand): Likewise.
        (vmask_mode_register_operand): Likewise.
        (riscv_vector_mem_operand): Likewise.
        * config/riscv/riscv-modes.def: Define vector modes.
        * config/riscv/riscv.c (riscv_regno_to_class): Add vector regs.
        (riscv_vector_vmode_supported_p): New.
        (riscv_classify_address_vector): Likewise.
        (riscv_classify_address): Call riscv_classify_address_vector.
        (riscv_legitimize_vector_address): New.
        (riscv_legitimize_address): Call riscv_legitimize_vector_address.
        (riscv_legitimize_address_vector_p): New.
        (riscv_get_arg_info): Check vector args.
        (riscv_pass_by_reference): Likewise.
        (riscv_secondary_memory_needed): Check vector regs.
        (riscv_hard_regno_nregs): Likewise.
        (riscv_hard_regno_mode_ok): Likewise.
        (riscv_class_max_nregs): Likewise.
        (riscv_conditional_register_usage): Likewise.
        (riscv_can_change_mode_class): Check vector mode.
        (riscv_emit_vsetvli_base): New.
        (riscv_emit_vsetvli): Likewise.
        (riscv_emit_vsetvli_max): Likewise.
        (riscv_output_vector_sew): Likewise.
        (riscv_output_vector_lmul): Likewise.
        (riscv_output_vector_insn): Likewise.
        (TARGET_SCALAR_MODE_SUPPORTED_P): Define.
        (TARGET_VECTOR_MODE_SUPPORTED_P): Likewise.
        (TARGET_ARRAY_MODE_SUPPORTED_P): Likewise.
        (TARGET_SCHED_FINISH_GLOBAL): Likewise.
        (TARGET_ASM_FUNCTION_PROLOGUE): Likewise.
        * config/riscv/riscv-v.h (TARGET_VECTOR_VSPLIT): Define.
        (TARGET_VECTOR_VSPN): Likewise.
        * config/riscv/riscv-v-float.md: New file.
        * config/riscv/riscv-v-mem.md: New file.
        * config/riscv/riscv-v.md: New file.
        * config/riscv/riscv.md: Include vector md files.
---
 gcc/config/riscv/constraints.md   |    6 +
 gcc/config/riscv/iterators.md     | 2212 +++++++++++++++++++++
 gcc/config/riscv/predicates.md    |   24 +
 gcc/config/riscv/riscv-modes.def  |   40 +
 gcc/config/riscv/riscv-protos.h   |    8 +
 gcc/config/riscv/riscv-v-float.md | 2121 +++++++++++++++++++++
 gcc/config/riscv/riscv-v-mem.md   |  743 ++++++++
 gcc/config/riscv/riscv-v.h        |    6 +
 gcc/config/riscv/riscv-v.md       | 2958 +++++++++++++++++++++++++++++
 gcc/config/riscv/riscv.c          |  417 +++-
 gcc/config/riscv/riscv.md         |    4 +
 11 files changed, 8538 insertions(+), 1 deletion(-)
 create mode 100755 gcc/config/riscv/iterators.md
 create mode 100755 gcc/config/riscv/riscv-v-float.md
 create mode 100755 gcc/config/riscv/riscv-v-mem.md
 create mode 100755 gcc/config/riscv/riscv-v.md

diff --git a/gcc/config/riscv/constraints.md b/gcc/config/riscv/constraints.md
index fb3c7fac8a0..79414d4f550 100644
--- a/gcc/config/riscv/constraints.md
+++ b/gcc/config/riscv/constraints.md
@@ -85,3 +85,9 @@
    A constant @code{move_operand}."
   (and (match_operand 0 "move_operand")
        (match_test "CONSTANT_P (op)")))
+
+(define_memory_constraint "Qmv"
+ "@internal
+  An address valid for vector instructions."
+ (and (match_code "mem")
+      (match_test "riscv_legitimize_address_vector_p (XEXP(op, 0), GET_MODE(op))")))
diff --git a/gcc/config/riscv/iterators.md b/gcc/config/riscv/iterators.md
new file mode 100755
index 00000000000..d7438e84774
--- /dev/null
+++ b/gcc/config/riscv/iterators.md
@@ -0,0 +1,2212 @@
+
+(define_mode_iterator VALL [
+  V1DI  V2SI  V4HI  V8QI
+  V2DI  V4SI  V8HI  V16QI
+  V4DI  V8SI  V16HI V32QI
+  V8DI  V16SI V32HI V64QI
+  V16DI V32SI V64HI V128QI
+
+  V1DF  V2SF  V4HF
+  V2DF  V4SF  V8HF
+  V4DF  V8SF  V16HF
+  V8DF  V16SF V32HF
+  V16DF V32SF V64HF
+])
+
+(define_mode_iterator VANY [
+  V1DI  V2SI  V4HI  V8QI
+  V2DI  V4SI  V8HI  V16QI
+  V4DI  V8SI  V16HI V32QI
+  V8DI  V16SI V32HI V64QI
+  V16DI V32SI V64HI V128QI
+])
+
+;; VANY - V2DI
+(define_mode_iterator VANY_RVV [
+  V2DI  V4SI  V8HI  V16QI
+  V4DI  V8SI  V16HI V32QI
+  V8DI  V16SI V32HI V64QI
+  V16DI V32SI V64HI V128QI
+])
+
+(define_mode_iterator VANY128 [
+  V2DI  V4SI  V8HI  V16QI
+  V4DI  V8SI  V16HI V32QI
+  V8DI  V16SI V32HI V64QI
+  V16DI V32SI V64HI V128QI
+  V2DF  V4SF  V8HF
+  V4DF  V8SF  V16HF
+  V8DF  V16SF V32HF
+  V16DF V32SF V64HF
+])
+
+(define_mode_iterator VNARROW [
+  V2DI  V4SI  V8HI
+  V4DI  V8SI  V16HI
+  V8DI  V16SI V32HI
+  V16DI V32SI V64HI
+])
+
+(define_mode_iterator VWIDEN_DSHQ [
+  V2SI  V4HI  V8QI
+  V4SI  V8HI  V16QI
+  V8SI  V16HI V32QI
+  V16SI V32HI V64QI
+])
+
+(define_mode_iterator VNARROWER [
+  V8DI V4DI V2DI V1DI
+  V16SI V8SI V4SI V2SI
+  V32HI V16HI V8HI V4HI
+  V64QI V32QI V16QI V8QI
+])
+
+(define_mode_iterator VFANY [
+  V8TF V4TF V2TF V1TF
+  V16DF V8DF V4DF V2DF V1DF
+  V32SF V16SF V8SF V4SF V2SF
+  V64HF V32HF V16HF V8HF V4HF
+])
+
+;; vfcvt.x.f.v, vfcvt.xu.f.v
+(define_mode_iterator VTDSH [
+  V8TI V4TI V2TI V1TI
+  V16DI V8DI V4DI V2DI V1DI
+  V32SI V16SI V8SI V4SI V2SI
+  V64HI V32HI V16HI V8HI V4HI
+])
+
+(define_mode_iterator VFNARROW [
+  V8DF V4DF V2DF V1DF
+  V16SF V8SF V4SF V2SF
+  V32HF V16HF V8HF V4HF
+])
+
+(define_mode_iterator VFWIDEN [
+  V8TF V4TF V2TF V1TF
+  V16DF V8DF V4DF V2DF
+  V32SF V16SF V8SF V4SF
+])
+
+(define_mode_iterator VFWIDEN_FROM_INT [
+  V8TF V4TF V2TF V1TF
+  V16DF V8DF V4DF V2DF
+  V32SF V16SF V8SF V4SF
+  V64HF V32HF V16HF V8HF
+])
+
+(define_mode_iterator VFDSH [
+  V16DF V8DF V4DF V2DF V1DF
+  V32SF V16SF V8SF V4SF V2SF
+  V64HF V32HF V16HF V8HF V4HF
+])
+
+(define_mode_iterator VWIDEN_FROM_FLOAT [
+  V8TI V4TI V2TI V1TI
+  V16DI V8DI V4DI V2DI
+  V32SI V16SI V8SI V4SI
+])
+
+(define_mode_iterator VMASK [V8QI V16QI])
+
+
+(define_mode_attr VANY_SCALAR_attr [
+  (V1DI "DI")   (V2SI "SI")   (V4HI "HI")   (V8QI "QI")
+  (V2DI "DI")   (V4SI "SI")   (V8HI "HI")   (V16QI "QI")
+  (V4DI "DI")   (V8SI "SI")   (V16HI "HI")  (V32QI "QI")
+  (V8DI "DI")   (V16SI "SI")  (V32HI "HI")  (V64QI "QI")
+  (V16DI "DI")  (V32SI "SI")  (V64HI "HI")  (V128QI "QI")
+])
+
+(define_mode_attr VANY_SCALAR_NARROW_attr [
+  (V2DI "SI")   (V4SI "HI")   (V8HI "QI")
+  (V4DI "SI")   (V8SI "HI")   (V16HI "QI")
+  (V8DI "SI")   (V16SI "HI")  (V32HI "QI")
+  (V16DI "SI")  (V32SI "HI")  (V64HI "QI")
+])
+
+(define_mode_attr VNARROW_attr [
+  (V2DI "V2SI")  (V4SI "V4HI")   (V8HI "V8QI")
+  (V4DI "V4SI")  (V8SI "V8HI")   (V16HI "V16QI")
+  (V8DI "V8SI")  (V16SI "V16HI")  (V32HI "V32QI")
+  (V16DI "V16SI") (V32SI "V32HI") (V64HI "V64QI")
+])
+
+(define_mode_attr vnarrow_attr [
+  (V2DI "v2si")  (V4SI "v4hi")   (V8HI "v8qi")
+  (V4DI "v4si")  (V8SI "v8hi")   (V16HI "v16qi")
+  (V8DI "v8si")  (V16SI "v16hi")  (V32HI "v32qi")
+  (V16DI "v16si") (V32SI "v32hi") (V64HI "v64qi")
+])
+
+(define_mode_attr VWIDEN_attr [
+  (V2SI "V2DI")   (V4HI "V4SI")   (V8QI "V8HI")
+  (V4SI "V4DI")   (V8HI "V8SI")   (V16QI "V16HI")
+  (V8SI "V8DI")   (V16HI "V16SI") (V32QI "V32HI")
+  (V16SI "V16DI") (V32HI "V32SI") (V64QI  "V64HI")
+])
+
+(define_mode_attr vwiden_attr [
+  (V2SI "v2di")   (V4HI "v4si")   (V8QI "v8hi")
+  (V4SI "v4di")   (V8HI "v8si")   (V16QI "v16hi")
+  (V8SI "v8di")   (V16HI "v16si") (V32QI "v32hi")
+  (V16SI "v16di") (V32HI "v32si") (V64QI  "v64hi")
+])
+
+(define_mode_attr VF_CVTX_ATTR [
+  (V8TF "V8TI") (V4TF "V4TI") (V2TF "V2TI") (V1TF "V1TI")
+  (V16DF "V16DI") (V8DF "V8DI") (V4DF "V4DI") (V2DF "V2DI") (V1DF "V1DI")
+  (V32SF "V32SI") (V16SF "V16SI") (V8SF "V8SI") (V4SF "V4SI") (V2SF "V2SI")
+  (V64HF "V64HI") (V32HF "V32HI") (V16HF "V16HI") (V8HF "V8HI") (V4HF "V4HI")
+])
+
+(define_mode_attr vf_cvtx_attr [
+  (V8TF "v8ti") (V4TF "v4ti") (V2TF "v2ti") (V1TF "v1ti")
+  (V16DF "v16di") (V8DF "v8di") (V4DF "v4di") (V2DF "v2di") (V1DF "v1di")
+  (V32SF "v32si") (V16SF "v16si") (V8SF "v8si") (V4SF "v4si") (V2SF "v2si")
+  (V64HF "v64hi") (V32HF "v32hi") (V16HF "v16hi") (V8HF "v8hi") (V4HF "v4hi")
+])
+
+(define_mode_attr V_CVTF_ATTR [
+  (V8TI "V8TF") (V4TI "V4TF") (V2TI "V2TF") (V1TI "V1TF")
+  (V16DI "V16DF") (V8DI "V8DF") (V4DI "V4DF") (V2DI "V2DF") (V1DI "V1DF")
+  (V32SI "V32SF") (V16SI "V16SF") (V8SI "V8SF") (V4SI "V4SF") (V2SI "V2SF")
+  (V64HI "V64HF") (V32HI "V32HF") (V16HI "V16HF") (V8HI "V8HF") (V4HI "V4HF")
+])
+
+(define_mode_attr v_cvtf_attr [
+  (V8TI "v8tf") (V4TI "v4tf") (V2TI "v2tf") (V1TI "v1tf")
+  (V16DI "v16df") (V8DI "v8df") (V4DI "v4df") (V2DI "v2df") (V1DI "v1df")
+  (V32SI "v32sf") (V16SI "v16sf") (V8SI "v8sf") (V4SI "v4sf") (V2SI "v2sf")
+  (V64HI "v64hf") (V32HI "v32hf") (V16HI "v16hf") (V8HI "v8hf") (V4HI "v4hf")
+])
+
+(define_mode_attr VFNARROW_X_ATTR [
+  (V8DF "V8TI") (V4DF "V4TI") (V2DF "V2TI") (V1DF "V1TI")
+  (V16SF "V16DI") (V8SF "V8DI") (V4SF "V4DI") (V2SF "V2DI")
+  (V32HF "V32SI") (V16HF "V16SI") (V8HF "V8SI") (V4HF "V4SI")
+])
+
+(define_mode_attr VNARROW_F_ATTR [
+  (V8DI "V8TF") (V4DI "V4TF") (V2DI "V2TF") (V1DI "V1TF")
+  (V16SI "V16DF") (V8SI "V8DF") (V4SI "V4DF") (V2SI "V2DF")
+  (V32HI "V32SF") (V16HI "V16SF") (V8HI "V8SF") (V4HI "V4SF")
+  (V64QI "V64HF") (V32QI "V32HF") (V16QI "V16HF") (V8QI "V8HF")
+])
+
+(define_mode_attr VFWIDEN_X_ATTR [
+  (V8TF "V8DI") (V4TF "V4DI") (V2TF "V2DI") (V1TF "V1DI")
+  (V16DF "V16SI") (V8DF "V8SI") (V4DF "V4SI") (V2DF "V2SI")
+  (V32SF "V32HI") (V16SF "V16HI") (V8SF "V8HI") (V4SF "V4HI")
+  (V64HF "V64QI") (V32HF "V32QI") (V16HF "V16QI") (V8HF "V8QI")
+])
+
+(define_mode_attr VWIDEN_F_ATTR [
+  (V8TI "V8DF") (V4TI "V4DF") (V2TI "V2DF") (V1TI "V1DF")
+  (V16DI "V16SF") (V8DI "V8SF") (V4DI "V4SF") (V2DI "V2SF")
+  (V32SI "V32HF") (V16SI "V16HF") (V8SI "V8HF") (V4SI "V4HF")
+])
+
+(define_mode_attr VFNARROW_ATTR [
+  (V8DF "V8TF") (V4DF "V4TF") (V2DF "V2TF") (V1DF "V1TF")
+  (V16SF "V16DF") (V8SF "V8DF") (V4SF "V4DF") (V2SF "V2DF")
+  (V32HF "V32SF") (V16HF "V16SF") (V8HF "V8SF") (V4HF "V4SF")
+])
+
+(define_mode_attr vfnarrow_attr [
+  (V8DF "v8tf") (V4DF "v4tf") (V2DF "v2tf") (V1DF "v1tf")
+  (V16SF "v16df") (V8SF "v8df") (V4SF "v4df") (V2SF "v2df")
+  (V32HF "v32sf") (V16HF "v16sf") (V8HF "v8sf") (V4HF "v4sf")
+])
+
+(define_mode_attr VFWIDEN_ATTR [
+  (V8TF "V8DF") (V4TF "V4DF") (V2TF "V2DF") (V1TF "V1DF")
+  (V16DF "V16SF") (V8DF "V8SF") (V4DF "V4SF") (V2DF "V2SF")
+  (V32SF "V32HF") (V16SF "V16HF") (V8SF "V8HF") (V4SF "V4HF")
+])
+
+(define_mode_attr vfwiden_attr [
+  (V8TF "v8df") (V4TF "v4df") (V2TF "v2df") (V1TF "v1df")
+  (V16DF "v16sf") (V8DF "v8sf") (V4DF "v4sf") (V2DF "v2sf")
+  (V32SF "v32hf") (V16SF "v16hf") (V8SF "v8hf") (V4SF "v4hf")
+])
+
+(define_mode_attr VF_SEM_ATTR [
+  (V8TF "TF") (V4TF "TF") (V2TF "TF") (V1TF "TF")
+  (V16DF "DF") (V8DF "DF") (V4DF "DF") (V2DF "DF") (V1DF "DF")
+  (V32SF "SF") (V16SF "SF") (V8SF "SF") (V4SF "SF") (V2SF "SF")
+  (V64HF "HF") (V32HF "HF") (V16HF "HF") (V8HF "HF") (V4HF "HF")
+])
+
+(define_mode_attr VFWIDEN_SEM_ATTR [
+  (V8TF "DF") (V4TF "DF") (V2TF "DF") (V1TF "DF")
+  (V16DF "SF") (V8DF "SF") (V4DF "SF") (V2DF "SF") (V1DF "SF")
+  (V32SF "HF") (V16SF "HF") (V8SF "HF") (V4SF "HF") (V2SF "HF")
+])
+
+(define_mode_attr VF_V1SEM_ATTR [
+  (V8TF "V1TF") (V4TF "V1TF") (V2TF "V1TF") (V1TF "V1TF")
+  (V16DF "V1DF") (V8DF "V1DF") (V4DF "V1DF") (V2DF "V1DF") (V1DF "V1DF")
+  (V32SF "V1SF") (V16SF "V1SF") (V8SF "V1SF") (V4SF "V1SF") (V2SF "V1SF")
+  (V64HF "V1HF") (V32HF "V1HF") (V16HF "V1HF") (V8HF "V1HF") (V4HF "V1HF")
+])
+
+(define_mode_attr VFDSH_V1SEM_ATTR [
+  (V16DF "V1TF") (V8DF "V1TF") (V4DF "V1TF") (V2DF "V1TF") (V1DF "V1TF")
+  (V32SF "V1DF") (V16SF "V1DF") (V8SF "V1DF") (V4SF "V1DF") (V2SF "V1DF")
+  (V64HF "V1SF") (V32HF "V1SF") (V16HF "V1SF") (V8HF "V1SF") (V4HF "V1SF")
+])
+
+(define_mode_attr VLXE_VALL_ATTR [
+  (V1DI  "V1DI") (V2SI  "V2SI") (V4HI  "V4HI") (V8QI  "V8QI")
+  (V2DI  "V2DI") (V4SI  "V4SI") (V8HI  "V8HI") (V16QI  "V16QI")
+  (V4DI  "V4DI") (V8SI  "V8SI") (V16HI "V16HI") (V32QI  "V32QI")
+  (V8DI  "V8DI") (V16SI "V16SI") (V32HI "V32HI") (V64QI  "V64QI")
+  (V16DI "V16DI") (V32SI "V32SI") (V64HI "V64HI") (V128QI "V128QI")
+  (V1DF  "V1DF") (V2SF  "V2SF") (V4HF  "V4HF")
+  (V2DF  "V2DI") (V4SF  "V4SI") (V8HF  "V8HI")
+  (V4DF  "V4DI") (V8SF  "V8SI") (V16HF "V16HI")
+  (V8DF  "V8DI") (V16SF "V16SI") (V32HF "V32HI")
+  (V16DF "V16DI") (V32SF "V32SI") (V64HF "V64HI")
+])
+
+(define_c_enum "unspec" [
+    UNSPEC_VSETVLI
+    UNSPEC_VSETVLI_MAX
+    UNSPEC_VSETVLR
+    UNSPEC_VSTART
+    ;; used to specify the type
+    UNSPEC_TYPE
+    ;; Mask operation unspec
+    UNSPEC_VMSEQVV
+    UNSPEC_VMSNEVV
+    UNSPEC_VMSLTVV
+    UNSPEC_VMSLTUVV
+    UNSPEC_VMSLEVV
+    UNSPEC_VMSLEUVV
+    UNSPEC_VMSEQVX
+    UNSPEC_VMSNEVX
+    UNSPEC_VMSLTVX
+    UNSPEC_VMSLTUVX
+    UNSPEC_VMSLEVX
+    UNSPEC_VMSLEUVX
+    UNSPEC_VMSGTVX
+    UNSPEC_VMSGTUVX
+    UNSPEC_VMSGEVX
+    UNSPEC_VMSGEUVX
+    UNSPEC_VMSEQVI
+    UNSPEC_VMSNEVI
+    UNSPEC_VMSLEVI
+    UNSPEC_VMSLEUVI
+    UNSPEC_VMSGTVI
+    UNSPEC_VMSGTUVI
+    UNSPEC_VMAND
+    UNSPEC_VMNAND
+    UNSPEC_VMANDNOT
+    UNSPEC_VMXOR
+    UNSPEC_VMOR
+    UNSPEC_VMNOR
+    UNSPEC_VMORNOT
+    UNSPEC_VMXNOR
+    UNSPEC_VMCPY
+    UNSPEC_VMCLR
+    UNSPEC_VMSET
+    UNSPEC_VMNOT
+    UNSPEC_VMPOPC
+    UNSPEC_VMFIRST
+    UNSPEC_VMSBF
+    UNSPEC_VMSIF
+    UNSPEC_VMSOF
+    ;; bitwise
+    UNSPEC_VANDVV
+    UNSPEC_VORVV
+    UNSPEC_VXORVV
+    UNSPEC_VANDVX
+    UNSPEC_VORVX
+    UNSPEC_VXORVX
+    UNSPEC_VANDVI
+    UNSPEC_VORVI
+    UNSPEC_VXORVI
+    UNSPEC_VNOTV
+    ;; bit shift
+    UNSPEC_VSLLVV
+    UNSPEC_VSRLVV
+    UNSPEC_VSRAVV
+    UNSPEC_VSLLVX
+    UNSPEC_VSRLVX
+    UNSPEC_VSRAVX
+    UNSPEC_VSLLVI
+    UNSPEC_VSRLVI
+    UNSPEC_VSRAVI
+    ;; narrow shift
+    UNSPEC_VNSRLVV
+    UNSPEC_VNSRAVV
+    UNSPEC_VNSRLVX
+    UNSPEC_VNSRAVX
+    UNSPEC_VNSRLVI
+    UNSPEC_VNSRAVI
+    ;; arithmetic
+    UNSPEC_VADDVV
+    UNSPEC_VSUBVV
+    UNSPEC_VADDVX
+    UNSPEC_VSUBVX
+    UNSPEC_VRSUBVX
+    UNSPEC_VADDVI
+    UNSPEC_VRSUBVI
+    UNSPEC_VDIVUVV
+    UNSPEC_VDIVVV
+    UNSPEC_VREMUVV
+    UNSPEC_VREMVV
+    UNSPEC_VDIVUVX
+    UNSPEC_VDIVVX
+    UNSPEC_VREMUVX
+    UNSPEC_VREMVX
+    UNSPEC_VMINUVV
+    UNSPEC_VMINVV
+    UNSPEC_VMAXUVV
+    UNSPEC_VMAXVV
+    UNSPEC_VMINUVX
+    UNSPEC_VMINVX
+    UNSPEC_VMAXUVX
+    UNSPEC_VMAXVX
+    UNSPEC_VMULVV
+    UNSPEC_VMULHVV
+    UNSPEC_VMULHUVV
+    UNSPEC_VMULHSUVV
+    UNSPEC_VMULVX
+    UNSPEC_VMULHVX
+    UNSPEC_VMULHUVX
+    UNSPEC_VMULHSUVX
+    UNSPEC_VMACCVV
+    UNSPEC_VNMSACVV
+    UNSPEC_VMADDVV
+    UNSPEC_VNMSUBVV
+    UNSPEC_VMACCVX
+    UNSPEC_VNMSACVX
+    UNSPEC_VMADDVX
+    UNSPEC_VNMSUBVX
+    UNSPEC_VWADDUVV
+    UNSPEC_VWSUBUVV
+    UNSPEC_VWADDVV
+    UNSPEC_VWSUBVV
+    UNSPEC_VWADDUVX
+    UNSPEC_VWSUBUVX
+    UNSPEC_VWADDVX
+    UNSPEC_VWSUBVX
+    UNSPEC_VWADDUWV
+    UNSPEC_VWSUBUWV
+    UNSPEC_VWADDWV
+    UNSPEC_VWSUBWV
+    UNSPEC_VWADDUWX
+    UNSPEC_VWSUBUWX
+    UNSPEC_VWADDWX
+    UNSPEC_VWSUBWX
+    UNSPEC_VWMULVV
+    UNSPEC_VWMULUVV
+    UNSPEC_VWMULSUVV
+    UNSPEC_VWMULVX
+    UNSPEC_VWMULUVX
+    UNSPEC_VWMULSUVX
+    UNSPEC_VWMACCUVV
+    UNSPEC_VWMACCVV
+    UNSPEC_VWMACCSUVV
+    UNSPEC_VWMACCUVX
+    UNSPEC_VWMACCVX
+    UNSPEC_VWMACCSUVX
+    UNSPEC_VWMACCUSVX
+    ;; reduction
+    UNSPEC_VREDSUMVS
+    UNSPEC_VREDMAXUVS
+    UNSPEC_VREDMAXVS
+    UNSPEC_VREDMINUVS
+    UNSPEC_VREDMINVS
+    UNSPEC_VREDANDVS
+    UNSPEC_VREDORVS
+    UNSPEC_VREDXORVS
+    UNSPEC_VWREDSUMUVS
+    UNSPEC_VWREDSUMVS
+    ;; slide
+    UNSPEC_VSLIDEUPVX
+    UNSPEC_VSLIDEDOWNVX
+    UNSPEC_VSLIDE1UPVX
+    UNSPEC_VSLIDE1DOWNVX
+    UNSPEC_VSLIDEUPVI
+    UNSPEC_VSLIDEDOWNVI
+    ;; gather
+    UNSPEC_VRGATHERVV
+    UNSPEC_VRGATHERVX
+    UNSPEC_VRGATHERVI
+    ;; compress
+    UNSPEC_VCOMPRESS
+    ;; iota, id
+    UNSPEC_VIOTA
+    UNSPEC_VID
+    ;; mv
+    UNSPEC_VMVVV
+    UNSPEC_VMVVX
+    UNSPEC_VMVVI
+    ;; permutation
+    UNSPEC_VEXTXV
+    UNSPEC_VMVSX
+    UNSPEC_VMVXS
+    ;; fixed-point arithmetic
+    UNSPEC_VSADDUVV
+    UNSPEC_VSADDUVX
+    UNSPEC_VSADDUVI
+    UNSPEC_VSADDVV
+    UNSPEC_VSADDVX
+    UNSPEC_VSADDVI
+    UNSPEC_VSSUBUVV
+    UNSPEC_VSSUBUVX
+    UNSPEC_VSSUBVV
+    UNSPEC_VSSUBVX
+    UNSPEC_VAADDVV
+    UNSPEC_VAADDVX
+    UNSPEC_VAADDVI
+    UNSPEC_VASUBVV
+    UNSPEC_VASUBVX
+    UNSPEC_VSMULVV
+    UNSPEC_VSMULVX
+    UNSPEC_VWSMACCUVV
+    UNSPEC_VWSMACCUVX
+    UNSPEC_VWSMACCVV
+    UNSPEC_VWSMACCVX
+    UNSPEC_VWSMACCSUVV
+    UNSPEC_VWSMACCSUVX
+    UNSPEC_VWSMACCUSVX
+    UNSPEC_VSSRLVV
+    UNSPEC_VSSRLVX
+    UNSPEC_VSSRLVI
+    UNSPEC_VSSRAVV
+    UNSPEC_VSSRAVX
+    UNSPEC_VSSRAVI
+    UNSPEC_VNCLIPUVV
+    UNSPEC_VNCLIPUVX
+    UNSPEC_VNCLIPUVI
+    UNSPEC_VNCLIPVV
+    UNSPEC_VNCLIPVX
+    UNSPEC_VNCLIPVI
+    ;; dot
+    UNSPEC_VDOTUVV
+    UNSPEC_VDOTVV
+])
+
+(define_c_enum "unspec" [
+    UNSPEC_VMSEQVV_MASK
+    UNSPEC_VMSNEVV_MASK
+    UNSPEC_VMSLTVV_MASK
+    UNSPEC_VMSLTUVV_MASK
+    UNSPEC_VMSLEVV_MASK
+    UNSPEC_VMSLEUVV_MASK
+    UNSPEC_VMSEQVX_MASK
+    UNSPEC_VMSNEVX_MASK
+    UNSPEC_VMSLTVX_MASK
+    UNSPEC_VMSLTUVX_MASK
+    UNSPEC_VMSLEVX_MASK
+    UNSPEC_VMSLEUVX_MASK
+    UNSPEC_VMSGTVX_MASK
+    UNSPEC_VMSGTUVX_MASK
+    UNSPEC_VMSGEVX_MASK
+    UNSPEC_VMSGEUVX_MASK
+    UNSPEC_VMSEQVI_MASK
+    UNSPEC_VMSNEVI_MASK
+    UNSPEC_VMSLEVI_MASK
+    UNSPEC_VMSLEUVI_MASK
+    UNSPEC_VMSGTVI_MASK
+    UNSPEC_VMSGTUVI_MASK
+    UNSPEC_VMPOPC_MASK
+    UNSPEC_VMFIRST_MASK
+    UNSPEC_VMSBF_MASK
+    UNSPEC_VMSIF_MASK
+    UNSPEC_VMSOF_MASK
+    UNSPEC_VANDVV_MASK
+    UNSPEC_VORVV_MASK
+    UNSPEC_VXORVV_MASK
+    UNSPEC_VANDVX_MASK
+    UNSPEC_VORVX_MASK
+    UNSPEC_VXORVX_MASK
+    UNSPEC_VANDVI_MASK
+    UNSPEC_VORVI_MASK
+    UNSPEC_VXORVI_MASK
+    UNSPEC_VNOTV_MASK
+    UNSPEC_VSLLVV_MASK
+    UNSPEC_VSRLVV_MASK
+    UNSPEC_VSRAVV_MASK
+    UNSPEC_VSLLVX_MASK
+    UNSPEC_VSRLVX_MASK
+    UNSPEC_VSRAVX_MASK
+    UNSPEC_VSLLVI_MASK
+    UNSPEC_VSRLVI_MASK
+    UNSPEC_VSRAVI_MASK
+    UNSPEC_VNSRLVV_MASK
+    UNSPEC_VNSRAVV_MASK
+    UNSPEC_VNSRLVX_MASK
+    UNSPEC_VNSRAVX_MASK
+    UNSPEC_VNSRLVI_MASK
+    UNSPEC_VNSRAVI_MASK
+    UNSPEC_VADDVV_MASK
+    UNSPEC_VSUBVV_MASK
+    UNSPEC_VADDVX_MASK
+    UNSPEC_VSUBVX_MASK
+    UNSPEC_VRSUBVX_MASK
+    UNSPEC_VADDVI_MASK
+    UNSPEC_VRSUBVI_MASK
+    UNSPEC_VDIVUVV_MASK
+    UNSPEC_VDIVVV_MASK
+    UNSPEC_VREMUVV_MASK
+    UNSPEC_VREMVV_MASK
+    UNSPEC_VDIVUVX_MASK
+    UNSPEC_VDIVVX_MASK
+    UNSPEC_VREMUVX_MASK
+    UNSPEC_VREMVX_MASK
+    UNSPEC_VMINUVV_MASK
+    UNSPEC_VMINVV_MASK
+    UNSPEC_VMAXUVV_MASK
+    UNSPEC_VMAXVV_MASK
+    UNSPEC_VMINUVX_MASK
+    UNSPEC_VMINVX_MASK
+    UNSPEC_VMAXUVX_MASK
+    UNSPEC_VMAXVX_MASK
+    UNSPEC_VMULVV_MASK
+    UNSPEC_VMULHVV_MASK
+    UNSPEC_VMULHUVV_MASK
+    UNSPEC_VMULHSUVV_MASK
+    UNSPEC_VMULVX_MASK
+    UNSPEC_VMULHVX_MASK
+    UNSPEC_VMULHUVX_MASK
+    UNSPEC_VMULHSUVX_MASK
+    UNSPEC_VMACCVV_MASK
+    UNSPEC_VNMSACVV_MASK
+    UNSPEC_VMADDVV_MASK
+    UNSPEC_VNMSUBVV_MASK
+    UNSPEC_VMACCVX_MASK
+    UNSPEC_VNMSACVX_MASK
+    UNSPEC_VMADDVX_MASK
+    UNSPEC_VNMSUBVX_MASK
+    UNSPEC_VWADDUVV_MASK
+    UNSPEC_VWSUBUVV_MASK
+    UNSPEC_VWADDVV_MASK
+    UNSPEC_VWSUBVV_MASK
+    UNSPEC_VWADDUVX_MASK
+    UNSPEC_VWSUBUVX_MASK
+    UNSPEC_VWADDVX_MASK
+    UNSPEC_VWSUBVX_MASK
+    UNSPEC_VWADDUWV_MASK
+    UNSPEC_VWSUBUWV_MASK
+    UNSPEC_VWADDWV_MASK
+    UNSPEC_VWSUBWV_MASK
+    UNSPEC_VWADDUWX_MASK
+    UNSPEC_VWSUBUWX_MASK
+    UNSPEC_VWADDWX_MASK
+    UNSPEC_VWSUBWX_MASK
+    UNSPEC_VWMULVV_MASK
+    UNSPEC_VWMULUVV_MASK
+    UNSPEC_VWMULSUVV_MASK
+    UNSPEC_VWMULVX_MASK
+    UNSPEC_VWMULUVX_MASK
+    UNSPEC_VWMULSUVX_MASK
+    UNSPEC_VWMACCUVV_MASK
+    UNSPEC_VWMACCVV_MASK
+    UNSPEC_VWMACCSUVV_MASK
+    UNSPEC_VWMACCUVX_MASK
+    UNSPEC_VWMACCVX_MASK
+    UNSPEC_VWMACCSUVX_MASK
+    UNSPEC_VWMACCUSVX_MASK
+    UNSPEC_VREDSUMVS_MASK
+    UNSPEC_VREDMAXUVS_MASK
+    UNSPEC_VREDMAXVS_MASK
+    UNSPEC_VREDMINUVS_MASK
+    UNSPEC_VREDMINVS_MASK
+    UNSPEC_VREDANDVS_MASK
+    UNSPEC_VREDORVS_MASK
+    UNSPEC_VREDXORVS_MASK
+    UNSPEC_VWREDSUMUVS_MASK
+    UNSPEC_VWREDSUMVS_MASK
+    UNSPEC_VSLIDEUPVX_MASK
+    UNSPEC_VSLIDEDOWNVX_MASK
+    UNSPEC_VSLIDE1UPVX_MASK
+    UNSPEC_VSLIDE1DOWNVX_MASK
+    UNSPEC_VSLIDEUPVI_MASK
+    UNSPEC_VSLIDEDOWNVI_MASK
+    UNSPEC_VRGATHERVV_MASK
+    UNSPEC_VRGATHERVX_MASK
+    UNSPEC_VRGATHERVI_MASK
+    UNSPEC_VIOTA_MASK
+    UNSPEC_VID_MASK
+    UNSPEC_VMERGEVVM_MASK
+    UNSPEC_VMERGEVXM_MASK
+    UNSPEC_VMERGEVIM_MASK
+    ;; add/sub with carry/borrow
+    UNSPEC_VADCVVM_MASK
+    UNSPEC_VADCVXM_MASK
+    UNSPEC_VADCVIM_MASK
+    UNSPEC_VMADCVVM_MASK
+    UNSPEC_VMADCVXM_MASK
+    UNSPEC_VMADCVIM_MASK
+    UNSPEC_VSBCVVM_MASK
+    UNSPEC_VSBCVXM_MASK
+    UNSPEC_VMSBCVVM_MASK
+    UNSPEC_VMSBCVXM_MASK
+    ;; fixed-point arithmetic
+    UNSPEC_VSADDUVV_MASK
+    UNSPEC_VSADDUVX_MASK
+    UNSPEC_VSADDUVI_MASK
+    UNSPEC_VSADDVV_MASK
+    UNSPEC_VSADDVX_MASK
+    UNSPEC_VSADDVI_MASK
+    UNSPEC_VSSUBUVV_MASK
+    UNSPEC_VSSUBUVX_MASK
+    UNSPEC_VSSUBVV_MASK
+    UNSPEC_VSSUBVX_MASK
+    UNSPEC_VAADDVV_MASK
+    UNSPEC_VAADDVX_MASK
+    UNSPEC_VAADDVI_MASK
+    UNSPEC_VASUBVV_MASK
+    UNSPEC_VASUBVX_MASK
+    UNSPEC_VSMULVV_MASK
+    UNSPEC_VSMULVX_MASK
+    UNSPEC_VWSMACCUVV_MASK
+    UNSPEC_VWSMACCUVX_MASK
+    UNSPEC_VWSMACCVV_MASK
+    UNSPEC_VWSMACCVX_MASK
+    UNSPEC_VWSMACCSUVV_MASK
+    UNSPEC_VWSMACCSUVX_MASK
+    UNSPEC_VWSMACCUSVX_MASK
+    UNSPEC_VSSRLVV_MASK
+    UNSPEC_VSSRLVX_MASK
+    UNSPEC_VSSRLVI_MASK
+    UNSPEC_VSSRAVV_MASK
+    UNSPEC_VSSRAVX_MASK
+    UNSPEC_VSSRAVI_MASK
+    UNSPEC_VNCLIPUVV_MASK
+    UNSPEC_VNCLIPUVX_MASK
+    UNSPEC_VNCLIPUVI_MASK
+    UNSPEC_VNCLIPVV_MASK
+    UNSPEC_VNCLIPVX_MASK
+    UNSPEC_VNCLIPVI_MASK
+    ;; dot
+    UNSPEC_VDOTUVV_MASK
+    UNSPEC_VDOTVV_MASK
+])
+
+;; unspec for float operation
+(define_c_enum "unspec" [
+    ;; convert
+    UNSPEC_VFCVTFXV
+    UNSPEC_VFCVTFXUV
+    UNSPEC_VFCVTXFV
+    UNSPEC_VFCVTXUFV
+    UNSPEC_VFNCVTFXV
+    UNSPEC_VFNCVTFXUV
+    UNSPEC_VFNCVTXFV
+    UNSPEC_VFNCVTXUFV
+    UNSPEC_VFWCVTFXV
+    UNSPEC_VFWCVTFXUV
+    UNSPEC_VFWCVTXFV
+    UNSPEC_VFWCVTXUFV
+    UNSPEC_VFNCVTFFV
+    UNSPEC_VFWCVTFFV
+    ;; compare
+    UNSPEC_VMFEQVV
+    UNSPEC_VMFEQVF
+    UNSPEC_VMFNEVV
+    UNSPEC_VMFNEVF
+    UNSPEC_VMFLTVV
+    UNSPEC_VMFGTVV
+    UNSPEC_VMFLTVF
+    UNSPEC_VMFLEVV
+    UNSPEC_VMFGEVV
+    UNSPEC_VMFLEVF
+    UNSPEC_VMFGTVF
+    UNSPEC_VMFGEVF
+    UNSPEC_VMFORDVV
+    UNSPEC_VMFORDVF
+    ;; add/sub/mul/div
+    UNSPEC_VFADDVV
+    UNSPEC_VFADDVF
+    UNSPEC_VFSUBVV
+    UNSPEC_VFSUBVF
+    UNSPEC_VFRSUBVF
+    UNSPEC_VFWADDVV
+    UNSPEC_VFWADDVF
+    UNSPEC_VFWSUBVV
+    UNSPEC_VFWSUBVF
+    UNSPEC_VFWADDWV
+    UNSPEC_VFWADDWF
+    UNSPEC_VFWSUBWV
+    UNSPEC_VFWSUBWF
+    UNSPEC_VFMULVV
+    UNSPEC_VFMULVF
+    UNSPEC_VFDIVVV
+    UNSPEC_VFDIVVF
+    UNSPEC_VFRDIVVF
+    UNSPEC_VFWMULVV
+    UNSPEC_VFWMULVF
+    ;; fused multiply-add/sub
+    UNSPEC_VFMACCVV
+    UNSPEC_VFMACCVF
+    UNSPEC_VFNMACCVV
+    UNSPEC_VFNMACCVF
+    UNSPEC_VFMSACVV
+    UNSPEC_VFMSACVF
+    UNSPEC_VFNMSACVV
+    UNSPEC_VFNMSACVF
+    UNSPEC_VFMADDVV
+    UNSPEC_VFMADDVF
+    UNSPEC_VFNMADDVV
+    UNSPEC_VFNMADDVF
+    UNSPEC_VFMSUBVV
+    UNSPEC_VFMSUBVF
+    UNSPEC_VFNMSUBVV
+    UNSPEC_VFNMSUBVF
+    UNSPEC_VFWMACCVV
+    UNSPEC_VFWMACCVF
+    UNSPEC_VFWNMACCVV
+    UNSPEC_VFWNMACCVF
+    UNSPEC_VFWMSACVV
+    UNSPEC_VFWMSACVF
+    UNSPEC_VFWNMSACVV
+    UNSPEC_VFWNMSACVF
+    ;; reduction
+    UNSPEC_VFREDOSUMVS
+    UNSPEC_VFREDSUMVS
+    UNSPEC_VFWREDOSUMVS
+    UNSPEC_VFWREDSUMVS
+    UNSPEC_VFREDMAXVS
+    UNSPEC_VFREDMINVS
+    ;; other
+    UNSPEC_VFSQRTV
+    UNSPEC_VFMINVV
+    UNSPEC_VFMINVF
+    UNSPEC_VFMAXVV
+    UNSPEC_VFMAXVF
+    UNSPEC_VFSGNJVV
+    UNSPEC_VFSGNJVF
+    UNSPEC_VFSGNJNVV
+    UNSPEC_VFSGNJNVF
+    UNSPEC_VFSGNJXVV
+    UNSPEC_VFSGNJXVF
+    UNSPEC_VFCLASSV
+    UNSPEC_VFMARGEVFM
+    UNSPEC_VFMVVF
+    UNSPEC_VFMVSF
+    UNSPEC_VFMVFS
+    UNSPEC_VFDOTVV
+])
+;; unspec for float operation with mask
+(define_c_enum "unspec" [
+    ;; convert
+    UNSPEC_VFCVTFXV_MASK
+    UNSPEC_VFCVTFXUV_MASK
+    UNSPEC_VFCVTXFV_MASK
+    UNSPEC_VFCVTXUFV_MASK
+    UNSPEC_VFNCVTFXV_MASK
+    UNSPEC_VFNCVTFXUV_MASK
+    UNSPEC_VFNCVTXFV_MASK
+    UNSPEC_VFNCVTXUFV_MASK
+    UNSPEC_VFWCVTFXV_MASK
+    UNSPEC_VFWCVTFXUV_MASK
+    UNSPEC_VFWCVTXFV_MASK
+    UNSPEC_VFWCVTXUFV_MASK
+    UNSPEC_VFNCVTFFV_MASK
+    UNSPEC_VFWCVTFFV_MASK
+    ;; compare
+    UNSPEC_VMFEQVV_MASK
+    UNSPEC_VMFEQVF_MASK
+    UNSPEC_VMFNEVV_MASK
+    UNSPEC_VMFNEVF_MASK
+    UNSPEC_VMFLTVV_MASK
+    UNSPEC_VMFGTVV_MASK
+    UNSPEC_VMFLTVF_MASK
+    UNSPEC_VMFLEVV_MASK
+    UNSPEC_VMFGEVV_MASK
+    UNSPEC_VMFLEVF_MASK
+    UNSPEC_VMFGTVF_MASK
+    UNSPEC_VMFGEVF_MASK
+    UNSPEC_VMFORDVV_MASK
+    UNSPEC_VMFORDVF_MASK
+    ;; add/sub/mul/div
+    UNSPEC_VFADDVV_MASK
+    UNSPEC_VFADDVF_MASK
+    UNSPEC_VFSUBVV_MASK
+    UNSPEC_VFSUBVF_MASK
+    UNSPEC_VFRSUBVF_MASK
+    UNSPEC_VFWADDVV_MASK
+    UNSPEC_VFWADDVF_MASK
+    UNSPEC_VFWSUBVV_MASK
+    UNSPEC_VFWSUBVF_MASK
+    UNSPEC_VFWADDWV_MASK
+    UNSPEC_VFWADDWF_MASK
+    UNSPEC_VFWSUBWV_MASK
+    UNSPEC_VFWSUBWF_MASK
+    UNSPEC_VFMULVV_MASK
+    UNSPEC_VFMULVF_MASK
+    UNSPEC_VFDIVVV_MASK
+    UNSPEC_VFDIVVF_MASK
+    UNSPEC_VFRDIVVF_MASK
+    UNSPEC_VFWMULVV_MASK
+    UNSPEC_VFWMULVF_MASK
+    ;; fused multiply-add/sub
+    UNSPEC_VFMACCVV_MASK
+    UNSPEC_VFMACCVF_MASK
+    UNSPEC_VFNMACCVV_MASK
+    UNSPEC_VFNMACCVF_MASK
+    UNSPEC_VFMSACVV_MASK
+    UNSPEC_VFMSACVF_MASK
+    UNSPEC_VFNMSACVV_MASK
+    UNSPEC_VFNMSACVF_MASK
+    UNSPEC_VFMADDVV_MASK
+    UNSPEC_VFMADDVF_MASK
+    UNSPEC_VFNMADDVV_MASK
+    UNSPEC_VFNMADDVF_MASK
+    UNSPEC_VFMSUBVV_MASK
+    UNSPEC_VFMSUBVF_MASK
+    UNSPEC_VFNMSUBVV_MASK
+    UNSPEC_VFNMSUBVF_MASK
+    UNSPEC_VFWMACCVV_MASK
+    UNSPEC_VFWMACCVF_MASK
+    UNSPEC_VFWNMACCVV_MASK
+    UNSPEC_VFWNMACCVF_MASK
+    UNSPEC_VFWMSACVV_MASK
+    UNSPEC_VFWMSACVF_MASK
+    UNSPEC_VFWNMSACVV_MASK
+    UNSPEC_VFWNMSACVF_MASK
+    ;; reduction
+    UNSPEC_VFREDOSUMVS_MASK
+    UNSPEC_VFREDSUMVS_MASK
+    UNSPEC_VFWREDOSUMVS_MASK
+    UNSPEC_VFWREDSUMVS_MASK
+    UNSPEC_VFREDMAXVS_MASK
+    UNSPEC_VFREDMINVS_MASK
+    ;; other
+    UNSPEC_VFSQRTV_MASK
+    UNSPEC_VFMINVV_MASK
+    UNSPEC_VFMINVF_MASK
+    UNSPEC_VFMAXVV_MASK
+    UNSPEC_VFMAXVF_MASK
+    UNSPEC_VFSGNJVV_MASK
+    UNSPEC_VFSGNJVF_MASK
+    UNSPEC_VFSGNJNVV_MASK
+    UNSPEC_VFSGNJNVF_MASK
+    UNSPEC_VFSGNJXVV_MASK
+    UNSPEC_VFSGNJXVF_MASK
+    UNSPEC_VFCLASSV_MASK
+    UNSPEC_VFDOTVV_MASK
+])
+
+(define_c_enum "unspec" [
+  UNSPEC_VLB UNSPEC_VLBU
+  UNSPEC_VLH UNSPEC_VLHU
+  UNSPEC_VLW UNSPEC_VLWU
+  UNSPEC_VLE
+
+  UNSPEC_VLSB UNSPEC_VLSBU
+  UNSPEC_VLSH UNSPEC_VLSHU
+  UNSPEC_VLSW UNSPEC_VLSWU
+  UNSPEC_VLSE
+
+  UNSPEC_VLXB UNSPEC_VLXBU
+  UNSPEC_VLXH UNSPEC_VLXHU
+  UNSPEC_VLXW UNSPEC_VLXWU
+  UNSPEC_VLXE
+
+  UNSPEC_VSUXB
+  UNSPEC_VSUXH
+  UNSPEC_VSUXW
+  UNSPEC_VSUXE
+])
+
+(define_c_enum "unspec" [
+  UNSPEC_VEND
+])
+
+(define_int_iterator VECTOR_INT_CMP_VV [
+    UNSPEC_VMSEQVV
+    UNSPEC_VMSNEVV
+    UNSPEC_VMSLTVV
+    UNSPEC_VMSLTUVV
+    UNSPEC_VMSLEVV
+    UNSPEC_VMSLEUVV
+])
+
+(define_int_iterator VECTOR_INT_CMP_VV_MASK [
+    UNSPEC_VMSEQVV_MASK
+    UNSPEC_VMSNEVV_MASK
+    UNSPEC_VMSLTVV_MASK
+    UNSPEC_VMSLTUVV_MASK
+    UNSPEC_VMSLEVV_MASK
+    UNSPEC_VMSLEUVV_MASK
+])
+
+(define_int_iterator VECTOR_INT_CMP_VX [
+    UNSPEC_VMSEQVX
+    UNSPEC_VMSNEVX
+    UNSPEC_VMSLTVX
+    UNSPEC_VMSLTUVX
+    UNSPEC_VMSLEVX
+    UNSPEC_VMSLEUVX
+    UNSPEC_VMSGTVX
+    UNSPEC_VMSGTUVX
+    UNSPEC_VMSGEVX
+    UNSPEC_VMSGEUVX
+])
+
+(define_int_iterator VECTOR_INT_CMP_VX_MASK [
+    UNSPEC_VMSEQVX_MASK
+    UNSPEC_VMSNEVX_MASK
+    UNSPEC_VMSLTVX_MASK
+    UNSPEC_VMSLTUVX_MASK
+    UNSPEC_VMSLEVX_MASK
+    UNSPEC_VMSLEUVX_MASK
+    UNSPEC_VMSGTVX_MASK
+    UNSPEC_VMSGTUVX_MASK
+    UNSPEC_VMSGEVX_MASK
+    UNSPEC_VMSGEUVX_MASK
+])
+
+(define_int_iterator VECTOR_INT_CMP_VI [
+    UNSPEC_VMSEQVI
+    UNSPEC_VMSNEVI
+    UNSPEC_VMSLEVI
+    UNSPEC_VMSLEUVI
+    UNSPEC_VMSGTVI
+    UNSPEC_VMSGTUVI
+])
+
+(define_int_iterator VECTOR_INT_CMP_VI_MASK [
+    UNSPEC_VMSEQVI_MASK
+    UNSPEC_VMSNEVI_MASK
+    UNSPEC_VMSLEVI_MASK
+    UNSPEC_VMSLEUVI_MASK
+    UNSPEC_VMSGTVI_MASK
+    UNSPEC_VMSGTUVI_MASK
+])
+
+(define_int_iterator VECTOR_MASK_LOGICAL [
+    UNSPEC_VMAND
+    UNSPEC_VMNAND
+    UNSPEC_VMANDNOT
+    UNSPEC_VMXOR
+    UNSPEC_VMOR
+    UNSPEC_VMNOR
+    UNSPEC_VMORNOT
+    UNSPEC_VMXNOR
+])
+
+(define_int_iterator VECTOR_MASK_PSEUDO_CAST [
+    UNSPEC_VMCPY
+    UNSPEC_VMNOT
+])
+
+(define_int_iterator VECTOR_MASK_PSEUDO_SET [
+    UNSPEC_VMCLR
+    UNSPEC_VMSET
+])
+
+(define_int_iterator VECTOR_MASK_BIT [
+    UNSPEC_VMPOPC
+    UNSPEC_VMFIRST
+])
+
+(define_int_iterator VECTOR_MASK_BIT_MASK [
+    UNSPEC_VMPOPC_MASK
+    UNSPEC_VMFIRST_MASK
+])
+
+(define_int_iterator VECTOR_MASK_SET_FIRST [
+    UNSPEC_VMSBF
+    UNSPEC_VMSIF
+    UNSPEC_VMSOF
+])
+
+(define_int_iterator VECTOR_MASK_SET_FIRST_MASK [
+    UNSPEC_VMSBF_MASK
+    UNSPEC_VMSIF_MASK
+    UNSPEC_VMSOF_MASK
+])
+
+(define_int_iterator VECTOR_INT_BITWISE_VV [
+    UNSPEC_VANDVV
+    UNSPEC_VORVV
+    UNSPEC_VXORVV
+])
+
+(define_int_iterator VECTOR_INT_BITWISE_VV_MASK [
+    UNSPEC_VANDVV_MASK
+    UNSPEC_VORVV_MASK
+    UNSPEC_VXORVV_MASK
+])
+
+(define_int_iterator VECTOR_INT_BITWISE_VX [
+    UNSPEC_VANDVX
+    UNSPEC_VORVX
+    UNSPEC_VXORVX
+])
+
+(define_int_iterator VECTOR_INT_BITWISE_VX_MASK [
+    UNSPEC_VANDVX_MASK
+    UNSPEC_VORVX_MASK
+    UNSPEC_VXORVX_MASK
+])
+
+(define_int_iterator VECTOR_INT_BITWISE_VI [
+    UNSPEC_VANDVI
+    UNSPEC_VORVI
+    UNSPEC_VXORVI
+])
+
+(define_int_iterator VECTOR_INT_BITWISE_VI_MASK [
+    UNSPEC_VANDVI_MASK
+    UNSPEC_VORVI_MASK
+    UNSPEC_VXORVI_MASK
+])
+
+(define_int_iterator VECTOR_INT_BITSHIFT_VV [
+    UNSPEC_VSLLVV
+    UNSPEC_VSRLVV
+    UNSPEC_VSRAVV
+    UNSPEC_VSSRLVV
+    UNSPEC_VSSRAVV
+])
+
+(define_int_iterator VECTOR_INT_BITSHIFT_VV_MASK [
+    UNSPEC_VSLLVV_MASK
+    UNSPEC_VSRLVV_MASK
+    UNSPEC_VSRAVV_MASK
+    UNSPEC_VSSRLVV_MASK
+    UNSPEC_VSSRAVV_MASK
+])
+
+(define_int_iterator VECTOR_INT_BITSHIFT_VX [
+    UNSPEC_VSLLVX
+    UNSPEC_VSRLVX
+    UNSPEC_VSRAVX
+    UNSPEC_VSSRLVX
+    UNSPEC_VSSRAVX
+])
+
+(define_int_iterator VECTOR_INT_BITSHIFT_VX_MASK [
+    UNSPEC_VSLLVX_MASK
+    UNSPEC_VSRLVX_MASK
+    UNSPEC_VSRAVX_MASK
+    UNSPEC_VSSRLVX_MASK
+    UNSPEC_VSSRAVX_MASK
+])
+
+(define_int_iterator VECTOR_INT_BITSHIFT_VI [
+    UNSPEC_VSLLVI
+    UNSPEC_VSRLVI
+    UNSPEC_VSRAVI
+    UNSPEC_VSSRLVI
+    UNSPEC_VSSRAVI
+])
+
+(define_int_iterator VECTOR_INT_BITSHIFT_VI_MASK [
+    UNSPEC_VSLLVI_MASK
+    UNSPEC_VSRLVI_MASK
+    UNSPEC_VSRAVI_MASK
+    UNSPEC_VSSRLVI_MASK
+    UNSPEC_VSSRAVI_MASK
+])
+
+(define_int_iterator VECTOR_INT_NARROWSHIFT_VV [
+    UNSPEC_VNSRLVV
+    UNSPEC_VNSRAVV
+    UNSPEC_VNCLIPUVV
+    UNSPEC_VNCLIPVV
+])
+
+(define_int_iterator VECTOR_INT_NARROWSHIFT_VX [
+    UNSPEC_VNSRLVX
+    UNSPEC_VNSRAVX
+    UNSPEC_VNCLIPUVX
+    UNSPEC_VNCLIPVX
+])
+
+(define_int_iterator VECTOR_INT_NARROWSHIFT_VI [
+    UNSPEC_VNSRLVI
+    UNSPEC_VNSRAVI
+    UNSPEC_VNCLIPUVI
+    UNSPEC_VNCLIPVI
+])
+
+(define_int_iterator VECTOR_INT_NARROWSHIFT_VV_MASK [
+    UNSPEC_VNSRLVV_MASK
+    UNSPEC_VNSRAVV_MASK
+    UNSPEC_VNCLIPUVV_MASK
+    UNSPEC_VNCLIPVV_MASK
+])
+
+(define_int_iterator VECTOR_INT_NARROWSHIFT_VX_MASK [
+    UNSPEC_VNSRLVX_MASK
+    UNSPEC_VNSRAVX_MASK
+    UNSPEC_VNCLIPUVX_MASK
+    UNSPEC_VNCLIPVX_MASK
+])
+
+(define_int_iterator VECTOR_INT_NARROWSHIFT_VI_MASK [
+    UNSPEC_VNSRLVI_MASK
+    UNSPEC_VNSRAVI_MASK
+    UNSPEC_VNCLIPUVI_MASK
+    UNSPEC_VNCLIPVI_MASK
+])
+
+;; Single-width integer arithmetic: add/sub, div/rem, min/max, multiply,
+;; saturating (vs*) and averaging (va*) forms, plus vsmul and vdot.
+(define_int_iterator VECTOR_INT_ARITH_VV [
+    UNSPEC_VADDVV
+    UNSPEC_VSUBVV
+    UNSPEC_VDIVUVV
+    UNSPEC_VDIVVV
+    UNSPEC_VREMUVV
+    UNSPEC_VREMVV
+    UNSPEC_VMINUVV
+    UNSPEC_VMINVV
+    UNSPEC_VMAXUVV
+    UNSPEC_VMAXVV
+    UNSPEC_VMULVV
+    UNSPEC_VMULHVV
+    UNSPEC_VMULHUVV
+    UNSPEC_VMULHSUVV
+    UNSPEC_VSADDUVV
+    UNSPEC_VSADDVV
+    UNSPEC_VSSUBUVV
+    UNSPEC_VSSUBVV
+    UNSPEC_VAADDVV
+    UNSPEC_VASUBVV
+    UNSPEC_VSMULVV
+    UNSPEC_VDOTVV
+])
+
+(define_int_iterator VECTOR_INT_ARITH_VV_MASK [
+    UNSPEC_VADDVV_MASK
+    UNSPEC_VSUBVV_MASK
+    UNSPEC_VDIVUVV_MASK
+    UNSPEC_VDIVVV_MASK
+    UNSPEC_VREMUVV_MASK
+    UNSPEC_VREMVV_MASK
+    UNSPEC_VMINUVV_MASK
+    UNSPEC_VMINVV_MASK
+    UNSPEC_VMAXUVV_MASK
+    UNSPEC_VMAXVV_MASK
+    UNSPEC_VMULVV_MASK
+    UNSPEC_VMULHVV_MASK
+    UNSPEC_VMULHUVV_MASK
+    UNSPEC_VMULHSUVV_MASK
+    UNSPEC_VSADDUVV_MASK
+    UNSPEC_VSADDVV_MASK
+    UNSPEC_VSSUBUVV_MASK
+    UNSPEC_VSSUBVV_MASK
+    UNSPEC_VAADDVV_MASK
+    UNSPEC_VASUBVV_MASK
+    UNSPEC_VSMULVV_MASK
+    UNSPEC_VDOTVV_MASK
+])
+
+;; Vector-scalar forms; vrsub only exists in VX/VI form.
+(define_int_iterator VECTOR_INT_ARITH_VX [
+    UNSPEC_VADDVX
+    UNSPEC_VSUBVX
+    UNSPEC_VRSUBVX
+    UNSPEC_VDIVUVX
+    UNSPEC_VDIVVX
+    UNSPEC_VREMUVX
+    UNSPEC_VREMVX
+    UNSPEC_VMINUVX
+    UNSPEC_VMINVX
+    UNSPEC_VMAXUVX
+    UNSPEC_VMAXVX
+    UNSPEC_VMULVX
+    UNSPEC_VMULHVX
+    UNSPEC_VMULHUVX
+    UNSPEC_VMULHSUVX
+    UNSPEC_VSADDUVX
+    UNSPEC_VSADDVX
+    UNSPEC_VSSUBUVX
+    UNSPEC_VSSUBVX
+    UNSPEC_VAADDVX
+    UNSPEC_VASUBVX
+    UNSPEC_VSMULVX
+])
+
+(define_int_iterator VECTOR_INT_ARITH_VX_MASK [
+    UNSPEC_VADDVX_MASK
+    UNSPEC_VSUBVX_MASK
+    UNSPEC_VRSUBVX_MASK
+    UNSPEC_VDIVUVX_MASK
+    UNSPEC_VDIVVX_MASK
+    UNSPEC_VREMUVX_MASK
+    UNSPEC_VREMVX_MASK
+    UNSPEC_VMINUVX_MASK
+    UNSPEC_VMINVX_MASK
+    UNSPEC_VMAXUVX_MASK
+    UNSPEC_VMAXVX_MASK
+    UNSPEC_VMULVX_MASK
+    UNSPEC_VMULHVX_MASK
+    UNSPEC_VMULHUVX_MASK
+    UNSPEC_VMULHSUVX_MASK
+    UNSPEC_VSADDUVX_MASK
+    UNSPEC_VSADDVX_MASK
+    UNSPEC_VSSUBUVX_MASK
+    UNSPEC_VSSUBVX_MASK
+    UNSPEC_VAADDVX_MASK
+    UNSPEC_VASUBVX_MASK
+    UNSPEC_VSMULVX_MASK
+])
+
+;; Immediate forms cover only the subset of operations that have a .vi
+;; instruction encoding.
+(define_int_iterator VECTOR_INT_ARITH_VI [
+    UNSPEC_VADDVI
+    UNSPEC_VRSUBVI
+    UNSPEC_VSADDUVI
+    UNSPEC_VSADDVI
+    UNSPEC_VAADDVI
+])
+
+(define_int_iterator VECTOR_INT_ARITH_VI_MASK [
+    UNSPEC_VADDVI_MASK
+    UNSPEC_VRSUBVI_MASK
+    UNSPEC_VSADDUVI_MASK
+    UNSPEC_VSADDVI_MASK
+    UNSPEC_VAADDVI_MASK
+])
+
+;; Single-width integer multiply-accumulate family (vmacc/vnmsac/vmadd/vnmsub).
+(define_int_iterator VECTOR_INT_ARITH_ACCUM_VV [
+    UNSPEC_VMACCVV
+    UNSPEC_VNMSACVV
+    UNSPEC_VMADDVV
+    UNSPEC_VNMSUBVV
+])
+
+(define_int_iterator VECTOR_INT_ARITH_ACCUM_VV_MASK [
+    UNSPEC_VMACCVV_MASK
+    UNSPEC_VNMSACVV_MASK
+    UNSPEC_VMADDVV_MASK
+    UNSPEC_VNMSUBVV_MASK
+])
+
+(define_int_iterator VECTOR_INT_ARITH_ACCUM_VX [
+    UNSPEC_VMACCVX
+    UNSPEC_VNMSACVX
+    UNSPEC_VMADDVX
+    UNSPEC_VNMSUBVX
+])
+
+(define_int_iterator VECTOR_INT_ARITH_ACCUM_VX_MASK [
+    UNSPEC_VMACCVX_MASK
+    UNSPEC_VNMSACVX_MASK
+    UNSPEC_VMADDVX_MASK
+    UNSPEC_VNMSUBVX_MASK
+])
+
+;; Widening (vw*) integer arithmetic: destination elements are wider than
+;; the narrow source elements.
+(define_int_iterator VECTOR_INT_WIDENARITH_VV [
+    UNSPEC_VWADDUVV
+    UNSPEC_VWSUBUVV
+    UNSPEC_VWADDVV
+    UNSPEC_VWSUBVV
+    UNSPEC_VWMULVV
+    UNSPEC_VWMULUVV
+    UNSPEC_VWMULSUVV
+])
+
+(define_int_iterator VECTOR_INT_WIDENARITH_VX [
+    UNSPEC_VWADDUVX
+    UNSPEC_VWSUBUVX
+    UNSPEC_VWADDVX
+    UNSPEC_VWSUBVX
+    UNSPEC_VWMULVX
+    UNSPEC_VWMULUVX
+    UNSPEC_VWMULSUVX
+])
+
+;; "w-form" (vw*.wv / vw*.wx) variants: the first source operand is already
+;; in the wide element width.
+;; NOTE(review): the doubled "WIDENWIDEN" in the iterator name looks like a
+;; typo for something like VECTOR_INT_WIDEARITH_WV — consider renaming.
+(define_int_iterator VECTOR_INT_WIDENWIDENARITH_VV [
+    UNSPEC_VWADDUWV
+    UNSPEC_VWSUBUWV
+    UNSPEC_VWADDWV
+    UNSPEC_VWSUBWV
+])
+
+(define_int_iterator VECTOR_INT_WIDENWIDENARITH_VX [
+    UNSPEC_VWADDUWX
+    UNSPEC_VWSUBUWX
+    UNSPEC_VWADDWX
+    UNSPEC_VWSUBWX
+])
+
+(define_int_iterator VECTOR_INT_WIDENARITH_VV_MASK [
+    UNSPEC_VWADDUVV_MASK
+    UNSPEC_VWSUBUVV_MASK
+    UNSPEC_VWADDVV_MASK
+    UNSPEC_VWSUBVV_MASK
+    UNSPEC_VWMULVV_MASK
+    UNSPEC_VWMULUVV_MASK
+    UNSPEC_VWMULSUVV_MASK
+])
+
+(define_int_iterator VECTOR_INT_WIDENARITH_VX_MASK [
+    UNSPEC_VWADDUVX_MASK
+    UNSPEC_VWSUBUVX_MASK
+    UNSPEC_VWADDVX_MASK
+    UNSPEC_VWSUBVX_MASK
+    UNSPEC_VWMULVX_MASK
+    UNSPEC_VWMULUVX_MASK
+    UNSPEC_VWMULSUVX_MASK
+])
+
+(define_int_iterator VECTOR_INT_WIDENWIDENARITH_VV_MASK [
+    UNSPEC_VWADDUWV_MASK
+    UNSPEC_VWSUBUWV_MASK
+    UNSPEC_VWADDWV_MASK
+    UNSPEC_VWSUBWV_MASK
+])
+
+(define_int_iterator VECTOR_INT_WIDENWIDENARITH_VX_MASK [
+    UNSPEC_VWADDUWX_MASK
+    UNSPEC_VWSUBUWX_MASK
+    UNSPEC_VWADDWX_MASK
+    UNSPEC_VWSUBWX_MASK
+])
+
+;; Widening multiply-accumulate; the VX list additionally has the *US
+;; (vwmaccus/vwsmaccus) entries, which only exist in vector-scalar form.
+(define_int_iterator VECTOR_INT_WIDENARITH_ACCUM_VV [
+    UNSPEC_VWMACCUVV
+    UNSPEC_VWMACCVV
+    UNSPEC_VWMACCSUVV
+    UNSPEC_VWSMACCUVV
+    UNSPEC_VWSMACCVV
+    UNSPEC_VWSMACCSUVV
+])
+
+(define_int_iterator VECTOR_INT_WIDENARITH_ACCUM_VX [
+    UNSPEC_VWMACCUVX
+    UNSPEC_VWMACCVX
+    UNSPEC_VWMACCSUVX
+    UNSPEC_VWMACCUSVX
+    UNSPEC_VWSMACCUVX
+    UNSPEC_VWSMACCVX
+    UNSPEC_VWSMACCSUVX
+    UNSPEC_VWSMACCUSVX
+])
+
+(define_int_iterator VECTOR_INT_WIDENARITH_ACCUM_VV_MASK [
+    UNSPEC_VWMACCUVV_MASK
+    UNSPEC_VWMACCVV_MASK
+    UNSPEC_VWMACCSUVV_MASK
+    UNSPEC_VWSMACCUVV_MASK
+    UNSPEC_VWSMACCVV_MASK
+    UNSPEC_VWSMACCSUVV_MASK
+])
+
+(define_int_iterator VECTOR_INT_WIDENARITH_ACCUM_VX_MASK [
+    UNSPEC_VWMACCUVX_MASK
+    UNSPEC_VWMACCVX_MASK
+    UNSPEC_VWMACCSUVX_MASK
+    UNSPEC_VWMACCUSVX_MASK
+    UNSPEC_VWSMACCUVX_MASK
+    UNSPEC_VWSMACCVX_MASK
+    UNSPEC_VWSMACCSUVX_MASK
+    UNSPEC_VWSMACCUSVX_MASK
+])
+
+;; Carry-consuming add/sub (vadc/vsbc) and the corresponding
+;; carry-producing forms (vmadc/vmsbc) whose result is a mask.
+(define_int_iterator VECTOR_CARRYUSE_VVM_MASK [
+    UNSPEC_VADCVVM_MASK
+    UNSPEC_VSBCVVM_MASK
+])
+
+(define_int_iterator VECTOR_CARRYUSE_VXM_MASK [
+    UNSPEC_VADCVXM_MASK
+    UNSPEC_VSBCVXM_MASK
+])
+
+(define_int_iterator VECTOR_CARRYOUT_VVM_MASK [
+    UNSPEC_VMADCVVM_MASK
+    UNSPEC_VMSBCVVM_MASK
+])
+
+(define_int_iterator VECTOR_CARRYOUT_VXM_MASK [
+    UNSPEC_VMADCVXM_MASK
+    UNSPEC_VMSBCVXM_MASK
+])
+
+;; Integer reductions (vred*), widening sum reductions (vwredsum*), and
+;; register-permutation operations (vslide*/vrgather).
+;; NOTE(review): "WIDENWIDENRED" doubles the word "WIDEN" — consider
+;; renaming to VECTOR_INT_WIDENRED_VS for consistency.
+(define_int_iterator VECTOR_INT_RED_VS [
+    UNSPEC_VREDSUMVS
+    UNSPEC_VREDMAXUVS
+    UNSPEC_VREDMAXVS
+    UNSPEC_VREDMINUVS
+    UNSPEC_VREDMINVS
+    UNSPEC_VREDANDVS
+    UNSPEC_VREDORVS
+    UNSPEC_VREDXORVS
+])
+
+(define_int_iterator VECTOR_INT_RED_VS_MASK [
+    UNSPEC_VREDSUMVS_MASK
+    UNSPEC_VREDMAXUVS_MASK
+    UNSPEC_VREDMAXVS_MASK
+    UNSPEC_VREDMINUVS_MASK
+    UNSPEC_VREDMINVS_MASK
+    UNSPEC_VREDANDVS_MASK
+    UNSPEC_VREDORVS_MASK
+    UNSPEC_VREDXORVS_MASK
+])
+
+(define_int_iterator VECTOR_INT_WIDENWIDENRED_VS [
+    UNSPEC_VWREDSUMUVS
+    UNSPEC_VWREDSUMVS
+])
+
+(define_int_iterator VECTOR_INT_WIDENWIDENRED_VS_MASK [
+    UNSPEC_VWREDSUMUVS_MASK
+    UNSPEC_VWREDSUMVS_MASK
+])
+
+(define_int_iterator VECTOR_INT_GATHER_VV [
+    UNSPEC_VRGATHERVV
+])
+
+(define_int_iterator VECTOR_INT_GATHER_VV_MASK [
+    UNSPEC_VRGATHERVV_MASK
+])
+
+;; Slide and gather with a scalar index/offset operand.
+(define_int_iterator VECTOR_INT_SLIDEGATHER_VX [
+    UNSPEC_VSLIDEUPVX
+    UNSPEC_VSLIDEDOWNVX
+    UNSPEC_VSLIDE1UPVX
+    UNSPEC_VSLIDE1DOWNVX
+    UNSPEC_VRGATHERVX
+])
+
+(define_int_iterator VECTOR_INT_SLIDEGATHER_VX_MASK [
+    UNSPEC_VSLIDEUPVX_MASK
+    UNSPEC_VSLIDEDOWNVX_MASK
+    UNSPEC_VSLIDE1UPVX_MASK
+    UNSPEC_VSLIDE1DOWNVX_MASK
+    UNSPEC_VRGATHERVX_MASK
+])
+
+(define_int_iterator VECTOR_INT_SLIDE_VI [
+    UNSPEC_VSLIDEUPVI
+    UNSPEC_VSLIDEDOWNVI
+])
+
+(define_int_iterator VECTOR_INT_SLIDE_VI_MASK [
+    UNSPEC_VSLIDEUPVI_MASK
+    UNSPEC_VSLIDEDOWNVI_MASK
+])
+
+(define_int_iterator VECTOR_INT_GATHER_VI [
+    UNSPEC_VRGATHERVI
+])
+
+(define_int_iterator VECTOR_INT_GATHER_VI_MASK [
+    UNSPEC_VRGATHERVI_MASK
+])
+
+;; Floating-point compares (vmf*) whose destination is a mask register,
+;; in vector-vector (VV) and vector-scalar (VF) forms.
+(define_int_iterator VF_OPM_VV [
+    UNSPEC_VMFEQVV
+    UNSPEC_VMFNEVV
+    UNSPEC_VMFLTVV
+    UNSPEC_VMFLEVV
+    UNSPEC_VMFGTVV
+    UNSPEC_VMFGEVV
+    UNSPEC_VMFORDVV
+])
+
+(define_int_iterator VF_OPM_VV_MASK [
+    UNSPEC_VMFEQVV_MASK
+    UNSPEC_VMFNEVV_MASK
+    UNSPEC_VMFLTVV_MASK
+    UNSPEC_VMFLEVV_MASK
+    UNSPEC_VMFGTVV_MASK
+    UNSPEC_VMFGEVV_MASK
+    UNSPEC_VMFORDVV_MASK
+])
+
+(define_int_iterator VF_OPM_VF [
+    UNSPEC_VMFEQVF
+    UNSPEC_VMFNEVF
+    UNSPEC_VMFLTVF
+    UNSPEC_VMFLEVF
+    UNSPEC_VMFGTVF
+    UNSPEC_VMFGEVF
+    UNSPEC_VMFORDVF
+])
+
+(define_int_iterator VF_OPM_VF_MASK [
+    UNSPEC_VMFEQVF_MASK
+    UNSPEC_VMFNEVF_MASK
+    UNSPEC_VMFLTVF_MASK
+    UNSPEC_VMFLEVF_MASK
+    UNSPEC_VMFGTVF_MASK
+    UNSPEC_VMFGEVF_MASK
+    UNSPEC_VMFORDVF_MASK
+])
+
+;; Single-width floating-point arithmetic, vector-vector form.
+(define_int_iterator VF_OP_VV [
+    UNSPEC_VFADDVV
+    UNSPEC_VFSUBVV
+    UNSPEC_VFMULVV
+    UNSPEC_VFDIVVV
+    UNSPEC_VFMINVV
+    UNSPEC_VFMAXVV
+    UNSPEC_VFSGNJVV
+    UNSPEC_VFSGNJNVV
+    UNSPEC_VFSGNJXVV
+    UNSPEC_VFDOTVV
+])
+
+(define_int_iterator VF_OP_VV_MASK [
+    UNSPEC_VFADDVV_MASK
+    UNSPEC_VFSUBVV_MASK
+    UNSPEC_VFMULVV_MASK
+    UNSPEC_VFDIVVV_MASK
+    UNSPEC_VFMINVV_MASK
+    UNSPEC_VFMAXVV_MASK
+    UNSPEC_VFSGNJVV_MASK
+    UNSPEC_VFSGNJNVV_MASK
+    UNSPEC_VFSGNJXVV_MASK
+    UNSPEC_VFDOTVV_MASK
+])
+
+;; Vector-scalar (.vf) forms; the reversed vfrsub/vfrdiv exist only here.
+(define_int_iterator VF_OP_VF [
+    UNSPEC_VFADDVF
+    UNSPEC_VFSUBVF
+    UNSPEC_VFRSUBVF
+    UNSPEC_VFMULVF
+    UNSPEC_VFDIVVF
+    UNSPEC_VFRDIVVF
+    UNSPEC_VFMINVF
+    UNSPEC_VFMAXVF
+    UNSPEC_VFSGNJVF
+    UNSPEC_VFSGNJNVF
+    UNSPEC_VFSGNJXVF
+])
+
+(define_int_iterator VF_OP_VF_MASK [
+    UNSPEC_VFADDVF_MASK
+    UNSPEC_VFSUBVF_MASK
+    UNSPEC_VFRSUBVF_MASK
+    UNSPEC_VFMULVF_MASK
+    UNSPEC_VFDIVVF_MASK
+    UNSPEC_VFRDIVVF_MASK
+    UNSPEC_VFMINVF_MASK
+    UNSPEC_VFMAXVF_MASK
+    UNSPEC_VFSGNJVF_MASK
+    UNSPEC_VFSGNJNVF_MASK
+    UNSPEC_VFSGNJXVF_MASK
+])
+
+;; Widening (vfw*) floating-point arithmetic; the WV/WF groups take an
+;; already-wide first source operand.
+(define_int_iterator VF_OPW_VV [
+    UNSPEC_VFWADDVV
+    UNSPEC_VFWSUBVV
+    UNSPEC_VFWMULVV
+])
+
+(define_int_iterator VF_OPW_VV_MASK [
+    UNSPEC_VFWADDVV_MASK
+    UNSPEC_VFWSUBVV_MASK
+    UNSPEC_VFWMULVV_MASK
+])
+
+(define_int_iterator VF_OPW_VF [
+    UNSPEC_VFWADDVF
+    UNSPEC_VFWSUBVF
+    UNSPEC_VFWMULVF
+])
+
+(define_int_iterator VF_OPW_VF_MASK [
+    UNSPEC_VFWADDVF_MASK
+    UNSPEC_VFWSUBVF_MASK
+    UNSPEC_VFWMULVF_MASK
+])
+
+(define_int_iterator VF_OPW_WV [
+    UNSPEC_VFWADDWV
+    UNSPEC_VFWSUBWV
+])
+
+(define_int_iterator VF_OPW_WV_MASK [
+    UNSPEC_VFWADDWV_MASK
+    UNSPEC_VFWSUBWV_MASK
+])
+
+(define_int_iterator VF_OPW_WF [
+    UNSPEC_VFWADDWF
+    UNSPEC_VFWSUBWF
+])
+
+(define_int_iterator VF_OPW_WF_MASK [
+    UNSPEC_VFWADDWF_MASK
+    UNSPEC_VFWSUBWF_MASK
+])
+
+;; Fused floating-point multiply-add family (vfmacc/vfnmacc/vfmsac/...).
+(define_int_iterator VF_OP_FUSED_VV [
+    UNSPEC_VFMACCVV
+    UNSPEC_VFNMACCVV
+    UNSPEC_VFMSACVV
+    UNSPEC_VFNMSACVV
+    UNSPEC_VFMADDVV
+    UNSPEC_VFNMADDVV
+    UNSPEC_VFMSUBVV
+    UNSPEC_VFNMSUBVV
+])
+
+(define_int_iterator VF_OP_FUSED_VV_MASK [
+    UNSPEC_VFMACCVV_MASK
+    UNSPEC_VFNMACCVV_MASK
+    UNSPEC_VFMSACVV_MASK
+    UNSPEC_VFNMSACVV_MASK
+    UNSPEC_VFMADDVV_MASK
+    UNSPEC_VFNMADDVV_MASK
+    UNSPEC_VFMSUBVV_MASK
+    UNSPEC_VFNMSUBVV_MASK
+])
+
+(define_int_iterator VF_OP_FUSED_VF [
+    UNSPEC_VFMACCVF
+    UNSPEC_VFNMACCVF
+    UNSPEC_VFMSACVF
+    UNSPEC_VFNMSACVF
+    UNSPEC_VFMADDVF
+    UNSPEC_VFNMADDVF
+    UNSPEC_VFMSUBVF
+    UNSPEC_VFNMSUBVF
+])
+
+(define_int_iterator VF_OP_FUSED_VF_MASK [
+    UNSPEC_VFMACCVF_MASK
+    UNSPEC_VFNMACCVF_MASK
+    UNSPEC_VFMSACVF_MASK
+    UNSPEC_VFNMSACVF_MASK
+    UNSPEC_VFMADDVF_MASK
+    UNSPEC_VFNMADDVF_MASK
+    UNSPEC_VFMSUBVF_MASK
+    UNSPEC_VFNMSUBVF_MASK
+])
+
+;; Widening fused multiply-add (vfw[n]macc/vfw[n]msac).
+(define_int_iterator VF_OP_FUSEDW_VV [
+    UNSPEC_VFWMACCVV
+    UNSPEC_VFWNMACCVV
+    UNSPEC_VFWMSACVV
+    UNSPEC_VFWNMSACVV
+])
+
+(define_int_iterator VF_OP_FUSEDW_VV_MASK [
+    UNSPEC_VFWMACCVV_MASK
+    UNSPEC_VFWNMACCVV_MASK
+    UNSPEC_VFWMSACVV_MASK
+    UNSPEC_VFWNMSACVV_MASK
+])
+
+(define_int_iterator VF_OP_FUSEDW_VF [
+    UNSPEC_VFWMACCVF
+    UNSPEC_VFWNMACCVF
+    UNSPEC_VFWMSACVF
+    UNSPEC_VFWNMSACVF
+])
+
+(define_int_iterator VF_OP_FUSEDW_VF_MASK [
+    UNSPEC_VFWMACCVF_MASK
+    UNSPEC_VFWNMACCVF_MASK
+    UNSPEC_VFWMSACVF_MASK
+    UNSPEC_VFWNMSACVF_MASK
+])
+
+;; Floating-point reductions, ordered (vfredosum) and unordered.
+(define_int_iterator VF_OP_VS [
+    UNSPEC_VFREDOSUMVS
+    UNSPEC_VFREDSUMVS
+    UNSPEC_VFREDMAXVS
+    UNSPEC_VFREDMINVS
+])
+
+(define_int_iterator VF_OP_VS_MASK [
+    UNSPEC_VFREDOSUMVS_MASK
+    UNSPEC_VFREDSUMVS_MASK
+    UNSPEC_VFREDMAXVS_MASK
+    UNSPEC_VFREDMINVS_MASK
+])
+
+(define_int_iterator VF_OPW_VS [
+    UNSPEC_VFWREDOSUMVS
+    UNSPEC_VFWREDSUMVS
+])
+
+(define_int_iterator VF_OPW_VS_MASK [
+    UNSPEC_VFWREDOSUMVS_MASK
+    UNSPEC_VFWREDSUMVS_MASK
+])
+
+
+;; Map each unmasked UNSPEC code to the assembler mnemonic it is printed
+;; as; used by output templates via <vop>.
+(define_int_attr vop [
+    (UNSPEC_VMSEQVV         "vmseq")
+    (UNSPEC_VMSNEVV         "vmsne")
+    (UNSPEC_VMSLTVV         "vmslt")
+    (UNSPEC_VMSLTUVV        "vmsltu")
+    (UNSPEC_VMSLEVV         "vmsle")
+    (UNSPEC_VMSLEUVV        "vmsleu")
+    (UNSPEC_VMSEQVX         "vmseq")
+    (UNSPEC_VMSNEVX         "vmsne")
+    (UNSPEC_VMSLTVX         "vmslt")
+    (UNSPEC_VMSLTUVX        "vmsltu")
+    (UNSPEC_VMSLEVX         "vmsle")
+    (UNSPEC_VMSLEUVX        "vmsleu")
+    (UNSPEC_VMSGTVX         "vmsgt")
+    (UNSPEC_VMSGTUVX        "vmsgtu")
+    (UNSPEC_VMSGEVX         "vmsge")
+    (UNSPEC_VMSGEUVX        "vmsgeu")
+    (UNSPEC_VMSEQVI         "vmseq")
+    (UNSPEC_VMSNEVI         "vmsne")
+    (UNSPEC_VMSLEVI         "vmsle")
+    (UNSPEC_VMSLEUVI        "vmsleu")
+    (UNSPEC_VMSGTVI         "vmsgt")
+    (UNSPEC_VMSGTUVI        "vmsgtu")
+    (UNSPEC_VMAND           "vmand")
+    (UNSPEC_VMNAND          "vmnand")
+    (UNSPEC_VMANDNOT        "vmandnot")
+    (UNSPEC_VMXOR           "vmxor")
+    (UNSPEC_VMOR            "vmor")
+    (UNSPEC_VMNOR           "vmnor")
+    (UNSPEC_VMORNOT         "vmornot")
+    (UNSPEC_VMXNOR          "vmxnor")
+    (UNSPEC_VMCPY           "vmcpy")
+    (UNSPEC_VMCLR           "vmclr")
+    (UNSPEC_VMSET           "vmset")
+    (UNSPEC_VMNOT           "vmnot")
+    (UNSPEC_VMPOPC          "vmpopc")
+    (UNSPEC_VMFIRST         "vmfirst")
+    (UNSPEC_VMSBF           "vmsbf")
+    (UNSPEC_VMSIF           "vmsif")
+    (UNSPEC_VMSOF           "vmsof")
+    (UNSPEC_VANDVV          "vand")
+    (UNSPEC_VORVV           "vor")
+    (UNSPEC_VXORVV          "vxor")
+    (UNSPEC_VANDVX          "vand")
+    (UNSPEC_VORVX           "vor")
+    (UNSPEC_VXORVX          "vxor")
+    (UNSPEC_VANDVI          "vand")
+    (UNSPEC_VORVI           "vor")
+    (UNSPEC_VXORVI          "vxor")
+    (UNSPEC_VSLLVV          "vsll")
+    (UNSPEC_VSRLVV          "vsrl")
+    (UNSPEC_VSRAVV          "vsra")
+    (UNSPEC_VSLLVX          "vsll")
+    (UNSPEC_VSRLVX          "vsrl")
+    (UNSPEC_VSRAVX          "vsra")
+    (UNSPEC_VSLLVI          "vsll")
+    (UNSPEC_VSRLVI          "vsrl")
+    (UNSPEC_VSRAVI          "vsra")
+    (UNSPEC_VNSRLVV         "vnsrl")
+    (UNSPEC_VNSRAVV         "vnsra")
+    (UNSPEC_VNSRLVX         "vnsrl")
+    (UNSPEC_VNSRAVX         "vnsra")
+    (UNSPEC_VNSRLVI         "vnsrl")
+    (UNSPEC_VNSRAVI         "vnsra")
+    (UNSPEC_VADDVV          "vadd")
+    (UNSPEC_VSUBVV          "vsub")
+    (UNSPEC_VADDVX          "vadd")
+    (UNSPEC_VSUBVX          "vsub")
+    (UNSPEC_VRSUBVX         "vrsub")
+    (UNSPEC_VADDVI          "vadd")
+    (UNSPEC_VRSUBVI         "vrsub")
+    (UNSPEC_VDIVUVV         "vdivu")
+    (UNSPEC_VDIVVV          "vdiv")
+    (UNSPEC_VREMUVV         "vremu")
+    (UNSPEC_VREMVV          "vrem")
+    (UNSPEC_VDIVUVX         "vdivu")
+    (UNSPEC_VDIVVX          "vdiv")
+    (UNSPEC_VREMUVX         "vremu")
+    (UNSPEC_VREMVX          "vrem")
+    (UNSPEC_VMINUVV         "vminu")
+    (UNSPEC_VMINVV          "vmin")
+    (UNSPEC_VMAXUVV         "vmaxu")
+    (UNSPEC_VMAXVV          "vmax")
+    (UNSPEC_VMINUVX         "vminu")
+    (UNSPEC_VMINVX          "vmin")
+    (UNSPEC_VMAXUVX         "vmaxu")
+    (UNSPEC_VMAXVX          "vmax")
+    (UNSPEC_VMULVV          "vmul")
+    (UNSPEC_VMULHVV         "vmulh")
+    (UNSPEC_VMULHUVV        "vmulhu")
+    (UNSPEC_VMULHSUVV       "vmulhsu")
+    (UNSPEC_VMULVX          "vmul")
+    (UNSPEC_VMULHVX         "vmulh")
+    (UNSPEC_VMULHUVX        "vmulhu")
+    (UNSPEC_VMULHSUVX       "vmulhsu")
+    (UNSPEC_VMACCVV         "vmacc")
+    (UNSPEC_VNMSACVV        "vnmsac")
+    (UNSPEC_VMADDVV         "vmadd")
+    (UNSPEC_VNMSUBVV        "vnmsub")
+    (UNSPEC_VMACCVX         "vmacc")
+    (UNSPEC_VNMSACVX        "vnmsac")
+    (UNSPEC_VMADDVX         "vmadd")
+    (UNSPEC_VNMSUBVX        "vnmsub")
+    (UNSPEC_VWADDUVV        "vwaddu")
+    (UNSPEC_VWSUBUVV        "vwsubu")
+    (UNSPEC_VWADDVV         "vwadd")
+    (UNSPEC_VWSUBVV         "vwsub")
+    (UNSPEC_VWADDUVX        "vwaddu")
+    (UNSPEC_VWSUBUVX        "vwsubu")
+    (UNSPEC_VWADDVX         "vwadd")
+    (UNSPEC_VWSUBVX         "vwsub")
+    (UNSPEC_VWADDUWV        "vwaddu")
+    (UNSPEC_VWSUBUWV        "vwsubu")
+    (UNSPEC_VWADDWV         "vwadd")
+    (UNSPEC_VWSUBWV         "vwsub")
+    (UNSPEC_VWADDUWX        "vwaddu")
+    (UNSPEC_VWSUBUWX        "vwsubu")
+    (UNSPEC_VWADDWX         "vwadd")
+    (UNSPEC_VWSUBWX         "vwsub")
+    (UNSPEC_VWMULVV         "vwmul")
+    (UNSPEC_VWMULUVV        "vwmulu")
+    (UNSPEC_VWMULSUVV       "vwmulsu")
+    (UNSPEC_VWMULVX         "vwmul")
+    (UNSPEC_VWMULUVX        "vwmulu")
+    (UNSPEC_VWMULSUVX       "vwmulsu")
+    (UNSPEC_VWMACCUVV       "vwmaccu")
+    (UNSPEC_VWMACCVV        "vwmacc")
+    (UNSPEC_VWMACCSUVV      "vwmaccsu")
+    (UNSPEC_VWMACCUVX       "vwmaccu")
+    (UNSPEC_VWMACCVX        "vwmacc")
+    (UNSPEC_VWMACCSUVX      "vwmaccsu")
+    (UNSPEC_VWMACCUSVX      "vwmaccus")
+    (UNSPEC_VREDSUMVS       "vredsum")
+    (UNSPEC_VREDMAXUVS      "vredmaxu")
+    (UNSPEC_VREDMAXVS       "vredmax")
+    (UNSPEC_VREDMINUVS      "vredminu")
+    (UNSPEC_VREDMINVS       "vredmin")
+    (UNSPEC_VREDANDVS       "vredand")
+    (UNSPEC_VREDORVS        "vredor")
+    (UNSPEC_VREDXORVS       "vredxor")
+    (UNSPEC_VWREDSUMUVS     "vwredsumu")
+    (UNSPEC_VWREDSUMVS      "vwredsum")
+    (UNSPEC_VSLIDEUPVX      "vslideup")
+    (UNSPEC_VSLIDEDOWNVX    "vslidedown")
+    (UNSPEC_VSLIDE1UPVX     "vslide1up")
+    (UNSPEC_VSLIDE1DOWNVX   "vslide1down")
+    (UNSPEC_VSLIDEUPVI      "vslideup")
+    (UNSPEC_VSLIDEDOWNVI    "vslidedown")
+    (UNSPEC_VRGATHERVV      "vrgather")
+    (UNSPEC_VRGATHERVX      "vrgather")
+    (UNSPEC_VRGATHERVI      "vrgather")
+    (UNSPEC_VSADDUVV        "vsaddu")
+    (UNSPEC_VSADDUVX        "vsaddu")
+    (UNSPEC_VSADDUVI        "vsaddu")
+    (UNSPEC_VSADDVV         "vsadd")
+    (UNSPEC_VSADDVX         "vsadd")
+    (UNSPEC_VSADDVI         "vsadd")
+    (UNSPEC_VSSUBUVV        "vssubu")
+    (UNSPEC_VSSUBUVX        "vssubu")
+    (UNSPEC_VSSUBVV         "vssub")
+    (UNSPEC_VSSUBVX         "vssub")
+    (UNSPEC_VAADDVV         "vaadd")
+    (UNSPEC_VAADDVX         "vaadd")
+    (UNSPEC_VAADDVI         "vaadd")
+    (UNSPEC_VASUBVV         "vasub")
+    (UNSPEC_VASUBVX         "vasub")
+    (UNSPEC_VSMULVV         "vsmul")
+    (UNSPEC_VSMULVX         "vsmul")
+    (UNSPEC_VWSMACCUVV      "vwsmaccu")
+    (UNSPEC_VWSMACCUVX      "vwsmaccu")
+    (UNSPEC_VWSMACCVV       "vwsmacc")
+    (UNSPEC_VWSMACCVX       "vwsmacc")
+    (UNSPEC_VWSMACCSUVV     "vwsmaccsu")
+    (UNSPEC_VWSMACCSUVX     "vwsmaccsu")
+    (UNSPEC_VWSMACCUSVX     "vwsmaccus")
+    (UNSPEC_VSSRLVV         "vssrl")
+    (UNSPEC_VSSRLVX         "vssrl")
+    (UNSPEC_VSSRLVI         "vssrl")
+    (UNSPEC_VSSRAVV         "vssra")
+    (UNSPEC_VSSRAVX         "vssra")
+    (UNSPEC_VSSRAVI         "vssra")
+    (UNSPEC_VNCLIPUVV       "vnclipu")
+    (UNSPEC_VNCLIPUVX       "vnclipu")
+    (UNSPEC_VNCLIPUVI       "vnclipu")
+    (UNSPEC_VNCLIPVV        "vnclip")
+    (UNSPEC_VNCLIPVX        "vnclip")
+    (UNSPEC_VNCLIPVI        "vnclip")
+    (UNSPEC_VDOTVV          "vdot")
+])
+
+;; Mnemonic map for the masked (_MASK) UNSPEC variants; the base mnemonic
+;; is the same as for the unmasked form in <vop> above.
+(define_int_attr vop_mask [
+    (UNSPEC_VMSEQVV_MASK         "vmseq")
+    (UNSPEC_VMSNEVV_MASK         "vmsne")
+    (UNSPEC_VMSLTVV_MASK         "vmslt")
+    (UNSPEC_VMSLTUVV_MASK        "vmsltu")
+    (UNSPEC_VMSLEVV_MASK         "vmsle")
+    (UNSPEC_VMSLEUVV_MASK        "vmsleu")
+    (UNSPEC_VMSEQVX_MASK         "vmseq")
+    (UNSPEC_VMSNEVX_MASK         "vmsne")
+    (UNSPEC_VMSLTVX_MASK         "vmslt")
+    (UNSPEC_VMSLTUVX_MASK        "vmsltu")
+    (UNSPEC_VMSLEVX_MASK         "vmsle")
+    (UNSPEC_VMSLEUVX_MASK        "vmsleu")
+    (UNSPEC_VMSGTVX_MASK         "vmsgt")
+    (UNSPEC_VMSGTUVX_MASK        "vmsgtu")
+    (UNSPEC_VMSGEVX_MASK         "vmsge")
+    (UNSPEC_VMSGEUVX_MASK        "vmsgeu")
+    (UNSPEC_VMSEQVI_MASK         "vmseq")
+    (UNSPEC_VMSNEVI_MASK         "vmsne")
+    (UNSPEC_VMSLEVI_MASK         "vmsle")
+    (UNSPEC_VMSLEUVI_MASK        "vmsleu")
+    (UNSPEC_VMSGTVI_MASK         "vmsgt")
+    (UNSPEC_VMSGTUVI_MASK        "vmsgtu")
+    (UNSPEC_VMPOPC_MASK          "vmpopc")
+    (UNSPEC_VMFIRST_MASK         "vmfirst")
+    (UNSPEC_VMSBF_MASK           "vmsbf")
+    (UNSPEC_VMSIF_MASK           "vmsif")
+    (UNSPEC_VMSOF_MASK           "vmsof")
+    (UNSPEC_VANDVV_MASK          "vand")
+    (UNSPEC_VORVV_MASK           "vor")
+    (UNSPEC_VXORVV_MASK          "vxor")
+    (UNSPEC_VANDVX_MASK          "vand")
+    (UNSPEC_VORVX_MASK           "vor")
+    (UNSPEC_VXORVX_MASK          "vxor")
+    (UNSPEC_VANDVI_MASK          "vand")
+    (UNSPEC_VORVI_MASK           "vor")
+    (UNSPEC_VXORVI_MASK          "vxor")
+    (UNSPEC_VSLLVV_MASK          "vsll")
+    (UNSPEC_VSRLVV_MASK          "vsrl")
+    (UNSPEC_VSRAVV_MASK          "vsra")
+    (UNSPEC_VSLLVX_MASK          "vsll")
+    (UNSPEC_VSRLVX_MASK          "vsrl")
+    (UNSPEC_VSRAVX_MASK          "vsra")
+    (UNSPEC_VSLLVI_MASK          "vsll")
+    (UNSPEC_VSRLVI_MASK          "vsrl")
+    (UNSPEC_VSRAVI_MASK          "vsra")
+    (UNSPEC_VNSRLVV_MASK         "vnsrl")
+    (UNSPEC_VNSRAVV_MASK         "vnsra")
+    (UNSPEC_VNSRLVX_MASK         "vnsrl")
+    (UNSPEC_VNSRAVX_MASK         "vnsra")
+    (UNSPEC_VNSRLVI_MASK         "vnsrl")
+    (UNSPEC_VNSRAVI_MASK         "vnsra")
+    (UNSPEC_VADDVV_MASK          "vadd")
+    (UNSPEC_VSUBVV_MASK          "vsub")
+    (UNSPEC_VADDVX_MASK          "vadd")
+    (UNSPEC_VSUBVX_MASK          "vsub")
+    (UNSPEC_VRSUBVX_MASK         "vrsub")
+    (UNSPEC_VADDVI_MASK          "vadd")
+    (UNSPEC_VRSUBVI_MASK         "vrsub")
+    (UNSPEC_VDIVUVV_MASK         "vdivu")
+    (UNSPEC_VDIVVV_MASK          "vdiv")
+    (UNSPEC_VREMUVV_MASK         "vremu")
+    (UNSPEC_VREMVV_MASK          "vrem")
+    (UNSPEC_VDIVUVX_MASK         "vdivu")
+    (UNSPEC_VDIVVX_MASK          "vdiv")
+    (UNSPEC_VREMUVX_MASK         "vremu")
+    (UNSPEC_VREMVX_MASK          "vrem")
+    (UNSPEC_VMINUVV_MASK         "vminu")
+    (UNSPEC_VMINVV_MASK          "vmin")
+    (UNSPEC_VMAXUVV_MASK         "vmaxu")
+    (UNSPEC_VMAXVV_MASK          "vmax")
+    (UNSPEC_VMINUVX_MASK         "vminu")
+    (UNSPEC_VMINVX_MASK          "vmin")
+    (UNSPEC_VMAXUVX_MASK         "vmaxu")
+    (UNSPEC_VMAXVX_MASK          "vmax")
+    (UNSPEC_VMULVV_MASK          "vmul")
+    (UNSPEC_VMULHVV_MASK         "vmulh")
+    (UNSPEC_VMULHUVV_MASK        "vmulhu")
+    (UNSPEC_VMULHSUVV_MASK       "vmulhsu")
+    (UNSPEC_VMULVX_MASK          "vmul")
+    (UNSPEC_VMULHVX_MASK         "vmulh")
+    (UNSPEC_VMULHUVX_MASK        "vmulhu")
+    (UNSPEC_VMULHSUVX_MASK       "vmulhsu")
+    (UNSPEC_VMACCVV_MASK         "vmacc")
+    (UNSPEC_VNMSACVV_MASK        "vnmsac")
+    (UNSPEC_VMADDVV_MASK         "vmadd")
+    (UNSPEC_VNMSUBVV_MASK        "vnmsub")
+    (UNSPEC_VMACCVX_MASK         "vmacc")
+    (UNSPEC_VNMSACVX_MASK        "vnmsac")
+    (UNSPEC_VMADDVX_MASK         "vmadd")
+    (UNSPEC_VNMSUBVX_MASK        "vnmsub")
+    (UNSPEC_VWADDUVV_MASK        "vwaddu")
+    (UNSPEC_VWSUBUVV_MASK        "vwsubu")
+    (UNSPEC_VWADDVV_MASK         "vwadd")
+    (UNSPEC_VWSUBVV_MASK         "vwsub")
+    (UNSPEC_VWADDUVX_MASK        "vwaddu")
+    (UNSPEC_VWSUBUVX_MASK        "vwsubu")
+    (UNSPEC_VWADDVX_MASK         "vwadd")
+    (UNSPEC_VWSUBVX_MASK         "vwsub")
+    (UNSPEC_VWADDUWV_MASK        "vwaddu")
+    (UNSPEC_VWSUBUWV_MASK        "vwsubu")
+    (UNSPEC_VWADDWV_MASK         "vwadd")
+    (UNSPEC_VWSUBWV_MASK         "vwsub")
+    (UNSPEC_VWADDUWX_MASK        "vwaddu")
+    (UNSPEC_VWSUBUWX_MASK        "vwsubu")
+    (UNSPEC_VWADDWX_MASK         "vwadd")
+    (UNSPEC_VWSUBWX_MASK         "vwsub")
+    (UNSPEC_VWMULVV_MASK         "vwmul")
+    (UNSPEC_VWMULUVV_MASK        "vwmulu")
+    (UNSPEC_VWMULSUVV_MASK       "vwmulsu")
+    (UNSPEC_VWMULVX_MASK         "vwmul")
+    (UNSPEC_VWMULUVX_MASK        "vwmulu")
+    (UNSPEC_VWMULSUVX_MASK       "vwmulsu")
+    (UNSPEC_VWMACCUVV_MASK       "vwmaccu")
+    (UNSPEC_VWMACCVV_MASK        "vwmacc")
+    (UNSPEC_VWMACCSUVV_MASK      "vwmaccsu")
+    (UNSPEC_VWMACCUVX_MASK       "vwmaccu")
+    (UNSPEC_VWMACCVX_MASK        "vwmacc")
+    (UNSPEC_VWMACCSUVX_MASK      "vwmaccsu")
+    (UNSPEC_VWMACCUSVX_MASK      "vwmaccus")
+    (UNSPEC_VREDSUMVS_MASK       "vredsum")
+    (UNSPEC_VREDMAXUVS_MASK      "vredmaxu")
+    (UNSPEC_VREDMAXVS_MASK       "vredmax")
+    (UNSPEC_VREDMINUVS_MASK      "vredminu")
+    (UNSPEC_VREDMINVS_MASK       "vredmin")
+    (UNSPEC_VREDANDVS_MASK       "vredand")
+    (UNSPEC_VREDORVS_MASK        "vredor")
+    (UNSPEC_VREDXORVS_MASK       "vredxor")
+    (UNSPEC_VWREDSUMUVS_MASK     "vwredsumu")
+    (UNSPEC_VWREDSUMVS_MASK      "vwredsum")
+    (UNSPEC_VSLIDEUPVX_MASK      "vslideup")
+    (UNSPEC_VSLIDEDOWNVX_MASK    "vslidedown")
+    (UNSPEC_VSLIDE1UPVX_MASK     "vslide1up")
+    (UNSPEC_VSLIDE1DOWNVX_MASK   "vslide1down")
+    (UNSPEC_VSLIDEUPVI_MASK      "vslideup")
+    (UNSPEC_VSLIDEDOWNVI_MASK    "vslidedown")
+    (UNSPEC_VRGATHERVV_MASK      "vrgather")
+    (UNSPEC_VRGATHERVX_MASK      "vrgather")
+    (UNSPEC_VRGATHERVI_MASK      "vrgather")
+    (UNSPEC_VSADDUVV_MASK        "vsaddu")
+    (UNSPEC_VSADDUVX_MASK        "vsaddu")
+    (UNSPEC_VSADDUVI_MASK        "vsaddu")
+    (UNSPEC_VSADDVV_MASK         "vsadd")
+    (UNSPEC_VSADDVX_MASK         "vsadd")
+    (UNSPEC_VSADDVI_MASK         "vsadd")
+    (UNSPEC_VSSUBUVV_MASK        "vssubu")
+    (UNSPEC_VSSUBUVX_MASK        "vssubu")
+    (UNSPEC_VSSUBVV_MASK         "vssub")
+    (UNSPEC_VSSUBVX_MASK         "vssub")
+    (UNSPEC_VAADDVV_MASK         "vaadd")
+    (UNSPEC_VAADDVX_MASK         "vaadd")
+    (UNSPEC_VAADDVI_MASK         "vaadd")
+    (UNSPEC_VASUBVV_MASK         "vasub")
+    (UNSPEC_VASUBVX_MASK         "vasub")
+    (UNSPEC_VSMULVV_MASK         "vsmul")
+    (UNSPEC_VSMULVX_MASK         "vsmul")
+    (UNSPEC_VWSMACCUVV_MASK      "vwsmaccu")
+    (UNSPEC_VWSMACCUVX_MASK      "vwsmaccu")
+    (UNSPEC_VWSMACCVV_MASK       "vwsmacc")
+    (UNSPEC_VWSMACCVX_MASK       "vwsmacc")
+    (UNSPEC_VWSMACCSUVV_MASK     "vwsmaccsu")
+    (UNSPEC_VWSMACCSUVX_MASK     "vwsmaccsu")
+    (UNSPEC_VWSMACCUSVX_MASK     "vwsmaccus")
+    (UNSPEC_VSSRLVV_MASK         "vssrl")
+    (UNSPEC_VSSRLVX_MASK         "vssrl")
+    (UNSPEC_VSSRLVI_MASK         "vssrl")
+    (UNSPEC_VSSRAVV_MASK         "vssra")
+    (UNSPEC_VSSRAVX_MASK         "vssra")
+    (UNSPEC_VSSRAVI_MASK         "vssra")
+    (UNSPEC_VNCLIPUVV_MASK       "vnclipu")
+    (UNSPEC_VNCLIPUVX_MASK       "vnclipu")
+    (UNSPEC_VNCLIPUVI_MASK       "vnclipu")
+    (UNSPEC_VNCLIPVV_MASK        "vnclip")
+    (UNSPEC_VNCLIPVX_MASK        "vnclip")
+    (UNSPEC_VNCLIPVI_MASK        "vnclip")
+    (UNSPEC_VADCVVM_MASK         "vadc")
+    (UNSPEC_VADCVXM_MASK         "vadc")
+    (UNSPEC_VSBCVVM_MASK         "vsbc")
+    (UNSPEC_VSBCVXM_MASK         "vsbc")
+    (UNSPEC_VMADCVVM_MASK        "vmadc")
+    (UNSPEC_VMADCVXM_MASK        "vmadc")
+    (UNSPEC_VMSBCVVM_MASK        "vmsbc")
+    (UNSPEC_VMSBCVXM_MASK        "vmsbc")
+    (UNSPEC_VDOTVV_MASK          "vdot")
+])
+
+;; Mnemonics for floating-point compare operations whose destination is a
+;; mask register (covers both unmasked and _MASK UNSPECs).
+(define_int_attr vf_opm_attr [
+    (UNSPEC_VMFEQVV "vmfeq") (UNSPEC_VMFEQVV_MASK "vmfeq")
+    (UNSPEC_VMFEQVF "vmfeq") (UNSPEC_VMFEQVF_MASK "vmfeq")
+    (UNSPEC_VMFNEVV "vmfne") (UNSPEC_VMFNEVV_MASK "vmfne")
+    (UNSPEC_VMFNEVF "vmfne") (UNSPEC_VMFNEVF_MASK "vmfne")
+    (UNSPEC_VMFLTVV "vmflt") (UNSPEC_VMFLTVV_MASK "vmflt")
+    (UNSPEC_VMFGTVV "vmfgt") (UNSPEC_VMFGTVV_MASK "vmfgt")
+    (UNSPEC_VMFLTVF "vmflt") (UNSPEC_VMFLTVF_MASK "vmflt")
+    (UNSPEC_VMFLEVV "vmfle") (UNSPEC_VMFLEVV_MASK "vmfle")
+    (UNSPEC_VMFGEVV "vmfge") (UNSPEC_VMFGEVV_MASK "vmfge")
+    (UNSPEC_VMFLEVF "vmfle") (UNSPEC_VMFLEVF_MASK "vmfle")
+    (UNSPEC_VMFGTVF "vmfgt") (UNSPEC_VMFGTVF_MASK "vmfgt")
+    (UNSPEC_VMFGEVF "vmfge") (UNSPEC_VMFGEVF_MASK "vmfge")
+    (UNSPEC_VMFORDVV "vmford") (UNSPEC_VMFORDVV_MASK "vmford")
+    (UNSPEC_VMFORDVF "vmford") (UNSPEC_VMFORDVF_MASK "vmford")
+])
+
+;; Mnemonics for single-width floating-point arithmetic whose destination
+;; is a floating-point vector register.
+(define_int_attr vf_op_attr [
+    (UNSPEC_VFADDVV "vfadd") (UNSPEC_VFADDVV_MASK "vfadd")
+    (UNSPEC_VFADDVF "vfadd") (UNSPEC_VFADDVF_MASK "vfadd")
+    (UNSPEC_VFSUBVV "vfsub") (UNSPEC_VFSUBVV_MASK "vfsub")
+    (UNSPEC_VFSUBVF "vfsub") (UNSPEC_VFSUBVF_MASK "vfsub")
+    (UNSPEC_VFRSUBVF "vfrsub") (UNSPEC_VFRSUBVF_MASK "vfrsub")
+    (UNSPEC_VFMULVV "vfmul") (UNSPEC_VFMULVV_MASK "vfmul")
+    (UNSPEC_VFMULVF "vfmul") (UNSPEC_VFMULVF_MASK "vfmul")
+    (UNSPEC_VFDIVVV "vfdiv") (UNSPEC_VFDIVVV_MASK "vfdiv")
+    (UNSPEC_VFDIVVF "vfdiv") (UNSPEC_VFDIVVF_MASK "vfdiv")
+    (UNSPEC_VFRDIVVF "vfrdiv") (UNSPEC_VFRDIVVF_MASK "vfrdiv")
+    (UNSPEC_VFMINVV "vfmin") (UNSPEC_VFMINVV_MASK "vfmin")
+    (UNSPEC_VFMINVF "vfmin") (UNSPEC_VFMINVF_MASK "vfmin")
+    (UNSPEC_VFMAXVV "vfmax") (UNSPEC_VFMAXVV_MASK "vfmax")
+    (UNSPEC_VFMAXVF "vfmax") (UNSPEC_VFMAXVF_MASK "vfmax")
+    (UNSPEC_VFSGNJVV "vfsgnj") (UNSPEC_VFSGNJVV_MASK "vfsgnj")
+    (UNSPEC_VFSGNJVF "vfsgnj") (UNSPEC_VFSGNJVF_MASK "vfsgnj")
+    (UNSPEC_VFSGNJNVV "vfsgnjn") (UNSPEC_VFSGNJNVV_MASK "vfsgnjn")
+    (UNSPEC_VFSGNJNVF "vfsgnjn") (UNSPEC_VFSGNJNVF_MASK "vfsgnjn")
+    (UNSPEC_VFSGNJXVV "vfsgnjx") (UNSPEC_VFSGNJXVV_MASK "vfsgnjx")
+    (UNSPEC_VFSGNJXVF "vfsgnjx") (UNSPEC_VFSGNJXVF_MASK "vfsgnjx")
+    (UNSPEC_VFDOTVV "vfdot") (UNSPEC_VFDOTVV_MASK "vfdot")
+])
+
+;; Mnemonics for widening (vfw*) floating-point arithmetic.
+(define_int_attr vf_opw_attr [
+    (UNSPEC_VFWADDVV "vfwadd") (UNSPEC_VFWADDVV_MASK "vfwadd")
+    (UNSPEC_VFWADDVF "vfwadd") (UNSPEC_VFWADDVF_MASK "vfwadd")
+    (UNSPEC_VFWSUBVV "vfwsub") (UNSPEC_VFWSUBVV_MASK "vfwsub")
+    (UNSPEC_VFWSUBVF "vfwsub") (UNSPEC_VFWSUBVF_MASK "vfwsub")
+    (UNSPEC_VFWADDWV "vfwadd") (UNSPEC_VFWADDWV_MASK "vfwadd")
+    (UNSPEC_VFWADDWF "vfwadd") (UNSPEC_VFWADDWF_MASK "vfwadd")
+    (UNSPEC_VFWSUBWV "vfwsub") (UNSPEC_VFWSUBWV_MASK "vfwsub")
+    (UNSPEC_VFWSUBWF "vfwsub") (UNSPEC_VFWSUBWF_MASK "vfwsub")
+    (UNSPEC_VFWMULVV "vfwmul") (UNSPEC_VFWMULVV_MASK "vfwmul")
+    (UNSPEC_VFWMULVF "vfwmul") (UNSPEC_VFWMULVF_MASK "vfwmul")
+])
+
+;; Mnemonics for fused floating-point multiply-add operations.
+(define_int_attr vf_op_fused_attr [
+    (UNSPEC_VFMACCVV "vfmacc") (UNSPEC_VFMACCVV_MASK "vfmacc")
+    (UNSPEC_VFMACCVF "vfmacc") (UNSPEC_VFMACCVF_MASK "vfmacc")
+    (UNSPEC_VFNMACCVV "vfnmacc") (UNSPEC_VFNMACCVV_MASK "vfnmacc")
+    (UNSPEC_VFNMACCVF "vfnmacc") (UNSPEC_VFNMACCVF_MASK "vfnmacc")
+    (UNSPEC_VFMSACVV "vfmsac") (UNSPEC_VFMSACVV_MASK "vfmsac")
+    (UNSPEC_VFMSACVF "vfmsac") (UNSPEC_VFMSACVF_MASK "vfmsac")
+    (UNSPEC_VFNMSACVV "vfnmsac") (UNSPEC_VFNMSACVV_MASK "vfnmsac")
+    (UNSPEC_VFNMSACVF "vfnmsac") (UNSPEC_VFNMSACVF_MASK "vfnmsac")
+    (UNSPEC_VFMADDVV "vfmadd") (UNSPEC_VFMADDVV_MASK "vfmadd")
+    (UNSPEC_VFMADDVF "vfmadd") (UNSPEC_VFMADDVF_MASK "vfmadd")
+    (UNSPEC_VFNMADDVV "vfnmadd") (UNSPEC_VFNMADDVV_MASK "vfnmadd")
+    (UNSPEC_VFNMADDVF "vfnmadd") (UNSPEC_VFNMADDVF_MASK "vfnmadd")
+    (UNSPEC_VFMSUBVV "vfmsub") (UNSPEC_VFMSUBVV_MASK "vfmsub")
+    (UNSPEC_VFMSUBVF "vfmsub") (UNSPEC_VFMSUBVF_MASK "vfmsub")
+    (UNSPEC_VFNMSUBVV "vfnmsub") (UNSPEC_VFNMSUBVV_MASK "vfnmsub")
+    (UNSPEC_VFNMSUBVF "vfnmsub") (UNSPEC_VFNMSUBVF_MASK "vfnmsub")
+])
+
+;; Mnemonics for fused floating-point multiply-add with a widening result.
+(define_int_attr vf_op_fusedw_attr [
+    (UNSPEC_VFWMACCVV "vfwmacc") (UNSPEC_VFWMACCVV_MASK "vfwmacc")
+    (UNSPEC_VFWMACCVF "vfwmacc") (UNSPEC_VFWMACCVF_MASK "vfwmacc")
+    (UNSPEC_VFWNMACCVV "vfwnmacc") (UNSPEC_VFWNMACCVV_MASK "vfwnmacc")
+    (UNSPEC_VFWNMACCVF "vfwnmacc") (UNSPEC_VFWNMACCVF_MASK "vfwnmacc")
+    (UNSPEC_VFWMSACVV "vfwmsac") (UNSPEC_VFWMSACVV_MASK "vfwmsac")
+    (UNSPEC_VFWMSACVF "vfwmsac") (UNSPEC_VFWMSACVF_MASK "vfwmsac")
+    (UNSPEC_VFWNMSACVV "vfwnmsac") (UNSPEC_VFWNMSACVV_MASK "vfwnmsac")
+    (UNSPEC_VFWNMSACVF "vfwnmsac") (UNSPEC_VFWNMSACVF_MASK "vfwnmsac")
+])
+
+;; Mnemonics for floating-point reductions.
+(define_int_attr vf_op_vs_attr [
+    (UNSPEC_VFREDOSUMVS "vfredosum") (UNSPEC_VFREDOSUMVS_MASK "vfredosum")
+    (UNSPEC_VFREDSUMVS "vfredsum") (UNSPEC_VFREDSUMVS_MASK "vfredsum")
+    (UNSPEC_VFREDMAXVS "vfredmax") (UNSPEC_VFREDMAXVS_MASK "vfredmax")
+    (UNSPEC_VFREDMINVS "vfredmin") (UNSPEC_VFREDMINVS_MASK "vfredmin")
+])
+
+;; Mnemonics for widening floating-point reductions.
+(define_int_attr vf_opw_vs_attr [
+    (UNSPEC_VFWREDOSUMVS "vfwredosum") (UNSPEC_VFWREDOSUMVS_MASK "vfwredosum")
+    (UNSPEC_VFWREDSUMVS "vfwredsum") (UNSPEC_VFWREDSUMVS_MASK "vfwredsum")
+])
diff --git a/gcc/config/riscv/predicates.md b/gcc/config/riscv/predicates.md
index 83fc4bd663d..7a90eaf81b8 100644
--- a/gcc/config/riscv/predicates.md
+++ b/gcc/config/riscv/predicates.md
@@ -27,6 +27,14 @@
   (ior (match_operand 0 "const_arith_operand")
        (match_operand 0 "register_operand")))

+(define_predicate "const_K_operand"  ;; 5-bit unsigned immediate: [0, 31]
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 31)")))
+
+(define_predicate "const_M_operand"  ;; 5-bit signed immediate: [-16, 15]
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), -16, 15)")))
+
 (define_predicate "const_csr_operand"
   (and (match_code "const_int")
        (match_test "IN_RANGE (INTVAL (op), 0, 31)")))
@@ -198,3 +206,19 @@

 (define_predicate "fp_branch_comparison"
   (match_code "unordered,ordered,unlt,unge,unle,ungt,uneq,ltgt,ne,eq,lt,le,gt,ge"))
+
+(define_predicate "vmask_mode_register_operand"  ;; register holding a vector mask
+  (match_operand 0 "register_operand")
+  {
+     if (TARGET_VECTOR_VLEN(64))  /* VLEN=64: the mask register is V8QImode.  */
+       return GET_MODE (op) == V8QImode;
+     else if (TARGET_VECTOR_VLEN(128))  /* VLEN=128: the mask register is V16QImode.  */
+       return GET_MODE (op) == V16QImode;
+     else
+       gcc_unreachable ();  /* NOTE(review): ICEs for any other VLEN -- confirm only 64/128 are configurable.  */
+  }
+)
+
+(define_predicate "riscv_vector_mem_operand"  ;; MEM whose address is legal for vector loads/stores
+  (and (match_code "mem")
+       (match_test "riscv_legitimize_address_vector_p (XEXP(op, 0), GET_MODE(op))")))
diff --git a/gcc/config/riscv/riscv-modes.def b/gcc/config/riscv/riscv-modes.def
index ef2243555c4..6401132beba 100644
--- a/gcc/config/riscv/riscv-modes.def
+++ b/gcc/config/riscv/riscv-modes.def
@@ -19,4 +19,44 @@ You should have received a copy of the GNU General Public License
 along with GCC; see the file COPYING3.  If not see
 <http://www.gnu.org/licenses/>.  */

+FLOAT_MODE (HF, 2, ieee_half_format);  /* IEEE binary16 half-precision float.  */
 FLOAT_MODE (TF, 16, ieee_quad_format);
+
+/* Vector modes. */
+VECTOR_MODES (INT, 4);        /*              V4QI V2HI */
+VECTOR_MODES (INT, 8);        /*         V8QI V4HI V2SI */
+VECTOR_MODES (INT, 16);       /*   V16QI V8HI V4SI V2DI */
+VECTOR_MODES (INT, 32);       /*  V32QI V16HI V8SI V4DI V2TI */
+VECTOR_MODES (INT, 64);       /* V64QI V32HI V16SI V8DI V4TI */
+VECTOR_MODES (INT, 128);      /* V128QI V64HI V32SI V16DI V8TI */
+
+VECTOR_MODES (FLOAT, 8);      /*                   V2SF */
+VECTOR_MODES (FLOAT, 16);     /*              V4SF V2DF */
+VECTOR_MODES (FLOAT, 32);     /*         V8SF V4DF V2TF */
+VECTOR_MODES (FLOAT, 64);     /*        V16SF V8DF V4TF */
+VECTOR_MODES (FLOAT, 128);    /*       V32SF V16DF V8TF */
+
+VECTOR_MODE (FLOAT, DF, 1);   /* V1DF */
+VECTOR_MODE (FLOAT, TF, 1);   /* V1TF */
+VECTOR_MODE (INT, DI, 1);     /* V1DI */
+VECTOR_MODE (INT, TI, 1);     /* V1TI */
+
+VECTOR_MODES (INT, 24);       /* Non-power-of-2 sizes -- presumably for grouped-register (LMUL) layouts; verify.  */
+VECTOR_MODES (INT, 40);
+VECTOR_MODES (INT, 48);
+VECTOR_MODES (INT, 56);
+VECTOR_MODES (INT, 80);
+VECTOR_MODES (INT, 96);
+VECTOR_MODES (INT, 112);
+
+VECTOR_MODES (FLOAT, 24);     /* Non-power-of-2 float sizes, matching the INT set above.  */
+VECTOR_MODES (FLOAT, 40);
+VECTOR_MODES (FLOAT, 48);
+VECTOR_MODES (FLOAT, 56);
+VECTOR_MODES (FLOAT, 80);
+VECTOR_MODES (FLOAT, 96);
+VECTOR_MODES (FLOAT, 112);
+
+INT_MODE (OI, 32);            /* 256-bit integer mode.  */
+INT_MODE (XI, 64);            /* 512-bit integer mode.  */
+INT_MODE (ZI, 128);           /* 1024-bit integer mode.  */
diff --git a/gcc/config/riscv/riscv-protos.h b/gcc/config/riscv/riscv-protos.h
index 1bfc65e6d9f..f64f5d65b56 100644
--- a/gcc/config/riscv/riscv-protos.h
+++ b/gcc/config/riscv/riscv-protos.h
@@ -59,6 +59,14 @@ extern const char *riscv_output_return ();
 extern void riscv_expand_int_scc (rtx, enum rtx_code, rtx, rtx);
 extern void riscv_expand_float_scc (rtx, enum rtx_code, rtx, rtx);
 extern void riscv_expand_conditional_branch (rtx, enum rtx_code, rtx, rtx);
+extern bool riscv_legitimize_address_vector_p (rtx, machine_mode);
+extern rtx riscv_emit_vsetvli_max (machine_mode);
+extern rtx riscv_emit_vsetvli (machine_mode, rtx, rtx);
+extern rtx riscv_emit_vsetvli_base (machine_mode, rtx, rtx, rtx);
+extern const char *riscv_output_vector_insn (machine_mode, const char *);
+extern const char *riscv_output_vector_sew (int);
+extern const char *riscv_output_vector_lmul (int);
+extern int sched_finish_global;
 #endif
 extern rtx riscv_legitimize_call_address (rtx);
 extern void riscv_set_return_address (rtx, rtx);
diff --git a/gcc/config/riscv/riscv-v-float.md b/gcc/config/riscv/riscv-v-float.md
new file mode 100755
index 00000000000..3c5c14e353f
--- /dev/null
+++ b/gcc/config/riscv/riscv-v-float.md
@@ -0,0 +1,2121 @@
+;; Floating-point / integer type-conversion instructions.
+(define_insn_and_split "riscv_vfcvtxfv_<mode>"  ;; vfcvt.x.f.v: FP to signed-int convert, VL in operand 2
+  [(set (match_operand:VTDSH 0 "register_operand" "=v")
+        (unspec:VTDSH [(match_operand:<V_CVTF_ATTR> 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFCVTXFV))]
+  "TARGET_VECTOR"
+  "#"  ;; never emitted directly; split below into vsetvli + the real insn
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);  /* configure VL/type for <MODE>  */
+    emit_insn(gen_riscv_vfcvt_x_f_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfcvtxfv_mask_<mode>"  ;; masked variant: op1 = merge dest, op3 = mask register
+  [(set (match_operand:VTDSH 0 "register_operand" "=u")
+        (unspec:VTDSH [(match_operand:VTDSH 1 "register_operand" "0")
+                       (match_operand:<V_CVTF_ATTR> 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFCVTXFV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfcvt_x_f_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfcvtxufv_<mode>"
+  [(set (match_operand:VTDSH 0 "register_operand" "=v")
+        (unspec:VTDSH [(match_operand:<V_CVTF_ATTR> 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFCVTXUFV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfcvt_xu_f_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfcvtxufv_mask_<mode>"
+  [(set (match_operand:VTDSH 0 "register_operand" "=u")
+        (unspec:VTDSH [(match_operand:VTDSH 1 "register_operand" "0")
+                       (match_operand:<V_CVTF_ATTR> 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFCVTXUFV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfcvt_xu_f_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfcvtfxv_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:<VF_CVTX_ATTR> 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFCVTFXV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfcvt_f_x_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfcvtfxv_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:<VF_CVTX_ATTR> 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFCVTFXV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfcvt_f_x_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfcvtfxuv_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:<VF_CVTX_ATTR> 1 "register_operand" "v")
+                (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFCVTFXUV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfcvt_f_xu_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfcvtfxuv_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:<VF_CVTX_ATTR> 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFCVTFXUV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfcvt_f_xu_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfncvtxfv_<mode>"
+  [(set (match_operand:VNARROWER 0 "register_operand" "=&v")
+        (unspec:VNARROWER [(match_operand:<VNARROW_F_ATTR> 1 "register_operand" "v")
+                           (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFNCVTXFV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfncvt_x_f_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfncvtxfv_mask_<mode>"
+  [(set (match_operand:VNARROWER 0 "register_operand" "=&u")
+        (unspec:VNARROWER [(match_operand:VNARROWER 1 "register_operand" "0")
+                           (match_operand:<VNARROW_F_ATTR> 2 "register_operand" "u")
+                           (match_operand 3 "vmask_mode_register_operand" "w")
+                           (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFNCVTXFV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfncvt_x_f_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfncvtxufv_<mode>"
+  [(set (match_operand:VNARROWER 0 "register_operand" "=&v")
+        (unspec:VNARROWER [(match_operand:<VNARROW_F_ATTR> 1 "register_operand" "v")
+                           (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFNCVTXUFV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfncvt_xu_f_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfncvtxufv_mask_<mode>"
+  [(set (match_operand:VNARROWER 0 "register_operand" "=&u")
+        (unspec:VNARROWER [(match_operand:VNARROWER 1 "register_operand" "0")
+                           (match_operand:<VNARROW_F_ATTR> 2 "register_operand" "u")
+                           (match_operand 3 "vmask_mode_register_operand" "w")
+                           (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFNCVTXUFV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfncvt_xu_f_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfncvtfxv_<mode>"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=&v")
+        (unspec:VFNARROW [(match_operand:<VFNARROW_X_ATTR> 1 "register_operand" "v")
+                          (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFNCVTFXV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfncvt_f_x_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfncvtfxv_mask_<mode>"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=&u")
+        (unspec:VFNARROW [(match_operand:VFNARROW 1 "register_operand" "0")
+                          (match_operand:<VFNARROW_X_ATTR> 2 "register_operand" "u")
+                          (match_operand 3 "vmask_mode_register_operand" "w")
+                          (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFNCVTFXV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfncvt_f_x_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfncvtfxuv_<mode>"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=&v")
+        (unspec:VFNARROW [(match_operand:<VFNARROW_X_ATTR> 1 "register_operand" "v")
+                          (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFNCVTFXUV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfncvt_f_xu_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfncvtfxuv_mask_<mode>"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=&u")
+        (unspec:VFNARROW [(match_operand:VFNARROW 1 "register_operand" "0")
+                          (match_operand:<VFNARROW_X_ATTR> 2 "register_operand" "u")
+                          (match_operand 3 "vmask_mode_register_operand" "w")
+                          (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFNCVTFXUV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfncvt_f_xu_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfwcvtxfv_<mode>"
+  [(set (match_operand:VWIDEN_FROM_FLOAT 0 "register_operand" "=&v")
+        (unspec:VWIDEN_FROM_FLOAT [(match_operand:<VWIDEN_F_ATTR> 1 "register_operand" "v")
+                                   (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFWCVTXFV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VWIDEN_F_ATTR>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfwcvt_x_f_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfwcvtxfv_mask_<mode>"
+  [(set (match_operand:VWIDEN_FROM_FLOAT 0 "register_operand" "=&u")
+        (unspec:VWIDEN_FROM_FLOAT [(match_operand:VWIDEN_FROM_FLOAT 1 "register_operand" "0")
+                                   (match_operand:<VWIDEN_F_ATTR> 2 "register_operand" "u")
+                                   (match_operand 3 "vmask_mode_register_operand" "w")
+                                   (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFWCVTXFV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VWIDEN_F_ATTR>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfwcvt_x_f_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfwcvtxufv_<mode>"
+  [(set (match_operand:VWIDEN_FROM_FLOAT 0 "register_operand" "=&v")
+        (unspec:VWIDEN_FROM_FLOAT [(match_operand:<VWIDEN_F_ATTR> 1 "register_operand" "v")
+                                   (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFWCVTXUFV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VWIDEN_F_ATTR>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfwcvt_xu_f_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfwcvtxufv_mask_<mode>"
+  [(set (match_operand:VWIDEN_FROM_FLOAT 0 "register_operand" "=&u")
+        (unspec:VWIDEN_FROM_FLOAT [(match_operand:VWIDEN_FROM_FLOAT 1 "register_operand" "0")
+                                   (match_operand:<VWIDEN_F_ATTR> 2 "register_operand" "u")
+                                   (match_operand 3 "vmask_mode_register_operand" "w")
+                                   (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFWCVTXUFV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VWIDEN_F_ATTR>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfwcvt_xu_f_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfwcvtfxv_<mode>"
+  [(set (match_operand:VFWIDEN_FROM_INT 0 "register_operand" "=&v")
+        (unspec:VFWIDEN_FROM_INT [(match_operand:<VFWIDEN_X_ATTR> 1 "register_operand" "v")
+                                  (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFWCVTFXV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_X_ATTR>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfwcvt_f_x_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfwcvtfxv_mask_<mode>"
+  [(set (match_operand:VFWIDEN_FROM_INT 0 "register_operand" "=&u")
+        (unspec:VFWIDEN_FROM_INT [(match_operand:VFWIDEN_FROM_INT 1 "register_operand" "0")
+                                  (match_operand:<VFWIDEN_X_ATTR> 2 "register_operand" "u")
+                                  (match_operand 3 "vmask_mode_register_operand" "w")
+                                  (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFWCVTFXV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_X_ATTR>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfwcvt_f_x_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfwcvtfxuv_<mode>"
+  [(set (match_operand:VFWIDEN_FROM_INT 0 "register_operand" "=&v")
+        (unspec:VFWIDEN_FROM_INT [(match_operand:<VFWIDEN_X_ATTR> 1 "register_operand" "v")
+                                  (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFWCVTFXUV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_X_ATTR>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfwcvt_f_xu_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfwcvtfxuv_mask_<mode>"
+  [(set (match_operand:VFWIDEN_FROM_INT 0 "register_operand" "=&u")
+        (unspec:VFWIDEN_FROM_INT [(match_operand:VFWIDEN_FROM_INT 1 "register_operand" "0")
+                                  (match_operand:<VFWIDEN_X_ATTR> 2 "register_operand" "u")
+                                  (match_operand 3 "vmask_mode_register_operand" "w")
+                                  (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFWCVTFXUV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_X_ATTR>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfwcvt_f_xu_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfncvtffv_<mode>"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=&v")
+        (unspec:VFNARROW [(match_operand:<VFNARROW_ATTR> 1 "register_operand" "v")
+                          (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFNCVTFFV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfncvt_f_f_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfncvtffv_mask_<mode>"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=&u")
+        (unspec:VFNARROW [(match_operand:VFNARROW 1 "register_operand" "0")
+                          (match_operand:<VFNARROW_ATTR> 2 "register_operand" "u")
+                          (match_operand 3 "vmask_mode_register_operand" "w")
+                          (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFNCVTFFV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfncvt_f_f_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfwcvtffv_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:<VFWIDEN_ATTR> 1 "register_operand" "v")
+                         (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFWCVTFFV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfwcvt_f_f_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfwcvtffv_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:<VFWIDEN_ATTR> 2 "register_operand" "u")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFWCVTFFV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfwcvt_f_f_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_<vf_opm_attr>vv_<VFANY:mode>_<VMASK:mode>"  ;; two FP vector inputs produce a mask
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+        (unspec:VMASK [(match_operand:VFANY 1 "register_operand" "v")
+                       (match_operand:VFANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+         VF_OPM_VV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFANY:MODE>mode, operands[3], operands[3]);  /* <VFANY:...> qualification required: two iterators in scope  */
+    emit_insn(gen_riscv_<vf_opm_attr>_vv_<VFANY:mode>_<VMASK:mode>(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_<vf_opm_attr>vf_<VFANY:mode>_<VMASK:mode>"  ;; FP vector x FP scalar -> mask
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+        (unspec:VMASK [(match_operand:VFANY 1 "register_operand" "v")
+                 (match_operand:<VFANY:VF_SEM_ATTR> 2 "register_operand" "f")
+                 (match_operand:SI 3 "register_operand" "r")]
+         VF_OPM_VF))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFANY:MODE>mode, operands[3], operands[3]);  /* was bare <MODE>: ambiguous with VFANY and VMASK both in scope  */
+    emit_insn(gen_riscv_<vf_opm_attr>_vf_<VFANY:mode>_<VMASK:mode>(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_<vf_opm_attr>vv_mask_<VFANY:mode>_<VMASK:mode>"
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&u")
+        (unspec:VMASK [(match_operand:VMASK 1 "vmask_mode_register_operand" "0")
+                 (match_operand:VFANY 2 "register_operand" "u")
+                 (match_operand:VFANY 3 "register_operand" "u")
+                 (match_operand 4 "vmask_mode_register_operand" "w")
+                 (match_operand:SI 5 "register_operand" "r")]
+         VF_OPM_VV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFANY:MODE>mode, operands[5], operands[5]);
+    emit_insn(gen_riscv_<vf_opm_attr>_vv_mask_<VFANY:mode>_<VMASK:mode>(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_<vf_opm_attr>vf_mask_<VFANY:mode>_<VMASK:mode>"
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&u")
+        (unspec:VMASK [(match_operand:VMASK 1 "vmask_mode_register_operand" "0")
+                       (match_operand:VFANY 2 "register_operand" "u")
+                       (match_operand:<VFANY:VF_SEM_ATTR> 3 "register_operand" "f")
+                       (match_operand:VMASK 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+         VF_OPM_VF_MASK))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFANY:MODE>mode, operands[5], operands[5]);
+    emit_insn(gen_riscv_<vf_opm_attr>_vf_mask_<VFANY:mode>_<VMASK:mode>(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfsqrtv_<mode>"  ;; vfsqrt.v: elementwise square root
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFSQRTV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfsqrt_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfsqrtv_mask_<mode>"  ;; masked variant: op1 = merge dest, op3 = mask register
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:VFANY 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFSQRTV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfsqrt_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfclassv_<mode>"  ;; vfclass.v: classify FP elements into an integer result vector
+  [(set (match_operand:<VF_CVTX_ATTR> 0 "register_operand" "=v")
+        (unspec:<VF_CVTX_ATTR> [(match_operand:VFANY 1 "register_operand" "v")
+                                (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFCLASSV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfclass_v_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_vfclassv_mask_<mode>"  ;; masked vfclass.v
+  [(set (match_operand:<VF_CVTX_ATTR> 0 "register_operand" "=u")
+        (unspec:<VF_CVTX_ATTR> [(match_operand:<VF_CVTX_ATTR> 1 "register_operand" "0")
+                                (match_operand:VFANY 2 "register_operand" "u")
+                                (match_operand 3 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFCLASSV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    /* A duplicated, unreachable DONE statement was removed here.  */
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfclass_v_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_<vf_op_attr>vv_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "v")
+                       (match_operand:VFANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+         VF_OP_VV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_<vf_op_attr>_vv_<mode>(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_<vf_op_attr>vf_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "v")
+                       (match_operand:<VF_SEM_ATTR> 2 "register_operand" "f")
+                       (match_operand:SI 3 "register_operand" "r")]
+         VF_OP_VF))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_<vf_op_attr>_vf_<mode>(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_<vf_op_attr>vv_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:VFANY 2 "register_operand" "u")
+                       (match_operand:VFANY 3 "register_operand" "u")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+         VF_OP_VV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+    emit_insn(gen_riscv_<vf_op_attr>_vv_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+(define_insn_and_split "riscv_<vf_op_attr>vf_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:VFANY 2 "register_operand" "u")
+                       (match_operand:<VF_SEM_ATTR> 3 "register_operand" "f")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+         VF_OP_VF_MASK))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+    emit_insn(gen_riscv_<vf_op_attr>_vf_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+;; Widening FP op.vv: 2*SEW destination from two SEW-wide vector sources.
+;; Operand 0 is earlyclobber ("=&v") because the wide result must not overlap
+;; the narrow sources.  Operand 3 is the requested vector length; the vsetvli
+;; emitted by the split uses the *narrow* (<VFWIDEN_ATTR>) element width, as
+;; required for widening instructions.
+(define_insn_and_split "riscv_<vf_opw_attr>vv_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:<VFWIDEN_ATTR> 1 "register_operand" "v")
+                         (match_operand:<VFWIDEN_ATTR> 2 "register_operand" "v")
+                         (match_operand:SI 3 "register_operand" "r")]
+         VF_OPW_VV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_<vf_opw_attr>_vv_<mode>(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Widening FP op.vf: as above but the second source is an FP scalar register.
+(define_insn_and_split "riscv_<vf_opw_attr>vf_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:<VFWIDEN_ATTR> 1 "register_operand" "v")
+                         (match_operand:<VFWIDEN_SEM_ATTR> 2 "register_operand" "f")
+                         (match_operand:SI 3 "register_operand" "r")]
+         VF_OPW_VF))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_<vf_opw_attr>_vf_<mode>(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Masked widening FP op.vv: operand 1 is the masked-off source (tied to the
+;; earlyclobber destination), operands 2-3 the narrow vector sources, operand 4
+;; the mask register, operand 5 the requested vector length.
+(define_insn_and_split "riscv_<vf_opw_attr>vv_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:<VFWIDEN_ATTR> 2 "register_operand" "u")
+                         (match_operand:<VFWIDEN_ATTR> 3 "register_operand" "u")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+         VF_OPW_VV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[5], operands[5]);
+    emit_insn(gen_riscv_<vf_opw_attr>_vv_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+;; Masked widening FP op.vf: as above with an FP scalar as the second source.
+(define_insn_and_split "riscv_<vf_opw_attr>vf_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:<VFWIDEN_ATTR> 2 "register_operand" "u")
+                         (match_operand:<VFWIDEN_SEM_ATTR> 3 "register_operand" "f")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+         VF_OPW_VF_MASK))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[5], operands[5]);
+    emit_insn(gen_riscv_<vf_opw_attr>_vf_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+;; Widening FP op.wv: first source is already wide (2*SEW), second is narrow.
+;; The vsetvli in the split still uses the narrow element width.
+(define_insn_and_split "riscv_<vf_opw_attr>wv_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "v")
+                         (match_operand:<VFWIDEN_ATTR> 2 "register_operand" "v")
+                         (match_operand:SI 3 "register_operand" "r")]
+         VF_OPW_WV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_<vf_opw_attr>_wv_<mode>(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Widening FP op.wf: wide vector source combined with an FP scalar.
+(define_insn_and_split "riscv_<vf_opw_attr>wf_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "v")
+                         (match_operand:<VFWIDEN_SEM_ATTR> 2 "register_operand" "f")
+                         (match_operand:SI 3 "register_operand" "r")]
+         VF_OPW_WF))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_<vf_opw_attr>_wf_<mode>(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Masked widening FP op.wv: operand 1 masked-off source (tied), operand 2 the
+;; wide vector source, operand 3 the narrow vector source, operand 4 the mask,
+;; operand 5 the requested vector length.
+(define_insn_and_split "riscv_<vf_opw_attr>wv_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:VFWIDEN 2 "register_operand" "u")
+                         (match_operand:<VFWIDEN_ATTR> 3 "register_operand" "u")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+         VF_OPW_WV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[5], operands[5]);
+    emit_insn(gen_riscv_<vf_opw_attr>_wv_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+;; Masked widening FP op.wf: operand 1 masked-off source (tied), operand 2 the
+;; wide vector source, operand 3 an FP scalar, operand 4 the mask, operand 5
+;; the requested vector length.
+;; Fix: the pattern previously iterated over VF_OPW_VF_MASK, which (a) made it
+;; structurally identical to riscv_<vf_opw_attr>vf_mask_<mode> above, so the
+;; two define_insn_and_splits matched the same RTL, and (b) disagreed with the
+;; gen_riscv_<vf_opw_attr>_wf_mask_<mode> call in its own split.  Use the
+;; WF_MASK iterator, mirroring the wv/wv_mask pairing.
+(define_insn_and_split "riscv_<vf_opw_attr>wf_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:VFWIDEN 2 "register_operand" "u")
+                         (match_operand:<VFWIDEN_SEM_ATTR> 3 "register_operand" "f")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+         VF_OPW_WF_MASK))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[5], operands[5]);
+    emit_insn(gen_riscv_<vf_opw_attr>_wf_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+;; Fused multiply-add family, vv form: vd = fused-op(vd, vs1, vs2).  The
+;; accumulator is operand 1, tied to the destination via constraint "0".
+(define_insn_and_split "riscv_<vf_op_fused_attr>vv_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:VFANY 2 "register_operand" "v")
+                       (match_operand:VFANY 3 "register_operand" "v")
+                       (match_operand:SI 4 "register_operand" "r")]
+         VF_OP_FUSED_VV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_<vf_op_fused_attr>_vv_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Fused multiply-add family, vf form: FP scalar as operand 2.
+(define_insn_and_split "riscv_<vf_op_fused_attr>vf_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:<VF_SEM_ATTR> 2 "register_operand" "f")
+                       (match_operand:VFANY 3 "register_operand" "v")
+                       (match_operand:SI 4 "register_operand" "r")]
+         VF_OP_FUSED_VF))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_<vf_op_fused_attr>_vf_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Masked fused multiply-add, vv form.  The split first copies operand 1 into
+;; the destination when they differ, then emits vsetvli and the masked insn.
+;; NOTE(review): operand 2 carries the "0" tied constraint here, yet the
+;; pre-copy is from operand 1 — confirm that operand 1 (masked-off source) and
+;; operand 2 (tied accumulator) are not swapped, and that the copy cannot
+;; clobber the accumulator after register allocation.
+(define_insn_and_split "riscv_<vf_op_fused_attr>vv_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "u")
+                       (match_operand:VFANY 2 "register_operand" "0")
+                       (match_operand:VFANY 3 "register_operand" "u")
+                       (match_operand:VFANY 4 "register_operand" "u")
+                       (match_operand 5 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 6 "register_operand" "r")]
+         VF_OP_FUSED_VV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    if (REGNO(operands[0]) != REGNO(operands[1]))
+      emit_move_insn(operands[0], operands[1]);
+    riscv_emit_vsetvli(<MODE>mode, operands[6], operands[6]);
+    emit_insn(gen_riscv_<vf_op_fused_attr>_vv_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5], operands[6]));
+    DONE;
+  }
+)
+
+;; Masked fused multiply-add, vf form: FP scalar as operand 3.  Same pre-copy
+;; caveat as the vv form above.
+(define_insn_and_split "riscv_<vf_op_fused_attr>vf_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "u")
+                       (match_operand:VFANY 2 "register_operand" "0")
+                       (match_operand:<VF_SEM_ATTR> 3 "register_operand" "f")
+                       (match_operand:VFANY 4 "register_operand" "u")
+                       (match_operand 5 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 6 "register_operand" "r")]
+         VF_OP_FUSED_VF_MASK))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    if (REGNO(operands[0]) != REGNO(operands[1]))
+      emit_move_insn(operands[0], operands[1]);
+    riscv_emit_vsetvli(<MODE>mode, operands[6], operands[6]);
+    emit_insn(gen_riscv_<vf_op_fused_attr>_vf_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5], operands[6]));
+    DONE;
+  }
+)
+
+;; Widening fused multiply-add, vv form: wide accumulator (operand 1, tied),
+;; two narrow sources.  vsetvli uses the narrow element width.
+(define_insn_and_split "riscv_<vf_op_fusedw_attr>vv_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:<VFWIDEN_ATTR> 2 "register_operand" "v")
+                         (match_operand:<VFWIDEN_ATTR> 3 "register_operand" "v")
+                         (match_operand:SI 4 "register_operand" "r")]
+         VF_OP_FUSEDW_VV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_<vf_op_fusedw_attr>_vv_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Widening fused multiply-add, vf form: FP scalar as operand 2.
+(define_insn_and_split "riscv_<vf_op_fusedw_attr>vf_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:<VFWIDEN_SEM_ATTR> 2 "register_operand" "f")
+                         (match_operand:<VFWIDEN_ATTR> 3 "register_operand" "v")
+                         (match_operand:SI 4 "register_operand" "r")]
+         VF_OP_FUSEDW_VF))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_<vf_op_fusedw_attr>_vf_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Masked widening fused multiply-add, vv form.
+;; NOTE(review): as with the non-widening masked fused patterns, the "0" tie
+;; is on operand 2 while the split pre-copies operand 1 into the destination —
+;; verify that operands 1 and 2 are not swapped.
+(define_insn_and_split "riscv_<vf_op_fusedw_attr>vv_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "u")
+                         (match_operand:VFWIDEN 2 "register_operand" "0")
+                         (match_operand:<VFWIDEN_ATTR> 3 "register_operand" "u")
+                         (match_operand:<VFWIDEN_ATTR> 4 "register_operand" "u")
+                         (match_operand 5 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 6 "register_operand" "r")]
+         VF_OP_FUSEDW_VV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    if (REGNO(operands[0]) != REGNO(operands[1]))
+      emit_move_insn(operands[0], operands[1]);
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[6], operands[6]);
+    emit_insn(gen_riscv_<vf_op_fusedw_attr>_vv_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5], operands[6]));
+    DONE;
+  }
+)
+
+;; Masked widening fused multiply-add, vf form: FP scalar as operand 3.
+(define_insn_and_split "riscv_<vf_op_fusedw_attr>vf_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "u")
+                         (match_operand:VFWIDEN 2 "register_operand" "0")
+                 (match_operand:<VFWIDEN_SEM_ATTR> 3 "register_operand" "f")
+                 (match_operand:<VFWIDEN_ATTR> 4 "register_operand" "u")
+                 (match_operand 5 "vmask_mode_register_operand" "w")
+                 (match_operand:SI 6 "register_operand" "r")]
+         VF_OP_FUSEDW_VF_MASK))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    if (REGNO(operands[0]) != REGNO(operands[1]))
+      emit_move_insn(operands[0], operands[1]);
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[6], operands[6]);
+    emit_insn(gen_riscv_<vf_op_fusedw_attr>_vf_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5], operands[6]));
+    DONE;
+  }
+)
+
+;; vfmerge.vfm: merge an FP scalar into a vector under a mask.
+;; NOTE(review): the unspec name is spelled UNSPEC_VFMARGEVFM ("MARGE") —
+;; presumably a typo for VFMERGEVFM; renaming must be done together with its
+;; definition in the unspec enum, which is outside this file section.
+(define_insn_and_split "riscv_vfmergevfm_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "u")
+                       (match_operand:<VF_SEM_ATTR> 2 "register_operand" "f")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFMARGEVFM))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vfmerge_vfm_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; vfmv.v.f: splat an FP scalar into all elements of a vector register.
+(define_insn_and_split "riscv_vfmvvf_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:<VF_SEM_ATTR> 1 "register_operand" "f")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFMVVF))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfmv_v_f_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+;; vfmv.s.f: move an FP scalar into element 0 of a vector register.
+(define_insn_and_split "riscv_vfmvsf_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:<VF_SEM_ATTR> 1 "register_operand" "f")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFMVSF))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfmv_s_f_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+;; vfmv.f.s: move element 0 of a vector register into an FP scalar register.
+(define_insn_and_split "riscv_vfmvfs_<mode>"
+  [(set (match_operand:<VF_SEM_ATTR> 0 "register_operand" "=f")
+        (unspec:<VF_SEM_ATTR> [(match_operand:VFANY 1 "register_operand" "v")
+                               (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFMVFS))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vfmv_f_s_<mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+;; FP reduction op.vs: reduce operand 2 with the scalar seed in element 0 of
+;; operand 1; result lands in element 0 of the destination.
+(define_insn_and_split "riscv_<vf_op_vs_attr>vs_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "v")
+                       (match_operand:VFANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+         VF_OP_VS))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_<vf_op_vs_attr>_vs_<mode>(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Masked FP reduction op.vs.
+;; NOTE(review): this masked pattern reuses the unmasked VF_OP_VS iterator;
+;; every other masked pattern in this file uses a dedicated *_MASK iterator
+;; (e.g. VF_OP_VV_MASK).  The unspec arity differs so the patterns are still
+;; distinguishable, but this looks like a copy-paste slip — confirm against
+;; iterators.md whether VF_OP_VS_MASK was intended.
+(define_insn_and_split "riscv_<vf_op_vs_attr>vs_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "u")
+                       (match_operand:VFANY 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         VF_OP_VS))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_<vf_op_vs_attr>_vs_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Widening FP reduction op.vs: narrow vector source reduced into a wide
+;; scalar result (element 0 of the wide destination).  vsetvli uses the
+;; narrow element width.
+(define_insn_and_split "riscv_<vf_opw_vs_attr>vs_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:<VFWIDEN_ATTR> 1 "register_operand" "v")
+                         (match_operand:VFWIDEN 2 "register_operand" "v")
+                         (match_operand:SI 3 "register_operand" "r")]
+         VF_OPW_VS))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_<vf_opw_vs_attr>_vs_<mode>(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Masked widening FP reduction op.vs.
+;; NOTE(review): reuses the unmasked VF_OPW_VS iterator — same concern as the
+;; non-widening vs_mask pattern above; confirm whether VF_OPW_VS_MASK was
+;; intended in iterators.md.
+(define_insn_and_split "riscv_<vf_opw_vs_attr>vs_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:<VFWIDEN_ATTR> 1 "register_operand" "u")
+                         (match_operand:VFWIDEN 2 "register_operand" "u")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 4 "register_operand" "r")]
+         VF_OPW_VS))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VFWIDEN_ATTR>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_<vf_opw_vs_attr>_vs_mask_<mode>(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Auto-vectorize
+;; SPN
+;; Standard vec_duplicate pattern for the auto-vectorizer: splat an FP scalar
+;; via vfmv.v.f.  The SImode register 0 (x0) is passed as the vl operand —
+;; presumably meaning "use the maximal vector length"; confirm against
+;; riscv_emit_vsetvli.
+(define_expand "vec_duplicate<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (vec_duplicate:VFANY (match_operand:<VF_SEM_ATTR> 1 "register_operand" "f")))]
+  "TARGET_VECTOR && TARGET_VECTOR_VSPN && TARGET_HARD_FLOAT"
+  {
+    emit_insn(gen_riscv_vfmvvf_<mode>(operands[0], operands[1], gen_rtx_REG(SImode, 0)));
+    DONE;
+  }
+)
+
+;; Standard-named arithmetic expanders for auto-vectorization (enabled by
+;; -mvspn).  Each one forwards to the corresponding intrinsic-style pattern,
+;; passing register x0 as the vl operand.
+(define_expand "add<mode>3"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (plus:VFANY (match_operand:VFANY 1 "register_operand" "v")
+                    (match_operand:VFANY 2 "register_operand" "v")))]
+  "TARGET_VECTOR && TARGET_VECTOR_VSPN"
+  {
+    emit_insn(gen_riscv_vfaddvv_<mode>(operands[0], operands[1], operands[2], gen_rtx_REG(SImode, 0)));
+    DONE;
+  }
+)
+
+(define_expand "sub<mode>3"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (minus:VFANY (match_operand:VFANY 1 "register_operand" "v")
+                     (match_operand:VFANY 2 "register_operand" "v")))]
+  "TARGET_VECTOR && TARGET_VECTOR_VSPN"
+  {
+    emit_insn(gen_riscv_vfsubvv_<mode>(operands[0], operands[1], operands[2], gen_rtx_REG(SImode, 0)));
+    DONE;
+  }
+)
+
+(define_expand "mul<mode>3"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (mult:VFANY (match_operand:VFANY 1 "register_operand" "v")
+                    (match_operand:VFANY 2 "register_operand" "v")))]
+  "TARGET_VECTOR && TARGET_VECTOR_VSPN"
+  {
+    emit_insn(gen_riscv_vfmulvv_<mode>(operands[0], operands[1], operands[2], gen_rtx_REG(SImode, 0)));
+    DONE;
+  }
+)
+
+(define_expand "div<mode>3"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (div:VFANY (match_operand:VFANY 1 "register_operand" "v")
+                   (match_operand:VFANY 2 "register_operand" "v")))]
+  "TARGET_VECTOR && TARGET_VECTOR_VSPN"
+  {
+    emit_insn(gen_riscv_vfdivvv_<mode>(operands[0], operands[1], operands[2], gen_rtx_REG(SImode, 0)));
+    DONE;
+  }
+)
+
+(define_expand "sqrt<mode>2"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (sqrt:VFANY (match_operand:VFANY 1 "register_operand" "v")))]
+  "TARGET_VECTOR && TARGET_VECTOR_VSPN"
+  {
+    emit_insn(gen_riscv_vfsqrtv_<mode>(operands[0], operands[1], gen_rtx_REG(SImode, 0)));
+    DONE;
+  }
+)
+
+;; Standard-named conversion expanders for auto-vectorization: FP->int (fix /
+;; fixuns), int->FP (float / floatuns), FP narrowing (trunc) and FP widening
+;; (extend).  All forward to the intrinsic patterns with x0 as the vl operand.
+(define_expand "fix<v_cvtf_attr><mode>2"
+  [(set (match_operand:VTDSH 0 "register_operand" "=v")
+        (fix:VTDSH (match_operand:<V_CVTF_ATTR> 1 "register_operand" "v")))]
+  "TARGET_VECTOR && TARGET_VECTOR_VSPN"
+  {
+    emit_insn(gen_riscv_vfcvtxfv_<mode>(operands[0], operands[1], gen_rtx_REG(SImode, 0)));
+    DONE;
+  }
+)
+
+(define_expand "fixuns<v_cvtf_attr><mode>2"
+  [(set (match_operand:VTDSH 0 "register_operand" "=v")
+        (unsigned_fix:VTDSH (match_operand:<V_CVTF_ATTR> 1 "register_operand" "v")))]
+  "TARGET_VECTOR && TARGET_VECTOR_VSPN"
+  {
+    emit_insn(gen_riscv_vfcvtxufv_<mode>(operands[0], operands[1], gen_rtx_REG(SImode, 0)));
+    DONE;
+  }
+)
+
+(define_expand "float<vf_cvtx_attr><mode>2"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (float:VFANY (match_operand:<VF_CVTX_ATTR> 1 "register_operand" "v")))]
+  "TARGET_VECTOR && TARGET_VECTOR_VSPN"
+  {
+    emit_insn(gen_riscv_vfcvtfxv_<mode>(operands[0], operands[1], gen_rtx_REG(SImode, 0)));
+    DONE;
+  }
+)
+
+(define_expand "floatuns<vf_cvtx_attr><mode>2"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unsigned_float:VFANY (match_operand:<VF_CVTX_ATTR> 1 "register_operand" "v")))]
+  "TARGET_VECTOR && TARGET_VECTOR_VSPN"
+  {
+    emit_insn(gen_riscv_vfcvtfxuv_<mode>(operands[0], operands[1], gen_rtx_REG(SImode, 0)));
+    DONE;
+  }
+)
+
+(define_expand "trunc<vfnarrow_attr><mode>2"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=v")
+        (float_truncate:VFNARROW (match_operand:<VFNARROW_ATTR> 1 "register_operand" "v")))]
+  "TARGET_VECTOR && TARGET_VECTOR_VSPN"
+  {
+    emit_insn(gen_riscv_vfncvtffv_<mode>(operands[0], operands[1], gen_rtx_REG(SImode, 0)));
+    DONE;
+  }
+)
+
+(define_expand "extend<vfwiden_attr><mode>2"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=v")
+        (float_extend:VFWIDEN (match_operand:<VFWIDEN_ATTR> 1 "register_operand" "v")))]
+  "TARGET_VECTOR && TARGET_VECTOR_VSPN"
+  {
+    emit_insn(gen_riscv_vfwcvtffv_<mode>(operands[0], operands[1], gen_rtx_REG(SImode, 0)));
+    DONE;
+  }
+)
+
+;; Floating-point/integer type-conversion instructions
+;; vfcvt.x.f.v: FP vector -> signed integer vector, same SEW.
+;; riscv_output_vector_insn wraps the template with the required vsetvli
+;; handling for the mode (see riscv.c).
+(define_insn "riscv_vfcvt_x_f_v_<mode>"
+  [(set (match_operand:VTDSH 0 "register_operand" "=v")
+        (unspec:VTDSH [(match_operand:<V_CVTF_ATTR> 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFCVTXFV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfcvt.x.f.v\t%0,%1");
+  }
+)
+
+;; Masked vfcvt.x.f.v: operand 1 is the masked-off source tied to the dest.
+(define_insn "riscv_vfcvt_x_f_v_mask_<mode>"
+  [(set (match_operand:VTDSH 0 "register_operand" "=u")
+        (unspec:VTDSH [(match_operand:VTDSH 1 "register_operand" "0")
+                       (match_operand:<V_CVTF_ATTR> 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFCVTXFV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfcvt.x.f.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfcvt.xu.f.v: FP vector -> unsigned integer vector, same SEW.
+(define_insn "riscv_vfcvt_xu_f_v_<mode>"
+  [(set (match_operand:VTDSH 0 "register_operand" "=v")
+        (unspec:VTDSH [(match_operand:<V_CVTF_ATTR> 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFCVTXUFV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfcvt.xu.f.v\t%0,%1");
+  }
+)
+
+;; Masked vfcvt.xu.f.v.
+(define_insn "riscv_vfcvt_xu_f_v_mask_<mode>"
+  [(set (match_operand:VTDSH 0 "register_operand" "=u")
+        (unspec:VTDSH [(match_operand:VTDSH 1 "register_operand" "0")
+                       (match_operand:<V_CVTF_ATTR> 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFCVTXUFV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfcvt.xu.f.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfcvt.f.x.v: signed integer vector -> FP vector, same SEW.
+(define_insn "riscv_vfcvt_f_x_v_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:<VF_CVTX_ATTR> 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFCVTFXV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfcvt.f.x.v\t%0,%1");
+  }
+)
+
+;; Masked vfcvt.f.x.v.
+(define_insn "riscv_vfcvt_f_x_v_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:<VF_CVTX_ATTR> 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFCVTFXV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfcvt.f.x.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfcvt.f.xu.v: unsigned integer vector -> FP vector, same SEW.
+(define_insn "riscv_vfcvt_f_xu_v_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:<VF_CVTX_ATTR> 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFCVTFXUV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfcvt.f.xu.v\t%0,%1");
+  }
+)
+
+;; Masked vfcvt.f.xu.v.
+(define_insn "riscv_vfcvt_f_xu_v_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:<VF_CVTX_ATTR> 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFCVTFXUV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfcvt.f.xu.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfncvt.x.f.v: narrowing FP -> signed integer (2*SEW source, SEW result).
+;; Destination is earlyclobber so it cannot overlap the wide source.
+(define_insn "riscv_vfncvt_x_f_v_<mode>"
+  [(set (match_operand:VNARROWER 0 "register_operand" "=&v")
+        (unspec:VNARROWER [(match_operand:<VNARROW_F_ATTR> 1 "register_operand" "v")
+                           (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFNCVTXFV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfncvt.x.f.v\t%0,%1");
+  }
+)
+
+;; Masked vfncvt.x.f.v.
+(define_insn "riscv_vfncvt_x_f_v_mask_<mode>"
+  [(set (match_operand:VNARROWER 0 "register_operand" "=&u")
+        (unspec:VNARROWER [(match_operand:VNARROWER 1 "register_operand" "0")
+                           (match_operand:<VNARROW_F_ATTR> 2 "register_operand" "u")
+                           (match_operand 3 "vmask_mode_register_operand" "w")
+                           (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFNCVTXFV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfncvt.x.f.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfncvt.xu.f.v: narrowing FP -> unsigned integer.
+(define_insn "riscv_vfncvt_xu_f_v_<mode>"
+  [(set (match_operand:VNARROWER 0 "register_operand" "=&v")
+        (unspec:VNARROWER [(match_operand:<VNARROW_F_ATTR> 1 "register_operand" "v")
+                           (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFNCVTXUFV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfncvt.xu.f.v\t%0,%1");
+  }
+)
+
+;; Masked vfncvt.xu.f.v.
+(define_insn "riscv_vfncvt_xu_f_v_mask_<mode>"
+  [(set (match_operand:VNARROWER 0 "register_operand" "=&u")
+        (unspec:VNARROWER [(match_operand:VNARROWER 1 "register_operand" "0")
+                           (match_operand:<VNARROW_F_ATTR> 2 "register_operand" "u")
+                           (match_operand 3 "vmask_mode_register_operand" "w")
+                           (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFNCVTXUFV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfncvt.xu.f.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfncvt.f.x.v: narrowing signed integer -> FP (2*SEW source, SEW result).
+(define_insn "riscv_vfncvt_f_x_v_<mode>"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=&v")
+        (unspec:VFNARROW [(match_operand:<VFNARROW_X_ATTR> 1 "register_operand" "v")
+                          (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFNCVTFXV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfncvt.f.x.v\t%0,%1");
+  }
+)
+
+;; Masked vfncvt.f.x.v.
+(define_insn "riscv_vfncvt_f_x_v_mask_<mode>"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=&u")
+        (unspec:VFNARROW [(match_operand:VFNARROW 1 "register_operand" "0")
+                          (match_operand:<VFNARROW_X_ATTR> 2 "register_operand" "u")
+                          (match_operand 3 "vmask_mode_register_operand" "w")
+                          (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFNCVTFXV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfncvt.f.x.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfncvt.f.xu.v: narrowing unsigned integer -> FP.
+(define_insn "riscv_vfncvt_f_xu_v_<mode>"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=&v")
+        (unspec:VFNARROW [(match_operand:<VFNARROW_X_ATTR> 1 "register_operand" "v")
+                          (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFNCVTFXUV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfncvt.f.xu.v\t%0,%1");
+  }
+)
+
+;; Masked vfncvt.f.xu.v.
+(define_insn "riscv_vfncvt_f_xu_v_mask_<mode>"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=&u")
+        (unspec:VFNARROW [(match_operand:VFNARROW 1 "register_operand" "0")
+                          (match_operand:<VFNARROW_X_ATTR> 2 "register_operand" "u")
+                          (match_operand 3 "vmask_mode_register_operand" "w")
+                          (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFNCVTFXUV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfncvt.f.xu.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfwcvt.x.f.v: widening FP -> signed integer (SEW source, 2*SEW result).
+;; Destination is earlyclobber so it cannot overlap the narrow source.
+(define_insn "riscv_vfwcvt_x_f_v_<mode>"
+  [(set (match_operand:VWIDEN_FROM_FLOAT 0 "register_operand" "=&v")
+        (unspec:VWIDEN_FROM_FLOAT [(match_operand:<VWIDEN_F_ATTR> 1 "register_operand" "v")
+                                   (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFWCVTXFV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfwcvt.x.f.v\t%0,%1");
+  }
+)
+
+;; Masked vfwcvt.x.f.v.
+(define_insn "riscv_vfwcvt_x_f_v_mask_<mode>"
+  [(set (match_operand:VWIDEN_FROM_FLOAT 0 "register_operand" "=&u")
+        (unspec:VWIDEN_FROM_FLOAT [(match_operand:VWIDEN_FROM_FLOAT 1 "register_operand" "0")
+                                   (match_operand:<VWIDEN_F_ATTR> 2 "register_operand" "u")
+                                   (match_operand 3 "vmask_mode_register_operand" "w")
+                                   (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFWCVTXFV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfwcvt.x.f.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfwcvt.xu.f.v: widening FP -> unsigned integer.
+(define_insn "riscv_vfwcvt_xu_f_v_<mode>"
+  [(set (match_operand:VWIDEN_FROM_FLOAT 0 "register_operand" "=&v")
+        (unspec:VWIDEN_FROM_FLOAT [(match_operand:<VWIDEN_F_ATTR> 1 "register_operand" "v")
+                                   (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFWCVTXUFV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfwcvt.xu.f.v\t%0,%1");
+  }
+)
+
+;; Masked vfwcvt.xu.f.v.
+(define_insn "riscv_vfwcvt_xu_f_v_mask_<mode>"
+  [(set (match_operand:VWIDEN_FROM_FLOAT 0 "register_operand" "=&u")
+        (unspec:VWIDEN_FROM_FLOAT [(match_operand:VWIDEN_FROM_FLOAT 1 "register_operand" "0")
+                                   (match_operand:<VWIDEN_F_ATTR> 2 "register_operand" "u")
+                                   (match_operand 3 "vmask_mode_register_operand" "w")
+                                   (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFWCVTXUFV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfwcvt.xu.f.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfwcvt.f.x.v: convert signed-integer elements to floats of twice the
+;; width (unmasked).
+(define_insn "riscv_vfwcvt_f_x_v_<mode>"
+  [(set (match_operand:VFWIDEN_FROM_INT 0 "register_operand" "=&v")
+        (unspec:VFWIDEN_FROM_INT [(match_operand:<VFWIDEN_X_ATTR> 1 "register_operand" "v")
+                                  (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFWCVTFXV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfwcvt.f.x.v\t%0,%1");
+  }
+)
+
+;; Masked vfwcvt.f.x.v: operand 1 ("0") is the merge value, operand 3 the mask.
+(define_insn "riscv_vfwcvt_f_x_v_mask_<mode>"
+  [(set (match_operand:VFWIDEN_FROM_INT 0 "register_operand" "=&u")
+        (unspec:VFWIDEN_FROM_INT [(match_operand:VFWIDEN_FROM_INT 1 "register_operand" "0")
+                                  (match_operand:<VFWIDEN_X_ATTR> 2 "register_operand" "u")
+                                  (match_operand 3 "vmask_mode_register_operand" "w")
+                                  (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFWCVTFXV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfwcvt.f.x.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfwcvt.f.xu.v: convert unsigned-integer elements to floats of twice the
+;; width (unmasked).
+(define_insn "riscv_vfwcvt_f_xu_v_<mode>"
+  [(set (match_operand:VFWIDEN_FROM_INT 0 "register_operand" "=&v")
+        (unspec:VFWIDEN_FROM_INT [(match_operand:<VFWIDEN_X_ATTR> 1 "register_operand" "v")
+                                  (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFWCVTFXUV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfwcvt.f.xu.v\t%0,%1");
+  }
+)
+
+;; Masked vfwcvt.f.xu.v: operand 1 ("0") is the merge value, operand 3 the mask.
+(define_insn "riscv_vfwcvt_f_xu_v_mask_<mode>"
+  [(set (match_operand:VFWIDEN_FROM_INT 0 "register_operand" "=&u")
+        (unspec:VFWIDEN_FROM_INT [(match_operand:VFWIDEN_FROM_INT 1 "register_operand" "0")
+                                  (match_operand:<VFWIDEN_X_ATTR> 2 "register_operand" "u")
+                                  (match_operand 3 "vmask_mode_register_operand" "w")
+                                  (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFWCVTFXUV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfwcvt.f.xu.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfncvt.f.f.v: narrow float elements to half-width floats (unmasked).
+(define_insn "riscv_vfncvt_f_f_v_<mode>"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=&v")
+        (unspec:VFNARROW [(match_operand:<VFNARROW_ATTR> 1 "register_operand" "v")
+                          (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFNCVTFFV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfncvt.f.f.v\t%0,%1");
+  }
+)
+
+;; Masked vfncvt.f.f.v: operand 1 ("0") is the merge value, operand 3 the mask.
+(define_insn "riscv_vfncvt_f_f_v_mask_<mode>"
+  [(set (match_operand:VFNARROW 0 "register_operand" "=&u")
+        (unspec:VFNARROW [(match_operand:VFNARROW 1 "register_operand" "0")
+                          (match_operand:<VFNARROW_ATTR> 2 "register_operand" "u")
+                          (match_operand 3 "vmask_mode_register_operand" "w")
+                          (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFNCVTFFV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfncvt.f.f.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfwcvt.f.f.v: widen float elements to double-width floats (unmasked).
+(define_insn "riscv_vfwcvt_f_f_v_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:<VFWIDEN_ATTR> 1 "register_operand" "v")
+                         (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFWCVTFFV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfwcvt.f.f.v\t%0,%1");
+  }
+)
+
+;; Masked vfwcvt.f.f.v: operand 1 ("0") is the merge value, operand 3 the mask.
+(define_insn "riscv_vfwcvt_f_f_v_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:<VFWIDEN_ATTR> 2 "register_operand" "u")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFWCVTFFV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfwcvt.f.f.v\t%0,%2,%3.t");
+  }
+)
+
+;; Float compare (vector-vector) producing a mask register result; the
+;; concrete mnemonic comes from the VF_OPM_VV int iterator via <vf_opm_attr>.
+(define_insn "riscv_<vf_opm_attr>_vv_<VFANY:mode>_<VMASK:mode>"
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+        (unspec:VMASK [(match_operand:VFANY 1 "register_operand" "v")
+                       (match_operand:VFANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+         VF_OPM_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<VFANY:MODE>mode, "<vf_opm_attr>.vv\t%0,%1,%2");
+  }
+)
+
+;; Float compare (vector-scalar) producing a mask; the scalar comes from an
+;; FPR, hence the TARGET_HARD_FLOAT requirement.
+(define_insn "riscv_<vf_opm_attr>_vf_<VFANY:mode>_<VMASK:mode>"
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+        (unspec:VMASK [(match_operand:VFANY 1 "register_operand" "v")
+                       (match_operand:<VFANY:VF_SEM_ATTR> 2 "register_operand" "f")
+                       (match_operand:SI 3 "register_operand" "r")]
+         VF_OPM_VF))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<VFANY:MODE>mode, "<vf_opm_attr>.vf\t%0,%1,%2");
+  }
+)
+
+;; Masked float compare (vector-vector): operand 1 ("0") is the merge value,
+;; operand 4 the controlling mask (.t form).
+(define_insn "riscv_<vf_opm_attr>_vv_mask_<VFANY:mode>_<VMASK:mode>"
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&u")
+        (unspec:VMASK [(match_operand:VMASK 1 "vmask_mode_register_operand" "0")
+                       (match_operand:VFANY 2 "register_operand" "u")
+                       (match_operand:VFANY 3 "register_operand" "u")
+                       (match_operand:VMASK 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+         VF_OPM_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<VFANY:MODE>mode, "<vf_opm_attr>.vv\t%0,%2,%3,%4.t");
+  }
+)
+
+;; Masked float compare (vector-scalar): operand 1 ("0") is the merge value,
+;; operand 4 the controlling mask (.t form).
+(define_insn "riscv_<vf_opm_attr>_vf_mask_<VFANY:mode>_<VMASK:mode>"
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&u")
+        (unspec:VMASK [(match_operand:VMASK 1 "vmask_mode_register_operand" "0")
+                       (match_operand:VFANY 2 "register_operand" "u")
+                       (match_operand:<VFANY:VF_SEM_ATTR> 3 "register_operand" "f")
+                       (match_operand:VMASK 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+         VF_OPM_VF_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<VFANY:MODE>mode, "<vf_opm_attr>.vf\t%0,%2,%3,%4.t");
+  }
+)
+
+;; vfsqrt.v: element-wise floating-point square root (unmasked).
+(define_insn "riscv_vfsqrt_v_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFSQRTV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfsqrt.v\t%0,%1");
+  }
+)
+
+;; Masked vfsqrt.v: operand 1 ("0") is the merge value, operand 3 the mask.
+(define_insn "riscv_vfsqrt_v_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:VFANY 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFSQRTV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfsqrt.v\t%0,%2,%3.t");
+  }
+)
+
+;; vfclass.v: classify float elements into an integer vector of the same
+;; element width (<VF_CVTX_ATTR>), unmasked.
+(define_insn "riscv_vfclass_v_<mode>"
+  [(set (match_operand:<VF_CVTX_ATTR> 0 "register_operand" "=v")
+        (unspec:<VF_CVTX_ATTR> [(match_operand:VFANY 1 "register_operand" "v")
+                                (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFCLASSV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfclass.v\t%0,%1");
+  }
+)
+
+;; Masked vfclass.v: operand 1 ("0") is the merge value, operand 3 the mask.
+;; Fixed: the original carried a duplicated (clobber (const_int 0)); every
+;; sibling pattern in this file has exactly one, so the second was a
+;; copy-paste slip and is dropped here.
+(define_insn "riscv_vfclass_v_mask_<mode>"
+  [(set (match_operand:<VF_CVTX_ATTR> 0 "register_operand" "=u")
+        (unspec:<VF_CVTX_ATTR> [(match_operand:<VF_CVTX_ATTR> 1 "register_operand" "0")
+                                (match_operand:VFANY 2 "register_operand" "u")
+                                (match_operand 3 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFCLASSV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfclass.v\t%0,%2,%3.t");
+  }
+)
+
+;; Single-width float arithmetic, vector-vector form (unmasked); the
+;; mnemonic comes from the VF_OP_VV iterator via <vf_op_attr>.
+(define_insn "riscv_<vf_op_attr>_vv_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "v")
+                       (match_operand:VFANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+         VF_OP_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_attr>.vv\t%0,%1,%2");
+  }
+)
+
+;; Single-width float arithmetic, vector-scalar form (unmasked); the FPR
+;; scalar operand requires TARGET_HARD_FLOAT.
+(define_insn "riscv_<vf_op_attr>_vf_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "v")
+                       (match_operand:<VF_SEM_ATTR> 2 "register_operand" "f")
+                       (match_operand:SI 3 "register_operand" "r")]
+         VF_OP_VF))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_attr>.vf\t%0,%1,%2");
+  }
+)
+
+;; Masked single-width float arithmetic (vector-vector): operand 1 ("0") is
+;; the merge value, operand 4 the mask (.t form).
+(define_insn "riscv_<vf_op_attr>_vv_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:VFANY 2 "register_operand" "u")
+                       (match_operand:VFANY 3 "register_operand" "u")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+         VF_OP_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_attr>.vv\t%0,%2,%3,%4.t");
+  }
+)
+
+;; Masked single-width float arithmetic (vector-scalar): operand 1 ("0") is
+;; the merge value, operand 4 the mask (.t form).
+(define_insn "riscv_<vf_op_attr>_vf_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:VFANY 2 "register_operand" "u")
+                       (match_operand:<VF_SEM_ATTR> 3 "register_operand" "f")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+         VF_OP_VF_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_attr>.vf\t%0,%2,%3,%4.t");
+  }
+)
+
+;; Widening float arithmetic, vector-vector form (unmasked): double-width
+;; result from single-width sources; "&" prevents output/input overlap.
+(define_insn "riscv_<vf_opw_attr>_vv_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:<VFWIDEN_ATTR> 1 "register_operand" "v")
+                         (match_operand:<VFWIDEN_ATTR> 2 "register_operand" "v")
+                         (match_operand:SI 3 "register_operand" "r")]
+         VF_OPW_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_opw_attr>.vv\t%0,%1,%2");
+  }
+)
+
+;; Widening float arithmetic, vector-scalar form (unmasked).
+(define_insn "riscv_<vf_opw_attr>_vf_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:<VFWIDEN_ATTR> 1 "register_operand" "v")
+                         (match_operand:<VFWIDEN_SEM_ATTR> 2 "register_operand" "f")
+                         (match_operand:SI 3 "register_operand" "r")]
+         VF_OPW_VF))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_opw_attr>.vf\t%0,%1,%2");
+  }
+)
+
+;; Masked widening float arithmetic (vector-vector): operand 1 ("0") is the
+;; merge value, operand 4 the mask (.t form).
+(define_insn "riscv_<vf_opw_attr>_vv_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:<VFWIDEN_ATTR> 2 "register_operand" "u")
+                         (match_operand:<VFWIDEN_ATTR> 3 "register_operand" "u")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+         VF_OPW_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_opw_attr>.vv\t%0,%2,%3,%4.t");
+  }
+)
+
+;; Masked widening float arithmetic (vector-scalar): operand 1 ("0") is the
+;; merge value, operand 4 the mask (.t form).
+(define_insn "riscv_<vf_opw_attr>_vf_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:<VFWIDEN_ATTR> 2 "register_operand" "u")
+                         (match_operand:<VFWIDEN_SEM_ATTR> 3 "register_operand" "f")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+         VF_OPW_VF_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_opw_attr>.vf\t%0,%2,%3,%4.t");
+  }
+)
+
+;; Widening float arithmetic, wide-vector + narrow-vector (.wv) form
+;; (unmasked): operand 1 is already double-width.
+(define_insn "riscv_<vf_opw_attr>_wv_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "v")
+                         (match_operand:<VFWIDEN_ATTR> 2 "register_operand" "v")
+                         (match_operand:SI 3 "register_operand" "r")]
+         VF_OPW_WV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_opw_attr>.wv\t%0,%1,%2");
+  }
+)
+
+;; Widening float arithmetic, wide-vector + FPR scalar (.wf) form (unmasked).
+(define_insn "riscv_<vf_opw_attr>_wf_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "v")
+                         (match_operand:<VFWIDEN_SEM_ATTR> 2 "register_operand" "f")
+                         (match_operand:SI 3 "register_operand" "r")]
+         VF_OPW_WF))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_opw_attr>.wf\t%0,%1,%2");
+  }
+)
+
+;; Masked .wv widening float arithmetic: operand 1 ("0") is the merge value,
+;; operand 4 the mask (.t form).
+(define_insn "riscv_<vf_opw_attr>_wv_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:VFWIDEN 2 "register_operand" "u")
+                         (match_operand:<VFWIDEN_ATTR> 3 "register_operand" "u")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+         VF_OPW_WV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_opw_attr>.wv\t%0,%2,%3,%4.t");
+  }
+)
+
+;; Masked .wf widening float arithmetic: operand 1 ("0") is the merge value,
+;; operand 4 the mask (.t form).
+;; Fixed: the original used VF_OPW_VF_MASK here, making this pattern's RTL
+;; identical to the .vf masked pattern while emitting ".wf" assembly -- a
+;; copy-paste of the vf_mask pattern that could let CSE conflate the two.
+;; Use VF_OPW_WF_MASK, matching the unmasked VF_OPW_WF and the masked
+;; VF_OPW_WV_MASK naming (confirm the iterator exists in iterators.md).
+(define_insn "riscv_<vf_opw_attr>_wf_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:VFWIDEN 2 "register_operand" "u")
+                         (match_operand:<VFWIDEN_SEM_ATTR> 3 "register_operand" "f")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+         VF_OPW_WF_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_opw_attr>.wf\t%0,%2,%3,%4.t");
+  }
+)
+
+;; Fused multiply-add family, vector-vector form (unmasked): operand 1 is
+;; tied to the destination ("0") because FMA ops read their accumulator.
+(define_insn "riscv_<vf_op_fused_attr>_vv_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:VFANY 2 "register_operand" "v")
+                       (match_operand:VFANY 3 "register_operand" "v")
+                       (match_operand:SI 4 "register_operand" "r")]
+         VF_OP_FUSED_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_fused_attr>.vv\t%0,%2,%3");
+  }
+)
+
+;; Fused multiply-add family, scalar-vector form (unmasked); operand 1 is
+;; the accumulator tied to the destination.
+(define_insn "riscv_<vf_op_fused_attr>_vf_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "0")
+                       (match_operand:<VF_SEM_ATTR> 2 "register_operand" "f")
+                       (match_operand:VFANY 3 "register_operand" "v")
+                       (match_operand:SI 4 "register_operand" "r")]
+         VF_OP_FUSED_VF))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_fused_attr>.vf\t%0,%2,%3");
+  }
+)
+
+;; Masked fused multiply-add, vector-vector form: operand 2 ("0") is the
+;; accumulator tied to the destination; operand 5 is the mask (.t form).
+;; NOTE(review): operand 1's role relative to operand 2 is not obvious from
+;; the template (only %3,%4,%5 are printed) -- confirm intent in riscv.c.
+(define_insn "riscv_<vf_op_fused_attr>_vv_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "u")
+                       (match_operand:VFANY 2 "register_operand" "0")
+                       (match_operand:VFANY 3 "register_operand" "u")
+                       (match_operand:VFANY 4 "register_operand" "u")
+                       (match_operand 5 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 6 "register_operand" "r")]
+         VF_OP_FUSED_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_fused_attr>.vv\t%0,%3,%4,%5.t");
+  }
+)
+
+;; Masked fused multiply-add, scalar-vector form: operand 2 ("0") is the
+;; accumulator tied to the destination; operand 5 is the mask (.t form).
+(define_insn "riscv_<vf_op_fused_attr>_vf_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "u")
+                       (match_operand:VFANY 2 "register_operand" "0")
+                       (match_operand:<VF_SEM_ATTR> 3 "register_operand" "f")
+                       (match_operand:VFANY 4 "register_operand" "u")
+                       (match_operand 5 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 6 "register_operand" "r")]
+         VF_OP_FUSED_VF_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_fused_attr>.vf\t%0,%3,%4,%5.t");
+  }
+)
+
+;; Widening fused multiply-add, vector-vector form (unmasked): double-width
+;; accumulator (operand 1, tied "0"), single-width multiplicands.
+(define_insn "riscv_<vf_op_fusedw_attr>_vv_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:<VFWIDEN_ATTR> 2 "register_operand" "v")
+                         (match_operand:<VFWIDEN_ATTR> 3 "register_operand" "v")
+                         (match_operand:SI 4 "register_operand" "r")]
+         VF_OP_FUSEDW_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_fusedw_attr>.vv\t%0,%2,%3");
+  }
+)
+
+;; Widening fused multiply-add, scalar-vector form (unmasked).
+(define_insn "riscv_<vf_op_fusedw_attr>_vf_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "0")
+                         (match_operand:<VFWIDEN_SEM_ATTR> 2 "register_operand" "f")
+                         (match_operand:<VFWIDEN_ATTR> 3 "register_operand" "v")
+                         (match_operand:SI 4 "register_operand" "r")]
+         VF_OP_FUSEDW_VF))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_fusedw_attr>.vf\t%0,%2,%3");
+  }
+)
+
+;; Masked widening fused multiply-add, vector-vector form: operand 2 ("0")
+;; is the accumulator tied to the destination; operand 5 is the mask.
+(define_insn "riscv_<vf_op_fusedw_attr>_vv_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "u")
+                         (match_operand:VFWIDEN 2 "register_operand" "0")
+                         (match_operand:<VFWIDEN_ATTR> 3 "register_operand" "u")
+                         (match_operand:<VFWIDEN_ATTR> 4 "register_operand" "u")
+                         (match_operand 5 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 6 "register_operand" "r")]
+         VF_OP_FUSEDW_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_fusedw_attr>.vv\t%0,%3,%4,%5.t");
+  }
+)
+
+;; Masked widening fused multiply-add, scalar-vector form: operand 2 ("0")
+;; is the accumulator tied to the destination; operand 5 is the mask.
+;; Reindented so all unspec operands align, matching the sibling patterns.
+(define_insn "riscv_<vf_op_fusedw_attr>_vf_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:VFWIDEN 1 "register_operand" "u")
+                         (match_operand:VFWIDEN 2 "register_operand" "0")
+                         (match_operand:<VFWIDEN_SEM_ATTR> 3 "register_operand" "f")
+                         (match_operand:<VFWIDEN_ATTR> 4 "register_operand" "u")
+                         (match_operand 5 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 6 "register_operand" "r")]
+         VF_OP_FUSEDW_VF_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_fusedw_attr>.vf\t%0,%3,%4,%5.t");
+  }
+)
+
+;; vfmerge.vfm: select scalar %2 where mask %3 is set, element of %1
+;; otherwise.  The mask is an explicit operand (no .t suffix) per the
+;; vfmerge encoding.  NOTE(review): the unspec is spelled UNSPEC_VFMARGEVFM
+;; ("MARGE") -- the name is defined elsewhere in this patch; renaming it
+;; would need a coordinated change there.
+(define_insn "riscv_vfmerge_vfm_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "u")
+                       (match_operand:<VF_SEM_ATTR> 2 "register_operand" "f")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VFMARGEVFM))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfmerge.vfm\t%0,%1,%2,%3");
+  }
+)
+
+;; vfmv.v.f: splat FPR scalar %1 into every element of vector %0.
+(define_insn "riscv_vfmv_v_f_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:<VF_SEM_ATTR> 1 "register_operand" "f")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFMVVF))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfmv.v.f\t%0,%1");
+  }
+)
+
+;; vfmv.s.f: write FPR scalar %1 into element 0 of vector %0.
+(define_insn "riscv_vfmv_s_f_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:<VF_SEM_ATTR> 1 "register_operand" "f")
+                       (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFMVSF))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfmv.s.f\t%0,%1");
+  }
+)
+
+;; vfmv.f.s: read element 0 of vector %1 into FPR %0.
+(define_insn "riscv_vfmv_f_s_<mode>"
+  [(set (match_operand:<VF_SEM_ATTR> 0 "register_operand" "=f")
+        (unspec:<VF_SEM_ATTR> [(match_operand:VFANY 1 "register_operand" "v")
+                               (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VFMVFS))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR && TARGET_HARD_FLOAT"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vfmv.f.s\t%0,%1");
+  }
+)
+
+;; Float reduction (.vs form), unmasked; the mnemonic comes from VF_OP_VS
+;; via <vf_op_vs_attr>.
+(define_insn "riscv_<vf_op_vs_attr>_vs_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=v")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "v")
+                       (match_operand:VFANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+         VF_OP_VS))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_vs_attr>.vs\t%0,%1,%2");
+  }
+)
+
+;; Masked float reduction (.vs form with %3.t).
+;; NOTE(review): this masked pattern reuses the unmasked VF_OP_VS iterator,
+;; unlike every other masked pattern in this file which uses a distinct
+;; *_MASK unspec.  The differing operand count keeps the RTL distinct, but
+;; confirm whether VF_OP_VS_MASK was intended.
+(define_insn "riscv_<vf_op_vs_attr>_vs_mask_<mode>"
+  [(set (match_operand:VFANY 0 "register_operand" "=u")
+        (unspec:VFANY [(match_operand:VFANY 1 "register_operand" "u")
+                       (match_operand:VFANY 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+         VF_OP_VS))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_op_vs_attr>.vs\t%0,%1,%2,%3.t");
+  }
+)
+
+;; Widening float reduction (.vs form), unmasked: operand 1 is the
+;; single-width source vector, operand 2 the double-width accumulator vector.
+(define_insn "riscv_<vf_opw_vs_attr>_vs_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&v")
+        (unspec:VFWIDEN [(match_operand:<VFWIDEN_ATTR> 1 "register_operand" "v")
+                         (match_operand:VFWIDEN 2 "register_operand" "v")
+                         (match_operand:SI 3 "register_operand" "r")]
+         VF_OPW_VS))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_opw_vs_attr>.vs\t%0,%1,%2");
+  }
+)
+
+;; Masked widening float reduction (.vs form with %3.t).
+;; NOTE(review): reuses the unmasked VF_OPW_VS iterator rather than a
+;; *_MASK unspec, unlike the other masked patterns -- confirm whether
+;; VF_OPW_VS_MASK was intended.
+(define_insn "riscv_<vf_opw_vs_attr>_vs_mask_<mode>"
+  [(set (match_operand:VFWIDEN 0 "register_operand" "=&u")
+        (unspec:VFWIDEN [(match_operand:<VFWIDEN_ATTR> 1 "register_operand" "u")
+                         (match_operand:VFWIDEN 2 "register_operand" "u")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 4 "register_operand" "r")]
+         VF_OPW_VS))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "<vf_opw_vs_attr>.vs\t%0,%1,%2,%3.t");
+  }
+)
\ No newline at end of file
diff --git a/gcc/config/riscv/riscv-v-mem.md b/gcc/config/riscv/riscv-v-mem.md
new file mode 100755
index 00000000000..3261d0191ad
--- /dev/null
+++ b/gcc/config/riscv/riscv-v-mem.md
@@ -0,0 +1,743 @@
+
+;; Unit-stride vector load unspecs: byte/half/word variants, signed and
+;; unsigned (b/bu/h/hu/w/wu), plus SEW-sized "e".
+(define_int_iterator UNSPEC_VLMEM_OP [
+  UNSPEC_VLB UNSPEC_VLBU
+  UNSPEC_VLH UNSPEC_VLHU
+  UNSPEC_VLW UNSPEC_VLWU
+  UNSPEC_VLE
+])
+
+;; Strided vector load unspecs (vlsb/vlsh/... family).
+(define_int_iterator UNSPEC_VLSMEM_OP [
+  UNSPEC_VLSB UNSPEC_VLSBU
+  UNSPEC_VLSH UNSPEC_VLSHU
+  UNSPEC_VLSW UNSPEC_VLSWU
+  UNSPEC_VLSE
+])
+
+;; Indexed (gather) vector load unspecs (vlxb/vlxh/... family).
+(define_int_iterator UNSPEC_VLXMEM_OP [
+  UNSPEC_VLXB UNSPEC_VLXBU
+  UNSPEC_VLXH UNSPEC_VLXHU
+  UNSPEC_VLXW UNSPEC_VLXWU
+  UNSPEC_VLXE
+])
+
+;; Maps every memory-op unspec to its mnemonic width/signedness suffix
+;; (b/bu/h/hu/w/wu/e).  Shared by the unit-stride, strided, indexed, and
+;; unordered-indexed-store (VSUX*) pattern families below.
+(define_int_attr vlmem_op_attr [
+  (UNSPEC_VLB "b") (UNSPEC_VLBU "bu")
+  (UNSPEC_VLH "h") (UNSPEC_VLHU "hu")
+  (UNSPEC_VLW "w") (UNSPEC_VLWU "wu")
+  (UNSPEC_VLE "e")
+
+  (UNSPEC_VLSB "b") (UNSPEC_VLSBU "bu")
+  (UNSPEC_VLSH "h") (UNSPEC_VLSHU "hu")
+  (UNSPEC_VLSW "w") (UNSPEC_VLSWU "wu")
+  (UNSPEC_VLSE "e")
+
+  (UNSPEC_VLXB "b") (UNSPEC_VLXBU "bu")
+  (UNSPEC_VLXH "h") (UNSPEC_VLXHU "hu")
+  (UNSPEC_VLXW "w") (UNSPEC_VLXWU "wu")
+  (UNSPEC_VLXE "e")
+
+  (UNSPEC_VSUXB "b")
+  (UNSPEC_VSUXH "h")
+  (UNSPEC_VSUXW "w")
+  (UNSPEC_VSUXE "e")
+])
+
+;; Unit-stride vector store unspecs.  Presumably the UNSPEC_VL* codes are
+;; deliberately reused for stores (the vlmem_op_attr mapping only needs the
+;; width suffix) -- confirm against the unspec declarations.
+(define_int_iterator UNSPEC_VSMEM_OP [
+  UNSPEC_VLB
+  UNSPEC_VLH
+  UNSPEC_VLW
+  UNSPEC_VLE
+])
+
+;; Strided vector store unspecs (reusing the strided-load codes; see
+;; UNSPEC_VSMEM_OP comment).
+(define_int_iterator UNSPEC_VSSMEM_OP [
+  UNSPEC_VLSB
+  UNSPEC_VLSH
+  UNSPEC_VLSW
+  UNSPEC_VLSE
+])
+
+;; Indexed (scatter) vector store unspecs (reusing the indexed-load codes).
+(define_int_iterator UNSPEC_VSXMEM_OP [
+  UNSPEC_VLXB
+  UNSPEC_VLXH
+  UNSPEC_VLXW
+  UNSPEC_VLXE
+])
+
+;; Unordered indexed vector store unspecs (vsuxb/vsuxh/vsuxw/vsuxe).
+(define_int_iterator UNSPEC_VSUXMEM_OP [
+  UNSPEC_VSUXB
+  UNSPEC_VSUXH
+  UNSPEC_VSUXW
+  UNSPEC_VSUXE
+])
+
+;; Vector Unit-Stride Loads and Stores
+;; ===================================
+
+; load
+
+;; Builtin entry for unit-stride loads: wrap the base-address GPR in a MEM
+;; and hand off to the _split pattern.  (Constraints in a define_expand are
+;; ignored; operand 1 arrives as a plain address register.)
+(define_expand "riscv_vl<vlmem_op_attr>v_<mode>"
+  [(set (match_operand 0 "register_operand" "=v")
+        (unspec:VALL [(match_operand 1 "register_operand" "r")
+                      (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VLMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[1] = gen_rtx_MEM(<MODE>mode, operands[1]);
+    emit_insn(gen_riscv_vl<vlmem_op_attr>v_<mode>_split(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+;; Split stage for unit-stride loads: under TARGET_VECTOR_VSPLIT, emit the
+;; vsetvli for this mode/VL and then the _internal load insn.
+(define_insn_and_split "riscv_vl<vlmem_op_attr>v_<mode>_split"
+  [(set (match_operand 0 "register_operand" "=v")
+        (unspec:VALL [(match_operand:VALL 1 "riscv_vector_mem_operand" "Qmv")
+                      (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VLMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vl<vlmem_op_attr>v_<mode>_internal(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+;; Final unit-stride load insn: vl{b,bu,h,hu,w,wu,e}.v from a Qmv memory
+;; operand.
+(define_insn "riscv_vl<vlmem_op_attr>v_<mode>_internal"
+  [(set (match_operand 0 "register_operand" "=v")
+        (unspec:VALL [(match_operand:VALL 1 "riscv_vector_mem_operand" "Qmv")
+                      (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VLMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vl<vlmem_op_attr>.v\t%0,%1");
+  }
+)
+
+; load mask
+
+;; Builtin entry for masked unit-stride loads: operand 1 is the merge value,
+;; operand 2 the base address (wrapped in a MEM here), operand 3 the mask.
+(define_expand "riscv_vl<vlmem_op_attr>v_mask_<mode>"
+  [(set (match_operand 0 "register_operand" "=u")
+        (unspec:VALL [(match_operand 1 "register_operand" "0")
+                      (match_operand 2 "register_operand" "r")
+                      (match_operand 3 "vmask_mode_register_operand" "w")
+                      (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VLMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[2] = gen_rtx_MEM(<MODE>mode, operands[2]);
+    emit_insn(gen_riscv_vl<vlmem_op_attr>v_mask_<mode>_split(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Split stage for masked unit-stride loads: emit vsetvli, then _internal.
+(define_insn_and_split "riscv_vl<vlmem_op_attr>v_mask_<mode>_split"
+  [(set (match_operand 0 "register_operand" "=u")
+        (unspec:VALL [(match_operand 1 "register_operand" "0")
+                      (match_operand:VALL 2 "riscv_vector_mem_operand" "Qmv")
+                      (match_operand 3 "vmask_mode_register_operand" "w")
+                      (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VLMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vl<vlmem_op_attr>v_mask_<mode>_internal(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Final masked unit-stride load insn (.t form, mask in operand 3).
+(define_insn "riscv_vl<vlmem_op_attr>v_mask_<mode>_internal"
+  [(set (match_operand 0 "register_operand" "=u")
+        (unspec:VALL [(match_operand 1 "register_operand" "0")
+                      (match_operand:VALL 2 "riscv_vector_mem_operand" "Qmv")
+                      (match_operand 3 "vmask_mode_register_operand" "w")
+                      (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VLMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vl<vlmem_op_attr>.v\t%0,%2,%3.t");
+  }
+)
+
+; store
+
+;; Builtin entry for unit-stride stores: operand 0 arrives as the base
+;; address GPR and is wrapped in a MEM before delegating to _split.
+(define_expand "riscv_vs<vlmem_op_attr>v_<mode>"
+  [(set (match_operand 0 "register_operand" "=r")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                      (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VSMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[0] = gen_rtx_MEM(<MODE>mode, operands[0]);
+    emit_insn(gen_riscv_vs<vlmem_op_attr>v_<mode>_split(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+;; Split stage for unit-stride stores: emit vsetvli, then _internal.
+(define_insn_and_split "riscv_vs<vlmem_op_attr>v_<mode>_split"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                      (match_operand:SI 2 "register_operand" "r")]
+         UNSPEC_VSMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vs<vlmem_op_attr>v_<mode>_internal(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+(define_insn "riscv_vs<vlmem_op_attr>v_<mode>_internal"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand:SI 2 "register_operand" "r")]
+                        UNSPEC_VSMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vs<vlmem_op_attr>.v\t%1,%0");
+  }
+)
+
+; store mask
+
+;; Masked unit-stride store.  Same three-stage scheme as the unmasked
+;; store; operand 2 is the mask register, operand 3 the VL.
+(define_expand "riscv_vs<vlmem_op_attr>v_mask_<mode>"
+  [(set (match_operand 0 "register_operand" "=r")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 3 "register_operand" "r")]
+                        UNSPEC_VSMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[0] = gen_rtx_MEM(<MODE>mode, operands[0]);
+    emit_insn(gen_riscv_vs<vlmem_op_attr>v_mask_<mode>_split(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Split stage: pair the vsetvli with the masked store post-reload.
+(define_insn_and_split "riscv_vs<vlmem_op_attr>v_mask_<mode>_split"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 3 "register_operand" "r")]
+                        UNSPEC_VSMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_vs<vlmem_op_attr>v_mask_<mode>_internal(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Final form: "vs<op>.v vs3,(mem),vm.t".
+(define_insn "riscv_vs<vlmem_op_attr>v_mask_<mode>_internal"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 3 "register_operand" "r")]
+                        UNSPEC_VSMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vs<vlmem_op_attr>.v\t%1,%0,%2.t");
+  }
+)
+
+;; Vector Strided Loads and Stores
+;; ===============================
+
+; load
+
+;; Strided load "vls<op>.v": expand wraps the base (operand 1) in a MEM;
+;; operand 2 is the byte stride, operand 3 the VL.
+(define_expand "riscv_vls<vlmem_op_attr>v_<mode>"
+  [(set (match_operand 0 "register_operand" "=v")
+        (unspec:VALL [(match_operand 1 "register_operand" "r")
+                         (match_operand 2 "register_operand" "r")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VLSMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[1] = gen_rtx_MEM(<MODE>mode, operands[1]);
+    emit_insn(gen_riscv_vls<vlmem_op_attr>v_<mode>_split(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Split stage: emit vsetvli + raw strided load after reload/scheduling.
+(define_insn_and_split "riscv_vls<vlmem_op_attr>v_<mode>_split"
+  [(set (match_operand 0 "register_operand" "=v")
+        (unspec:VALL [(match_operand:VALL 1 "riscv_vector_mem_operand" "Qmv")
+                         (match_operand 2 "register_operand" "r")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VLSMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_vls<vlmem_op_attr>v_<mode>_internal(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Final form: "vls<op>.v vd,(mem),stride".  VL (operand 3) not printed.
+(define_insn "riscv_vls<vlmem_op_attr>v_<mode>_internal"
+  [(set (match_operand 0 "register_operand" "=v")
+        (unspec:VALL [(match_operand:VALL 1 "riscv_vector_mem_operand" "Qmv")
+                         (match_operand 2 "register_operand" "r")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VLSMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vls<vlmem_op_attr>.v\t%0,%1,%2");
+  }
+)
+
+; load mask
+
+;; Masked strided load: operand 1 is the merge value (tied to dest),
+;; operand 2 the base, operand 3 the stride, operand 4 the mask,
+;; operand 5 the VL.
+(define_expand "riscv_vls<vlmem_op_attr>v_mask_<mode>"
+  [(set (match_operand 0 "register_operand" "=u")
+        (unspec:VALL [(match_operand 1 "register_operand" "0")
+                 (match_operand 2 "register_operand" "r")
+                                (match_operand 3 "register_operand" "r")
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand 5 "register_operand" "r")]
+                        UNSPEC_VLSMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[2] = gen_rtx_MEM(<MODE>mode, operands[2]);
+    emit_insn(gen_riscv_vls<vlmem_op_attr>v_mask_<mode>_split(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+;; Split stage for the masked strided load.
+(define_insn_and_split "riscv_vls<vlmem_op_attr>v_mask_<mode>_split"
+  [(set (match_operand 0 "register_operand" "=u")
+        (unspec:VALL [(match_operand 1 "register_operand" "0")
+                 (match_operand:VALL 2 "riscv_vector_mem_operand" "Qmv")
+                                (match_operand 3 "register_operand" "r")
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand 5 "register_operand" "r")]
+                        UNSPEC_VLSMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+    emit_insn(gen_riscv_vls<vlmem_op_attr>v_mask_<mode>_internal(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+;; Final form: "vls<op>.v vd,(mem),stride,vm.t".
+(define_insn "riscv_vls<vlmem_op_attr>v_mask_<mode>_internal"
+  [(set (match_operand 0 "register_operand" "=u")
+        (unspec:VALL [(match_operand 1 "register_operand" "0")
+                 (match_operand:VALL 2 "riscv_vector_mem_operand" "Qmv")
+                                (match_operand 3 "register_operand" "r")
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand 5 "register_operand" "r")]
+                        UNSPEC_VLSMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vls<vlmem_op_attr>.v\t%0,%2,%3,%4.t");
+  }
+)
+
+; store
+
+;; Strided store "vss<op>.v": operand 0 is the base address (wrapped in a
+;; MEM), operand 1 the byte stride, operand 2 the data vector, operand 3
+;; the VL.
+(define_expand "riscv_vss<vlmem_op_attr>v_<mode>"
+  [(set (match_operand 0 "register_operand" "=r")
+        (unspec:VALL [(match_operand 1 "register_operand" "r")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VSSMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[0] = gen_rtx_MEM(<MODE>mode, operands[0]);
+    emit_insn(gen_riscv_vss<vlmem_op_attr>v_<mode>_split(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Split stage: emit vsetvli + raw strided store after reload/scheduling.
+(define_insn_and_split "riscv_vss<vlmem_op_attr>v_<mode>_split"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "r")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VSSMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_vss<vlmem_op_attr>v_<mode>_internal(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Final form: "vss<op>.v vs3,(mem),stride".
+(define_insn "riscv_vss<vlmem_op_attr>v_<mode>_internal"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "r")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VSSMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vss<vlmem_op_attr>.v\t%2,%0,%1");
+  }
+)
+
+; store mask
+
+;; Masked strided store: operand 3 is the mask register, operand 4 the VL.
+(define_expand "riscv_vss<vlmem_op_attr>v_mask_<mode>"
+  [(set (match_operand 0 "register_operand" "=r")
+        (unspec:VALL [(match_operand 1 "register_operand" "r")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand 4 "register_operand" "r")]
+                        UNSPEC_VSSMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[0] = gen_rtx_MEM(<MODE>mode, operands[0]);
+    emit_insn(gen_riscv_vss<vlmem_op_attr>v_mask_<mode>_split(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Split stage for the masked strided store.
+(define_insn_and_split "riscv_vss<vlmem_op_attr>v_mask_<mode>_split"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "r")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand 4 "register_operand" "r")]
+                        UNSPEC_VSSMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vss<vlmem_op_attr>v_mask_<mode>_internal(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Final form: "vss<op>.v vs3,(mem),stride,vm.t".
+(define_insn "riscv_vss<vlmem_op_attr>v_mask_<mode>_internal"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "r")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand 4 "register_operand" "r")]
+                        UNSPEC_VSSMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vss<vlmem_op_attr>.v\t%2,%0,%1,%3.t");
+  }
+)
+
+;; Vector Indexed Loads and Stores
+;; ===============================
+
+; load
+
+;; Indexed load "vlx<op>.v": operand 1 is the base (wrapped in a MEM),
+;; operand 2 the index vector, operand 3 the VL.
+(define_expand "riscv_vlx<vlmem_op_attr>v_<mode>"
+  [(set (match_operand 0 "register_operand" "=v")
+        (unspec:VALL [(match_operand 1 "register_operand" "r")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VLXMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[1] = gen_rtx_MEM(<MODE>mode, operands[1]);
+    emit_insn(gen_riscv_vlx<vlmem_op_attr>v_<mode>_split(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Split stage: emit vsetvli + raw indexed load after reload/scheduling.
+(define_insn_and_split "riscv_vlx<vlmem_op_attr>v_<mode>_split"
+  [(set (match_operand 0 "register_operand" "=v")
+        (unspec:VALL [(match_operand:VALL 1 "riscv_vector_mem_operand" "Qmv")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VLXMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_vlx<vlmem_op_attr>v_<mode>_internal(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Final form: "vlx<op>.v vd,(mem),vs2".
+(define_insn "riscv_vlx<vlmem_op_attr>v_<mode>_internal"
+  [(set (match_operand 0 "register_operand" "=v")
+        (unspec:VALL [(match_operand:VALL 1 "riscv_vector_mem_operand" "Qmv")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VLXMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vlx<vlmem_op_attr>.v\t%0,%1,%2");
+  }
+)
+
+; load mask
+
+;; Masked indexed load: operand 1 merges into the destination ("0"),
+;; operand 2 is the base, operand 3 the index vector, operand 4 the mask,
+;; operand 5 the VL.
+(define_expand "riscv_vlx<vlmem_op_attr>v_mask_<mode>"
+  [(set (match_operand 0 "register_operand" "=u")
+        (unspec:VALL [(match_operand 1 "register_operand" "0")
+                 (match_operand 2 "register_operand" "r")
+                                (match_operand 3 "register_operand" "u")
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand 5 "register_operand" "r")]
+                        UNSPEC_VLXMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[2] = gen_rtx_MEM(<MODE>mode, operands[2]);
+    emit_insn(gen_riscv_vlx<vlmem_op_attr>v_mask_<mode>_split(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+;; Split stage for the masked indexed load.
+(define_insn_and_split "riscv_vlx<vlmem_op_attr>v_mask_<mode>_split"
+  [(set (match_operand 0 "register_operand" "=u")
+        (unspec:VALL [(match_operand 1 "register_operand" "0")
+                 (match_operand:VALL 2 "riscv_vector_mem_operand" "Qmv")
+                                (match_operand 3 "register_operand" "u")
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand 5 "register_operand" "r")]
+                        UNSPEC_VLXMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+    emit_insn(gen_riscv_vlx<vlmem_op_attr>v_mask_<mode>_internal(operands[0], operands[1], operands[2], operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+;; Final form: "vlx<op>.v vd,(mem),vs2,vm.t".
+(define_insn "riscv_vlx<vlmem_op_attr>v_mask_<mode>_internal"
+  [(set (match_operand 0 "register_operand" "=u")
+        (unspec:VALL [(match_operand 1 "register_operand" "0")
+                 (match_operand:VALL 2 "riscv_vector_mem_operand" "Qmv")
+                                (match_operand 3 "register_operand" "u")
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand 5 "register_operand" "r")]
+                        UNSPEC_VLXMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vlx<vlmem_op_attr>.v\t%0,%2,%3,%4.t");
+  }
+)
+
+; store
+
+;; Indexed (ordered) store "vsx<op>.v": operand 0 is the base address
+;; (wrapped in a MEM), operand 1 the index vector, operand 2 the data
+;; vector, operand 3 the VL.
+(define_expand "riscv_vsx<vlmem_op_attr>v_<mode>"
+  [(set (match_operand 0 "register_operand" "=r")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VSXMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[0] = gen_rtx_MEM(<MODE>mode, operands[0]);
+    emit_insn(gen_riscv_vsx<vlmem_op_attr>v_<mode>_split(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Split stage: emit vsetvli + raw indexed store after reload/scheduling.
+(define_insn_and_split "riscv_vsx<vlmem_op_attr>v_<mode>_split"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VSXMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_vsx<vlmem_op_attr>v_<mode>_internal(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Final form: "vsx<op>.v vs3,(mem),vs2".
+(define_insn "riscv_vsx<vlmem_op_attr>v_<mode>_internal"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VSXMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vsx<vlmem_op_attr>.v\t%2,%0,%1");
+  }
+)
+
+; store mask
+
+;; Masked indexed store: operand 3 is the mask register, operand 4 the VL.
+(define_expand "riscv_vsx<vlmem_op_attr>v_mask_<mode>"
+  [(set (match_operand 0 "register_operand" "=r")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand 4 "register_operand" "r")]
+                        UNSPEC_VSXMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[0] = gen_rtx_MEM(<MODE>mode, operands[0]);
+    emit_insn(gen_riscv_vsx<vlmem_op_attr>v_mask_<mode>_split(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Split stage for the masked indexed store.
+(define_insn_and_split "riscv_vsx<vlmem_op_attr>v_mask_<mode>_split"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand 4 "register_operand" "r")]
+                        UNSPEC_VSXMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vsx<vlmem_op_attr>v_mask_<mode>_internal(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Final form: "vsx<op>.v vs3,(mem),vs2,vm.t".
+(define_insn "riscv_vsx<vlmem_op_attr>v_mask_<mode>_internal"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand 4 "register_operand" "r")]
+                        UNSPEC_VSXMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vsx<vlmem_op_attr>.v\t%2,%0,%1,%3.t");
+  }
+)
+
+; unordered store
+
+;; Unordered indexed store "vsux<op>.v": identical operand layout to the
+;; ordered vsx patterns, but element stores may complete in any order.
+(define_expand "riscv_vsux<vlmem_op_attr>v_<mode>"
+  [(set (match_operand 0 "register_operand" "=r")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VSUXMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[0] = gen_rtx_MEM(<MODE>mode, operands[0]);
+    emit_insn(gen_riscv_vsux<vlmem_op_attr>v_<mode>_split(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Split stage: emit vsetvli + raw unordered store after reload/scheduling.
+(define_insn_and_split "riscv_vsux<vlmem_op_attr>v_<mode>_split"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VSUXMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_vsux<vlmem_op_attr>v_<mode>_internal(operands[0], operands[1], operands[2], operands[3]));
+    DONE;
+  }
+)
+
+;; Final form: "vsux<op>.v vs3,(mem),vs2".
+(define_insn "riscv_vsux<vlmem_op_attr>v_<mode>_internal"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "register_operand" "r")]
+                        UNSPEC_VSUXMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vsux<vlmem_op_attr>.v\t%2,%0,%1");
+  }
+)
+
+; unordered store mask
+
+;; Masked unordered indexed store: operand 3 is the mask, operand 4 the VL.
+(define_expand "riscv_vsux<vlmem_op_attr>v_mask_<mode>"
+  [(set (match_operand 0 "register_operand" "=r")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand 4 "register_operand" "r")]
+                        UNSPEC_VSUXMEM_OP))]
+  "TARGET_VECTOR"
+  {
+    operands[0] = gen_rtx_MEM(<MODE>mode, operands[0]);
+    emit_insn(gen_riscv_vsux<vlmem_op_attr>v_mask_<mode>_split(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Split stage for the masked unordered indexed store.
+(define_insn_and_split "riscv_vsux<vlmem_op_attr>v_mask_<mode>_split"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand 4 "register_operand" "r")]
+                        UNSPEC_VSUXMEM_OP))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vsux<vlmem_op_attr>v_mask_<mode>_internal(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Final form: "vsux<op>.v vs3,(mem),vs2,vm.t".
+(define_insn "riscv_vsux<vlmem_op_attr>v_mask_<mode>_internal"
+  [(set (match_operand:VALL 0 "riscv_vector_mem_operand" "=Qmv")
+        (unspec:VALL [(match_operand 1 "register_operand" "v")
+                         (match_operand 2 "register_operand" "v")
+                         (match_operand 3 "vmask_mode_register_operand" "w")
+                         (match_operand 4 "register_operand" "r")]
+                        UNSPEC_VSUXMEM_OP))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vsux<vlmem_op_attr>.v\t%2,%0,%1,%3.t");
+  }
+)
+
diff --git a/gcc/config/riscv/riscv-v.h b/gcc/config/riscv/riscv-v.h
index c17242b9d47..da31a8cc5f7 100755
--- a/gcc/config/riscv/riscv-v.h
+++ b/gcc/config/riscv/riscv-v.h
@@ -24,6 +24,12 @@ along with GCC; see the file COPYING3.  If not see

 #define TARGET_VECTOR_VLEN(BITS) (TARGET_VECTOR && riscv_vlen == VLEN_##BITS )

+/* Gate for splitting the "#" vector patterns into vsetvli + real insn:
+   only after reload, and -- when the post-reload scheduler runs -- only
+   once it has finished (sched_finish_global is presumably set by the
+   TARGET_SCHED_FINISH_GLOBAL hook added in this series; confirm), so the
+   scheduler cannot separate a vsetvli from the insn that depends on it.  */
+#define TARGET_VECTOR_VSPLIT (reload_completed \
+  && ((flag_schedule_insns_after_reload && sched_finish_global) \
+      || !flag_schedule_insns_after_reload))
+
+/* Whether vsetvli insertion/pruning (VSPN) is enabled; currently always.  */
+#define TARGET_VECTOR_VSPN true
+
 #define V_REG_FIRST 66
 #define V_REG_LAST  97
 #define V_REG_NUM   (V_REG_LAST - V_REG_FIRST + 1)
diff --git a/gcc/config/riscv/riscv-v.md b/gcc/config/riscv/riscv-v.md
new file mode 100755
index 00000000000..c315dd1468a
--- /dev/null
+++ b/gcc/config/riscv/riscv-v.md
@@ -0,0 +1,2958 @@
+;; vsetvli/vsetvl
+
+;; vsetvli with an immediate SEW (operand 2) and LMUL (operand 3).
+;; The template is built with snprintf: "%%0"/"%%1" collapse to "%0"/"%1"
+;; so GCC's operand printer substitutes them afterwards, while the two
+;; "%s" slots receive the textual SEW/LMUL names.  NOTE(review): the
+;; static buffer is not reentrant, but the returned template is consumed
+;; immediately by final(), matching common port practice.
+(define_insn "riscv_vsetvli"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (unspec:SI
+          [(match_operand:SI 1 "register_operand" "r")
+           (match_operand 2 "immediate_operand" "i")
+           (match_operand 3 "immediate_operand" "i")]
+          UNSPEC_VSETVLI))]
+  "TARGET_VECTOR"
+  {
+    const char *pattern = "vsetvli\t%%0,%%1,%s,%s";
+    static char buf[128] = {0};
+    snprintf(buf, sizeof(buf), pattern,
+             riscv_output_vector_sew(INTVAL(operands[2])),
+             riscv_output_vector_lmul(INTVAL(operands[3])));
+    return buf;
+  }
+)
+
+;; vsetvli with AVL = x0: requests the maximum vector length for the
+;; given SEW (operand 1) and LMUL (operand 2), returning it in operand 0.
+;; Same snprintf template scheme as riscv_vsetvli above.
+(define_insn "riscv_vsetvli_max"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (unspec:SI
+          [(match_operand 1 "immediate_operand" "i")
+           (match_operand 2 "immediate_operand" "i")]
+          UNSPEC_VSETVLI_MAX))]
+  "TARGET_VECTOR"
+  {
+    const char *pattern = "vsetvli\t%%0,x0,%s,%s";
+    static char buf[128] = {0};
+    snprintf(buf, sizeof(buf), pattern,
+             riscv_output_vector_sew(INTVAL(operands[1])),
+             riscv_output_vector_lmul(INTVAL(operands[2])));
+    return buf;
+  }
+)
+
+;; Register-form vsetvl: vtype comes from a register (operand 2) rather
+;; than an immediate SEW/LMUL pair.
+(define_insn "riscv_vsetvl"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (unspec:SI
+          [(match_operand:SI 1 "register_operand" "r")
+           (match_operand:SI 2 "register_operand" "r")]
+          UNSPEC_VSETVLR))]
+  "TARGET_VECTOR"
+  "vsetvl\t%0,%1,%2"
+)
+
+;; Generic vector move.  The expand only legitimizes mem-to-mem moves by
+;; forcing the source into a register, then falls through to the SET,
+;; which is matched by riscv_mov_<mode> below.
+(define_expand "mov<mode>"
+  [(set (match_operand:VANY128 0 "nonimmediate_operand"  "")
+        (match_operand:VANY128 1 "nonimmediate_operand"  ""))]
+  "TARGET_VECTOR"
+  {
+    if (can_create_pseudo_p ())
+      {
+        if (!REG_P (operands[0]))
+          operands[1] = force_reg (<MODE>mode, operands[1]);
+      }
+  }
+)
+
+;; Split a vector move into vsetvli plus vmv.v.v / vle / vse depending on
+;; the operand classes.  operands[2] = hard register 0 -- presumably x0,
+;; so the vsetvli keeps/uses VLMAX semantics; confirm against
+;; riscv_emit_vsetvli.  NOTE(review): the "m" constraint here differs from
+;; the "Qmv" used by the other vector memory patterns -- verify intended.
+(define_insn_and_split "riscv_mov_<mode>"
+  [(set (match_operand:VANY128 0 "nonimmediate_operand"  "=v,v,m")
+        (match_operand:VANY128 1 "nonimmediate_operand"  "v,m,v"))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    operands[2] = gen_rtx_REG(SImode, 0);
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+
+    if (REG_P(operands[0]) && REG_P(operands[1]))
+      emit_insn(gen_riscv_vmvvv_<mode>_internal(operands[0], operands[1], operands[2]));
+    else if (REG_P(operands[0]) && MEM_P(operands[1]))
+      emit_insn(gen_riscv_vlev_<mode>_internal(operands[0], operands[1], operands[2]));
+    else if (REG_P(operands[1]) && MEM_P(operands[0]))
+      emit_insn(gen_riscv_vsev_<mode>_internal(operands[0], operands[1], operands[2]));
+    else
+      gcc_unreachable ();
+
+    DONE;
+  }
+)
+
+(include "riscv-v-mem.md")
+
+;; vmcmp instruction
+
+;; Integer compare, vector-vector form.  Produces a mask (VMASK) from two
+;; VANY sources; operand 3 is the VL.  Earlyclobber "&" keeps the mask
+;; destination distinct from the sources.
+(define_insn_and_split "riscv_<vop>vv_<VANY:mode>_<VMASK:mode>"
+   [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+         (unspec:VMASK [(match_operand:VANY 1 "register_operand" "v")
+                  (match_operand:VANY 2 "register_operand" "v")
+                  (match_operand:SI 3 "register_operand" "r")]
+                 VECTOR_INT_CMP_VV))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VANY:MODE>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_<vop>vv_<VANY:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+    DONE;
+   }
+)
+
+;; Final form: "<vop>.vv vd,vs1,vs2".
+(define_insn "riscv_<vop>vv_<VANY:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+         (unspec:VMASK [(match_operand:VANY 1 "register_operand" "v")
+                  (match_operand:VANY 2 "register_operand" "v")
+                  (match_operand:SI 3 "register_operand" "r")]
+                 VECTOR_INT_CMP_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VANY:MODE>mode, "<vop>.vv\t%0,%1,%2");
+  }
+)
+
+;; Integer compare, vector-scalar form: operand 2 is an x-register scalar
+;; of the element's scalar mode.
+(define_insn_and_split "riscv_<vop>vx_<VANY:mode>_<VMASK:mode>"
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+        (unspec:VMASK [(match_operand:VANY 1 "register_operand" "v")
+                 (match_operand:<VANY:VANY_SCALAR_attr> 2 "register_operand" "r")
+                 (match_operand:SI 3 "register_operand" "r")]
+         VECTOR_INT_CMP_VX))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VANY:MODE>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_<vop>vx_<VANY:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+    DONE;
+   }
+)
+
+;; Final form: "<vop>.vx vd,vs2,rs1".
+(define_insn "riscv_<vop>vx_<VANY:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+         (unspec:VMASK [(match_operand:VANY 1 "register_operand" "v")
+                  (match_operand:<VANY:VANY_SCALAR_attr> 2 "register_operand" "r")
+                  (match_operand:SI 3 "register_operand" "r")]
+                 VECTOR_INT_CMP_VX))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VANY:MODE>mode, "<vop>.vx\t%0,%1,%2");
+   }
+)
+
+;; Integer compare, vector-immediate form: operand 2 is a const_int
+;; emitted directly as the 5-bit immediate of "<vop>.vi".
+(define_insn_and_split "riscv_<vop>vi_<VANY:mode>_<VMASK:mode>"
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+        (unspec:VMASK [(match_operand:VANY 1 "register_operand" "v")
+                 (match_operand:<VANY:VANY_SCALAR_attr> 2 "const_int_operand" "i")
+                 (match_operand:SI 3 "register_operand" "r")]
+         VECTOR_INT_CMP_VI))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VANY:MODE>mode, operands[3], operands[3]);
+    emit_insn(gen_riscv_<vop>vi_<VANY:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+    DONE;
+   }
+)
+
+;; Final form: "<vop>.vi vd,vs2,imm".
+(define_insn "riscv_<vop>vi_<VANY:mode>_internal_<VMASK:mode>"
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+        (unspec:VMASK [(match_operand:VANY 1 "register_operand" "v")
+                 (match_operand:<VANY:VANY_SCALAR_attr> 2 "const_int_operand" "i")
+                 (match_operand:SI 3 "register_operand" "r")]
+         VECTOR_INT_CMP_VI))
+  (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VANY:MODE>mode, "<vop>.vi\t%0,%1,%2");
+   }
+)
+
+;; Masked integer compare, vector-vector form.  Operand 1 is the merge
+;; mask tied to the destination; operand 4 is the governing mask and
+;; operand 5 the VL.
+;;
+;; Fix: the vsetvli must be configured for the *element* mode being
+;; compared, i.e. <VANY:MODE>mode.  The original used bare <MODE>mode,
+;; which is ambiguous here because two iterators (VANY and VMASK) are in
+;; scope, and risks resolving to the mask mode; every sibling pattern
+;; (unmasked vv, masked vx, masked vi) spells it <VANY:MODE>mode.
+(define_insn_and_split "riscv_<vop_mask>vv_mask_<VANY:mode>_<VMASK:mode>"
+   [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&u")
+         (unspec:VMASK [(match_operand:VMASK 1 "vmask_mode_register_operand" "0")
+                  (match_operand:VANY 2 "register_operand" "u")
+                  (match_operand:VANY 3 "register_operand" "u")
+                  (match_operand:VMASK 4 "vmask_mode_register_operand" "w")
+                  (match_operand:SI 5 "register_operand" "r")]
+                 VECTOR_INT_CMP_VV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VANY:MODE>mode, operands[5], operands[5]);
+    emit_insn(gen_riscv_<vop_mask>vv_mask_<VANY:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2],
+                                                      operands[3], operands[4], operands[5]));
+    DONE;
+  }
+)
+
+;; Final masked vv compare: "<vop_mask>.vv vd,vs1,vs2,vm.t".  Operand 1
+;; (merge value, tied "0") and operand 5 (VL) are not printed.
+(define_insn "riscv_<vop_mask>vv_mask_<VANY:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&u")
+         (unspec:VMASK [(match_operand:VMASK 1 "vmask_mode_register_operand" "0")
+                  (match_operand:VANY 2 "register_operand" "u")
+                  (match_operand:VANY 3 "register_operand" "u")
+                  (match_operand:VMASK 4 "vmask_mode_register_operand" "w")
+                  (match_operand:SI 5 "register_operand" "r")]
+                 VECTOR_INT_CMP_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VANY:MODE>mode, "<vop_mask>.vv\t%0,%2,%3,%4.t");
+  }
+)
+
+;; Masked integer compare, vector-scalar form.  Operand 3 is an
+;; x-register scalar; operand 4 the governing mask; operand 5 the VL.
+(define_insn_and_split "riscv_<vop_mask>vx_mask_<VANY:mode>_<VMASK:mode>"
+   [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&u")
+         (unspec:VMASK [(match_operand:VMASK 1 "vmask_mode_register_operand" "0")
+                  (match_operand:VANY 2 "register_operand" "u")
+                  (match_operand:<VANY:VANY_SCALAR_attr> 3 "register_operand" "r")
+                  (match_operand:VMASK 4 "vmask_mode_register_operand" "w")
+                  (match_operand:SI 5 "register_operand" "r")]
+                 VECTOR_INT_CMP_VX_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VANY:MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vx_mask_<VANY:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2],
+                                                      operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Final masked vx compare: "<vop_mask>.vx vd,vs2,rs1,vm.t".
+(define_insn "riscv_<vop_mask>vx_mask_<VANY:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&u")
+         (unspec:VMASK [(match_operand:VMASK 1 "vmask_mode_register_operand" "0")
+                  (match_operand:VANY 2 "register_operand" "u")
+                  (match_operand:<VANY:VANY_SCALAR_attr> 3 "register_operand" "r")
+                  (match_operand:VMASK 4 "vmask_mode_register_operand" "w")
+                  (match_operand:SI 5 "register_operand" "r")]
+                 VECTOR_INT_CMP_VX_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VANY:MODE>mode, "<vop_mask>.vx\t%0,%2,%3,%4.t");
+   }
+)
+
+(define_insn_and_split "riscv_<vop_mask>vi_mask_<VANY:mode>_<VMASK:mode>"
+   [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&u")
+         (unspec:VMASK [(match_operand:VMASK 1 "vmask_mode_register_operand" "0")
+                  (match_operand:VANY 2 "register_operand" "u")
+                  (match_operand:<VANY:VANY_SCALAR_attr> 3 "const_int_operand" "i")
+                  (match_operand:VMASK 4 "vmask_mode_register_operand" "w")
+                  (match_operand:SI 5 "register_operand" "r")]
+                 VECTOR_INT_CMP_VI_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VANY:MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vi_mask_<VANY:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+(define_insn "riscv_<vop_mask>vi_mask_<VANY:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&u")
+         (unspec:VMASK [(match_operand:VMASK 1 "vmask_mode_register_operand" "0")
+                  (match_operand:VANY 2 "register_operand" "u")
+                  (match_operand:<VANY:VANY_SCALAR_attr> 3 "const_int_operand" "i")
+                  (match_operand:VMASK 4 "vmask_mode_register_operand" "w")
+                  (match_operand:SI 5 "register_operand" "r")]
+                 VECTOR_INT_CMP_VI_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VANY:MODE>mode, "<vop_mask>.vi\t%0,%2,%3,%4.t");
+   }
+)
+
+;; vmask instruction
+
+;; Mask-register operations.  The (unspec:VALL [(const_int 0)] UNSPEC_TYPE)
+;; term carries the element mode (VALL) so the split can emit the right
+;; vsetvli; it does not correspond to a real operand.  As elsewhere, the
+;; split form emits vsetvli + the *_internal insn, and the internal insn
+;; is tagged with (clobber (const_int 0)).
+
+;; Mask-mask logical op (<vop>.mm), e.g. over two mask registers.
+(define_insn_and_split "riscv_<vop>mm_<VALL:mode>_<VMASK:mode>"
+   [(set (match_operand:VMASK                   0 "vmask_mode_register_operand" "=v")
+         (unspec:VMASK [(match_operand:VMASK          1 "vmask_mode_register_operand" "v")
+                        (match_operand:VMASK    2 "vmask_mode_register_operand" "v")
+                        (match_operand:SI 3 "register_operand" "r")
+                        (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                       VECTOR_MASK_LOGICAL))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VALL:MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>mm_<VALL:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split mask-mask logical op.
+(define_insn "riscv_<vop>mm_<VALL:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK                   0 "vmask_mode_register_operand" "=v")
+         (unspec:VMASK [(match_operand:VMASK          1 "vmask_mode_register_operand" "v")
+                        (match_operand:VMASK    2 "vmask_mode_register_operand" "v")
+                        (match_operand:SI 3 "register_operand" "r")
+                        (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                       VECTOR_MASK_LOGICAL))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VALL:MODE>mode, "<vop>.mm\t%0,%1,%2");
+   }
+)
+
+;; Unary mask op <vop>.m with a mask source (VECTOR_MASK_PSEUDO_CAST).
+(define_insn_and_split "riscv_<vop>m_<VALL:mode>_<VMASK:mode>"
+   [(set (match_operand:VMASK                   0 "vmask_mode_register_operand" "=v")
+         (unspec:VMASK [(match_operand:VMASK          1 "vmask_mode_register_operand" "v")
+                        (match_operand:SI 2 "register_operand" "r")
+                        (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                       VECTOR_MASK_PSEUDO_CAST))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VALL:MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_<vop>m_<VALL:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+;; Post-split form of the above.
+(define_insn "riscv_<vop>m_<VALL:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK                   0 "vmask_mode_register_operand" "=v")
+         (unspec:VMASK [(match_operand:VMASK          1 "vmask_mode_register_operand" "v")
+                        (match_operand:SI 2 "register_operand" "r")
+                        (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                       VECTOR_MASK_PSEUDO_CAST))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VALL:MODE>mode, "<vop>.m\t%0,%1");
+  }
+)
+
+;; Source-less mask op <vop>.m (VECTOR_MASK_PSEUDO_SET); only the vl
+;; operand is real.  Name collisions with the PSEUDO_CAST pattern above
+;; are avoided by the distinct <vop> values of each code iterator.
+(define_insn_and_split "riscv_<vop>m_<VALL:mode>_<VMASK:mode>"
+   [(set (match_operand:VMASK                   0 "vmask_mode_register_operand" "=v")
+         (unspec:VMASK [(match_operand:SI 1 "register_operand" "r")
+                        (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                       VECTOR_MASK_PSEUDO_SET))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VALL:MODE>mode, operands[1], operands[1]);
+    emit_insn(gen_riscv_<vop>m_<VALL:mode>_internal_<VMASK:mode>(operands[0], operands[1]));
+    DONE;
+  }
+)
+
+;; Post-split source-less mask op.
+(define_insn "riscv_<vop>m_<VALL:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK                   0 "vmask_mode_register_operand" "=v")
+         (unspec:VMASK [(match_operand:SI 1 "register_operand" "r")
+                        (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                       VECTOR_MASK_PSEUDO_SET))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VALL:MODE>mode, "<vop>.m\t%0");
+  }
+)
+
+;; Mask-to-scalar op (VECTOR_MASK_BIT): reads a mask register, writes a
+;; DImode GPR result.  Operand 1 is deliberately modeless so any mask
+;; mode matches.
+(define_insn_and_split "riscv_<vop>m_<mode>"
+   [(set (match_operand:DI              0 "register_operand" "=r")
+         (unspec:DI [(match_operand     1 "vmask_mode_register_operand" "v")
+                     (match_operand:SI  2 "register_operand" "r")
+                     (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                    VECTOR_MASK_BIT))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+     emit_insn(gen_riscv_<vop>m_<mode>_internal(operands[0], operands[1], operands[2]));
+     DONE;
+   }
+)
+
+;; Post-split mask-to-scalar op.
+(define_insn "riscv_<vop>m_<mode>_internal"
+   [(set (match_operand:DI              0 "register_operand" "=r")
+         (unspec:DI [(match_operand     1 "vmask_mode_register_operand" "v")
+                     (match_operand:SI  2 "register_operand" "r")
+                     (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                    VECTOR_MASK_BIT))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.m\t%0,%1");
+   }
+)
+
+;; Masked mask-to-scalar op; operand 2 ("w") is the execution mask.
+(define_insn_and_split "riscv_<vop_mask>m_mask_<mode>"
+   [(set (match_operand:DI              0 "register_operand" "=r")
+         (unspec:DI [(match_operand     1 "vmask_mode_register_operand" "u")
+                     (match_operand     2 "vmask_mode_register_operand" "w")
+                     (match_operand:SI  3 "register_operand" "r")
+                     (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                    VECTOR_MASK_BIT_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop_mask>m_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                          operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split masked mask-to-scalar op.
+(define_insn "riscv_<vop_mask>m_mask_<mode>_internal"
+   [(set (match_operand:DI              0 "register_operand" "=r")
+         (unspec:DI [(match_operand     1 "vmask_mode_register_operand" "u")
+                     (match_operand     2 "vmask_mode_register_operand" "w")
+                     (match_operand:SI  3 "register_operand" "r")
+                     (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                    VECTOR_MASK_BIT_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.m\t%0,%1,%2.t");
+   }
+)
+
+;; Set-first-style mask op (VECTOR_MASK_SET_FIRST), mask in / mask out.
+(define_insn_and_split "riscv_<vop>m_<VALL:mode>_<VMASK:mode>"
+   [(set (match_operand:VMASK             0 "vmask_mode_register_operand" "=v")
+         (unspec:VMASK [(match_operand:VMASK    1 "vmask_mode_register_operand" "v")
+                  (match_operand:SI 2 "register_operand" "r")
+                  (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                  VECTOR_MASK_SET_FIRST))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VALL:MODE>mode, operands[2], operands[2]);
+     emit_insn(gen_riscv_<vop>m_<VALL:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2]));
+     DONE;
+   }
+)
+
+;; Post-split set-first-style mask op.
+(define_insn "riscv_<vop>m_<VALL:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK             0 "vmask_mode_register_operand" "=v")
+         (unspec:VMASK [(match_operand:VMASK    1 "vmask_mode_register_operand" "v")
+                  (match_operand:SI 2 "register_operand" "r")
+                  (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                  VECTOR_MASK_SET_FIRST))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VALL:MODE>mode, "<vop>.m\t%0,%1");
+   }
+)
+
+;; Masked set-first-style mask op; operand 2 ("w") is the execution mask.
+(define_insn_and_split "riscv_<vop_mask>m_mask_<VALL:mode>_<VMASK:mode>"
+   [(set (match_operand:VMASK             0 "vmask_mode_register_operand" "=u")
+         (unspec:VMASK [(match_operand:VMASK    1 "vmask_mode_register_operand" "u")
+                  (match_operand:VMASK    2 "vmask_mode_register_operand" "w")
+                  (match_operand:SI 3 "register_operand" "r")
+                  (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                  VECTOR_MASK_SET_FIRST_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VALL:MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop_mask>m_mask_<VALL:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2],
+                                                          operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split masked set-first-style mask op.
+(define_insn "riscv_<vop_mask>m_mask_<VALL:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK             0 "vmask_mode_register_operand" "=u")
+         (unspec:VMASK [(match_operand:VMASK    1 "vmask_mode_register_operand" "u")
+                  (match_operand:VMASK    2 "vmask_mode_register_operand" "w")
+                  (match_operand:SI 3 "register_operand" "r")
+                  (unspec:VALL [(const_int 0)] UNSPEC_TYPE)]
+                  VECTOR_MASK_SET_FIRST_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VALL:MODE>mode, "<vop_mask>.m\t%0,%1,%2.t");
+   }
+)
+
+;; vector bitwise instruction
+
+;; Integer bitwise operations.  Unmasked forms use constraint "v"; masked
+;; forms tie the mask-off operand to the destination ("0"), use "u" for
+;; sources, "w" for the execution mask, and ".t" in the assembler string.
+;; Operand names with <vop>/<vop_mask> come from the code iterators, so
+;; the vv/vx/vi pattern names here do not collide with the bit-shift
+;; section below.
+
+;; Unmasked bitwise vector-vector op.
+(define_insn_and_split "riscv_<vop>vv_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:VANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_BITWISE_VV))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vv_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split bitwise vector-vector op.
+(define_insn "riscv_<vop>vv_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:VANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_BITWISE_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vv\t%0,%1,%2");
+   }
+)
+
+;; Unmasked bitwise vector-scalar op.
+(define_insn_and_split "riscv_<vop>vx_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_BITWISE_VX))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vx_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split bitwise vector-scalar op.
+(define_insn "riscv_<vop>vx_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_BITWISE_VX))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vx\t%0,%1,%2");
+   }
+)
+
+;; Bitwise NOT (vnot.v pseudo-instruction).
+(define_insn_and_split "riscv_vnotv_<mode>"
+  [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+                      UNSPEC_VNOTV))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vnotv_<mode>_internal(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+;; Post-split bitwise NOT.
+(define_insn "riscv_vnotv_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+                      UNSPEC_VNOTV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vnot.v\t%0,%1");
+  }
+)
+
+;; Masked bitwise NOT: operand 1 is the mask-off value tied to the
+;; destination, operand 3 is the execution mask.
+(define_insn_and_split "riscv_vnotv_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      UNSPEC_VNOTV_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vnotv_mask_<mode>_internal(operands[0], operands[1], operands[2], operands[3], operands[4]));
+    DONE;
+  }
+)
+
+;; Post-split masked bitwise NOT.
+(define_insn "riscv_vnotv_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      UNSPEC_VNOTV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vnot.v\t%0,%2,%3.t");
+  }
+)
+
+;; Unmasked bitwise vector-immediate op (operand 2 is a const_int).
+(define_insn_and_split "riscv_<vop>vi_<mode>"
+  [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:<VANY_SCALAR_attr> 2 "const_int_operand" "i")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_BITWISE_VI))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vi_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split bitwise vector-immediate op.
+(define_insn "riscv_<vop>vi_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:<VANY_SCALAR_attr> 2 "const_int_operand" "i")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_BITWISE_VI))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vi\t%0,%1,%2");
+   }
+)
+
+;; Masked bitwise vector-vector op.
+(define_insn_and_split "riscv_<vop_mask>vv_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:VANY 3 "register_operand" "u")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                       VECTOR_INT_BITWISE_VV_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vv_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Post-split masked bitwise vector-vector op.
+(define_insn "riscv_<vop_mask>vv_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:VANY 3 "register_operand" "u")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                       VECTOR_INT_BITWISE_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vv\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Masked bitwise vector-scalar op.
+(define_insn_and_split "riscv_<vop_mask>vx_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 3 "register_operand" "r")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                       VECTOR_INT_BITWISE_VX_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vx_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Post-split masked bitwise vector-scalar op.
+(define_insn "riscv_<vop_mask>vx_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 3 "register_operand" "r")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                       VECTOR_INT_BITWISE_VX_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vx\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Masked bitwise vector-immediate op.
+(define_insn_and_split "riscv_<vop_mask>vi_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 3 "const_int_operand" "i")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                       VECTOR_INT_BITWISE_VI_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vi_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Post-split masked bitwise vector-immediate op.
+(define_insn "riscv_<vop_mask>vi_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 3 "const_int_operand" "i")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                       VECTOR_INT_BITWISE_VI_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vi\t%0,%2,%3,%4.t");
+   }
+)
+
+;; vector bit shift instruction
+
+;; Integer bit-shift operations.  Same shape as the bitwise section, but
+;; the vi forms use const_K_operand / "K" for the shift amount rather
+;; than an arbitrary const_int — presumably restricting it to the
+;; immediate range the shift encoding accepts (see const_K_operand in
+;; predicates.md).
+
+;; Unmasked shift, vector shift amounts.
+(define_insn_and_split "riscv_<vop>vv_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:VANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_BITSHIFT_VV))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vv_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split shift with vector shift amounts.
+(define_insn "riscv_<vop>vv_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:VANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_BITSHIFT_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vv\t%0,%1,%2");
+   }
+)
+
+;; Unmasked shift by scalar register.
+(define_insn_and_split "riscv_<vop>vx_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_BITSHIFT_VX))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vx_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split shift by scalar register.
+(define_insn "riscv_<vop>vx_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_BITSHIFT_VX))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vx\t%0,%1,%2");
+   }
+)
+
+;; Unmasked shift by immediate (const_K_operand-limited).
+(define_insn_and_split "riscv_<vop>vi_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:<VANY_SCALAR_attr> 2 "const_K_operand" "K")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_BITSHIFT_VI))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vi_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split shift by immediate.
+(define_insn "riscv_<vop>vi_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:<VANY_SCALAR_attr> 2 "const_K_operand" "K")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_BITSHIFT_VI))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vi\t%0,%1,%2");
+   }
+)
+
+;; Masked shift, vector shift amounts.
+(define_insn_and_split "riscv_<vop_mask>vv_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:VANY 3 "register_operand" "u")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_BITSHIFT_VV_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vv_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Post-split masked shift with vector shift amounts.
+(define_insn "riscv_<vop_mask>vv_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:VANY 3 "register_operand" "u")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_BITSHIFT_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vv\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Masked shift by scalar register.
+(define_insn_and_split "riscv_<vop_mask>vx_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 3 "register_operand" "r")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_BITSHIFT_VX_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vx_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Post-split masked shift by scalar register.
+(define_insn "riscv_<vop_mask>vx_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 3 "register_operand" "r")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_BITSHIFT_VX_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vx\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Masked shift by immediate.
+(define_insn_and_split "riscv_<vop_mask>vi_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                         (match_operand:<VANY_SCALAR_attr> 3 "const_K_operand" "K")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+                        VECTOR_INT_BITSHIFT_VI_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vi_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Post-split masked shift by immediate.
+(define_insn "riscv_<vop_mask>vi_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                         (match_operand:<VANY_SCALAR_attr> 3 "const_K_operand" "K")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+                        VECTOR_INT_BITSHIFT_VI_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vi\t%0,%2,%3,%4.t");
+   }
+)
+
+;; vector narrow shift instruction
+
+;; Narrowing shifts: the wide source has mode VNARROW and the result
+;; takes the narrower mode <VNARROW_attr>.  The destination is
+;; earlyclobbered ("=&v") since it has a different (narrower) mode than
+;; the wide input and must not overlap it.  The vsetvli is emitted for
+;; the narrow mode (<VNARROW_attr>mode).
+
+;; Unmasked narrowing shift, vector shift amounts.
+(define_insn_and_split "riscv_<vop>vv_<mode>"
+   [(set (match_operand:<VNARROW_attr>                 0 "register_operand" "=&v")
+         (unspec:<VNARROW_attr> [(match_operand:VNARROW 1 "register_operand" "v")
+                                 (match_operand:<VNARROW_attr> 2 "register_operand" "v")
+                                 (match_operand:SI 3 "register_operand" "r")]
+                                VECTOR_INT_NARROWSHIFT_VV))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VNARROW_attr>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vv_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split narrowing shift with vector shift amounts.
+(define_insn "riscv_<vop>vv_<mode>_internal"
+   [(set (match_operand:<VNARROW_attr>                 0 "register_operand" "=&v")
+         (unspec:<VNARROW_attr> [(match_operand:VNARROW 1 "register_operand" "v")
+                                 (match_operand:<VNARROW_attr> 2 "register_operand" "v")
+                                 (match_operand:SI 3 "register_operand" "r")]
+                                VECTOR_INT_NARROWSHIFT_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vv\t%0,%1,%2");
+   }
+)
+
+;; Unmasked narrowing shift by scalar register.
+(define_insn_and_split "riscv_<vop>vx_<mode>"
+   [(set (match_operand:<VNARROW_attr>                 0 "register_operand" "=&v")
+         (unspec:<VNARROW_attr> [(match_operand:VNARROW 1 "register_operand" "v")
+                                 (match_operand:<VANY_SCALAR_NARROW_attr> 2 "register_operand" "r")
+                                 (match_operand:SI 3 "register_operand" "r")]
+                                VECTOR_INT_NARROWSHIFT_VX))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VNARROW_attr>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vx_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split narrowing shift by scalar register.
+(define_insn "riscv_<vop>vx_<mode>_internal"
+   [(set (match_operand:<VNARROW_attr>                 0 "register_operand" "=&v")
+         (unspec:<VNARROW_attr> [(match_operand:VNARROW 1 "register_operand" "v")
+                                 (match_operand:<VANY_SCALAR_NARROW_attr> 2 "register_operand" "r")
+                                 (match_operand:SI 3 "register_operand" "r")]
+                                VECTOR_INT_NARROWSHIFT_VX))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vx\t%0,%1,%2");
+   }
+)
+
+(define_insn_and_split "riscv_<vop>vi_<mode>"
+   [(set (match_operand:<VNARROW_attr>                 0 "register_operand" "=&v")
+         (unspec:<VNARROW_attr> [(match_operand:VNARROW 1 "register_operand" "v")
+                                 (match_operand:<VANY_SCALAR_NARROW_attr> 2 "const_K_operand" "K")
+                                 (match_operand:SI 3 "register_operand" "r")]
+                                VECTOR_INT_NARROWSHIFT_VI))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VNARROW_attr>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vi_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+(define_insn "riscv_<vop>vi_<mode>_internal"
+   [(set (match_operand:<VNARROW_attr>                 0 "register_operand" "=&v")
+         (unspec:<VNARROW_attr> [(match_operand:VNARROW 1 "register_operand" "v")
+                                 (match_operand:<VANY_SCALAR_NARROW_attr> 2 "const_K_operand" "K")
+                                 (match_operand:SI 3 "register_operand" "r")]
+                                VECTOR_INT_NARROWSHIFT_VI))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vi\t%0,%1,%2");
+   }
+)
+
+;; Masked vector narrowing shifts (.vv/.vx/.vi, printed with a ".t"
+;; mask suffix).  Operand 1 is tied to the destination ("0") and provides
+;; the inactive-element values; the vmask_mode_register_operand (constraint
+;; "w") is the mask; the final SI operand feeds riscv_emit_vsetvli,
+;; presumably as the AVL -- TODO confirm.  Same split scheme and
+;; <VNARROW_attr>mode/<MODE>mode asymmetry as the unmasked patterns above.
+
+;; Masked narrowing shift, vector shift amount (pre-split).
+(define_insn_and_split "riscv_<vop_mask>vv_mask_<mode>"
+   [(set (match_operand:<VNARROW_attr>                 0 "register_operand" "=&u")
+         (unspec:<VNARROW_attr> [(match_operand:<VNARROW_attr> 1 "register_operand" "0")
+                                 (match_operand:VNARROW 2 "register_operand" "u")
+                                 (match_operand:<VNARROW_attr> 3 "register_operand" "u")
+                                 (match_operand 4 "vmask_mode_register_operand" "w")
+                                 (match_operand:SI 5 "register_operand" "r")]
+                                VECTOR_INT_NARROWSHIFT_VV_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VNARROW_attr>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vv_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Masked narrowing shift, vector shift amount (post-split).
+(define_insn "riscv_<vop_mask>vv_mask_<mode>_internal"
+   [(set (match_operand:<VNARROW_attr>                 0 "register_operand" "=&u")
+         (unspec:<VNARROW_attr> [(match_operand:<VNARROW_attr> 1 "register_operand" "0")
+                                 (match_operand:VNARROW 2 "register_operand" "u")
+                                 (match_operand:<VNARROW_attr> 3 "register_operand" "u")
+                                 (match_operand 4 "vmask_mode_register_operand" "w")
+                                 (match_operand:SI 5 "register_operand" "r")]
+                                VECTOR_INT_NARROWSHIFT_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vv\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Masked narrowing shift, scalar shift amount (pre-split).
+(define_insn_and_split "riscv_<vop_mask>vx_mask_<mode>"
+   [(set (match_operand:<VNARROW_attr>                 0 "register_operand" "=&u")
+         (unspec:<VNARROW_attr> [(match_operand:<VNARROW_attr> 1 "register_operand" "0")
+                                 (match_operand:VNARROW 2 "register_operand" "u")
+                                 (match_operand:<VANY_SCALAR_NARROW_attr> 3 "register_operand" "r")
+                                 (match_operand 4 "vmask_mode_register_operand" "w")
+                                 (match_operand:SI 5 "register_operand" "r")]
+                                VECTOR_INT_NARROWSHIFT_VX_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VNARROW_attr>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vx_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                                operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Masked narrowing shift, scalar shift amount (post-split).
+(define_insn "riscv_<vop_mask>vx_mask_<mode>_internal"
+   [(set (match_operand:<VNARROW_attr>                 0 "register_operand" "=&u")
+         (unspec:<VNARROW_attr> [(match_operand:<VNARROW_attr> 1 "register_operand" "0")
+                                 (match_operand:VNARROW 2 "register_operand" "u")
+                                 (match_operand:<VANY_SCALAR_NARROW_attr> 3 "register_operand" "r")
+                                 (match_operand 4 "vmask_mode_register_operand" "w")
+                                 (match_operand:SI 5 "register_operand" "r")]
+                                VECTOR_INT_NARROWSHIFT_VX_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vx\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Masked narrowing shift, immediate shift amount (pre-split).
+(define_insn_and_split "riscv_<vop_mask>vi_mask_<mode>"
+   [(set (match_operand:<VNARROW_attr>                 0 "register_operand" "=&u")
+         (unspec:<VNARROW_attr> [(match_operand:<VNARROW_attr> 1 "register_operand" "0")
+                                 (match_operand:VNARROW 2 "register_operand" "u")
+                                 (match_operand:<VANY_SCALAR_NARROW_attr> 3 "const_K_operand" "K")
+                                 (match_operand 4 "vmask_mode_register_operand" "w")
+                                 (match_operand:SI 5 "register_operand" "r")]
+                                VECTOR_INT_NARROWSHIFT_VI_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VNARROW_attr>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vi_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Masked narrowing shift, immediate shift amount (post-split).
+(define_insn "riscv_<vop_mask>vi_mask_<mode>_internal"
+   [(set (match_operand:<VNARROW_attr>                 0 "register_operand" "=&u")
+         (unspec:<VNARROW_attr> [(match_operand:<VNARROW_attr> 1 "register_operand" "0")
+                                 (match_operand:VNARROW 2 "register_operand" "u")
+                                 (match_operand:<VANY_SCALAR_NARROW_attr> 3 "const_K_operand" "K")
+                                 (match_operand 4 "vmask_mode_register_operand" "w")
+                                 (match_operand:SI 5 "register_operand" "r")]
+                                VECTOR_INT_NARROWSHIFT_VI_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vi\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Vector integer arithmetic instructions.
+
+;; Binary operations, unmasked (.vv/.vx/.vi).  Both source and
+;; destination use the VANY mode iterator; vsetvli is emitted for
+;; <MODE>mode with the trailing SI operand, presumably the AVL -- TODO
+;; confirm.  Immediate (.vi) operands use const_M_operand ("i").
+
+;; Binary arithmetic, vector-vector (pre-split).
+(define_insn_and_split "riscv_<vop>vv_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:VANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_ARITH_VV))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vv_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Binary arithmetic, vector-vector (post-split).
+(define_insn "riscv_<vop>vv_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:VANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_ARITH_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vv\t%0,%1,%2");
+   }
+)
+
+;; Binary arithmetic, vector-scalar (pre-split).
+(define_insn_and_split "riscv_<vop>vx_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_ARITH_VX))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vx_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Binary arithmetic, vector-scalar (post-split).
+(define_insn "riscv_<vop>vx_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_ARITH_VX))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vx\t%0,%1,%2");
+   }
+)
+
+;; Binary arithmetic, vector-immediate (pre-split).
+(define_insn_and_split "riscv_<vop>vi_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:<VANY_SCALAR_attr> 2 "const_M_operand" "i")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_ARITH_VI))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vi_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Binary arithmetic, vector-immediate (post-split).
+(define_insn "riscv_<vop>vi_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:<VANY_SCALAR_attr> 2 "const_M_operand" "i")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_ARITH_VI))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vi\t%0,%1,%2");
+   }
+)
+
+;; Masked binary integer arithmetic (.vv/.vx/.vi with ".t" suffix).
+;; Operand 1 is tied to the destination ("0") and supplies the
+;; inactive-element values; the vmask_mode_register_operand ("w") is the
+;; mask; the final SI operand feeds riscv_emit_vsetvli.
+
+;; Masked binary arithmetic, vector-vector (pre-split).
+(define_insn_and_split "riscv_<vop_mask>vv_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:VANY 3 "register_operand" "u")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                        VECTOR_INT_ARITH_VV_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vv_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Masked binary arithmetic, vector-vector (post-split).
+(define_insn "riscv_<vop_mask>vv_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                         (match_operand:VANY 3 "register_operand" "u")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+                        VECTOR_INT_ARITH_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vv\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Masked binary arithmetic, vector-scalar (pre-split).
+(define_insn_and_split "riscv_<vop_mask>vx_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                         (match_operand:<VANY_SCALAR_attr> 3 "register_operand" "r")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+                        VECTOR_INT_ARITH_VX_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vx_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Masked binary arithmetic, vector-scalar (post-split).
+(define_insn "riscv_<vop_mask>vx_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                         (match_operand:<VANY_SCALAR_attr> 3 "register_operand" "r")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+                        VECTOR_INT_ARITH_VX_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vx\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Masked binary arithmetic, vector-immediate (pre-split).
+(define_insn_and_split "riscv_<vop_mask>vi_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                         (match_operand:<VANY_SCALAR_attr> 3 "const_M_operand" "i")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+                        VECTOR_INT_ARITH_VI_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vi_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Masked binary arithmetic, vector-immediate (post-split).
+(define_insn "riscv_<vop_mask>vi_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                         (match_operand:<VANY_SCALAR_attr> 3 "const_M_operand" "i")
+                         (match_operand 4 "vmask_mode_register_operand" "w")
+                         (match_operand:SI 5 "register_operand" "r")]
+                        VECTOR_INT_ARITH_VI_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vi\t%0,%2,%3,%4.t");
+   }
+)
+
+; Ternary accumulation operations (multiply-add style: the accumulator
+; operand is tied to the destination with constraint "0").
+;
+; NOTE(review): the masked splitters compare REGNO (operands[0]) with
+; REGNO (operands[1]) and emit a trailing vmergevvm to merge inactive
+; elements when the registers differ.  REGNO assumes both operands are
+; bare REGs; register_operand can also match a SUBREG before reload --
+; confirm the split can only run when both are REGs.
+
+;; Accumulating arithmetic, vector-vector (pre-split); operand 3 is the
+;; accumulator, tied to the destination.
+(define_insn_and_split "riscv_<vop>vv_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:VANY 2 "register_operand" "v")
+                       (match_operand:VANY 3 "register_operand" "0")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      VECTOR_INT_ARITH_ACCUM_VV))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+     emit_insn(gen_riscv_<vop>vv_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3], operands[4]));
+     DONE;
+   }
+)
+
+;; Accumulating arithmetic, vector-vector (post-split).
+(define_insn "riscv_<vop>vv_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:VANY 2 "register_operand" "v")
+                       (match_operand:VANY 3 "register_operand" "0")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      VECTOR_INT_ARITH_ACCUM_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vv\t%0,%1,%2");
+   }
+)
+
+;; Accumulating arithmetic, scalar-vector (pre-split); operand 1 is the
+;; scalar multiplier, operand 3 the accumulator tied to the destination.
+(define_insn_and_split "riscv_<vop>vx_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:<VANY_SCALAR_attr> 1 "register_operand" "r")
+                       (match_operand:VANY 2 "register_operand" "v")
+                       (match_operand:VANY 3 "register_operand" "0")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      VECTOR_INT_ARITH_ACCUM_VX))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+     emit_insn(gen_riscv_<vop>vx_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3], operands[4]));
+     DONE;
+   }
+)
+
+;; Accumulating arithmetic, scalar-vector (post-split).
+(define_insn "riscv_<vop>vx_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:<VANY_SCALAR_attr> 1 "register_operand" "r")
+                       (match_operand:VANY 2 "register_operand" "v")
+                       (match_operand:VANY 3 "register_operand" "0")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      VECTOR_INT_ARITH_ACCUM_VX))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vx\t%0,%1,%2");
+   }
+)
+
+;; Masked accumulating arithmetic, vector-vector (pre-split).  Operand 4
+;; is the accumulator tied to the destination; if the final result must
+;; land in a different register than operand 1, a masked vmerge is
+;; emitted afterwards to combine active and inactive elements.
+(define_insn_and_split "riscv_<vop_mask>vv_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:VANY 3 "register_operand" "u")
+                       (match_operand:VANY 4 "register_operand" "0")
+                       (match_operand 5 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 6 "register_operand" "r")]
+                      VECTOR_INT_ARITH_ACCUM_VV_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[6], operands[6]);
+     emit_insn(gen_riscv_<vop_mask>vv_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5], operands[6]));
+    if (REGNO(operands[0]) != REGNO(operands[1]))
+      emit_insn(gen_riscv_vmergevvm_mask_<mode>(operands[0], operands[1], operands[0], operands[5], operands[6]));
+     DONE;
+   }
+)
+
+;; Masked accumulating arithmetic, vector-vector (post-split).
+(define_insn "riscv_<vop_mask>vv_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:VANY 3 "register_operand" "u")
+                       (match_operand:VANY 4 "register_operand" "0")
+                       (match_operand 5 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 6 "register_operand" "r")]
+                      VECTOR_INT_ARITH_ACCUM_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vv\t%0,%2,%3,%5.t");
+   }
+)
+
+;; Masked accumulating arithmetic, scalar-vector (pre-split); same
+;; trailing-vmerge fix-up as the .vv form above.
+(define_insn_and_split "riscv_<vop_mask>vx_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                       (match_operand:VANY 3 "register_operand" "u")
+                       (match_operand:VANY 4 "register_operand" "0")
+                       (match_operand 5 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 6 "register_operand" "r")]
+                      VECTOR_INT_ARITH_ACCUM_VX_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[6], operands[6]);
+     emit_insn(gen_riscv_<vop_mask>vx_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5], operands[6]));
+    if (REGNO(operands[0]) != REGNO(operands[1]))
+      emit_insn(gen_riscv_vmergevvm_mask_<mode>(operands[0], operands[1], operands[0], operands[5], operands[6]));
+     DONE;
+   }
+)
+
+;; Masked accumulating arithmetic, scalar-vector (post-split).
+(define_insn "riscv_<vop_mask>vx_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                       (match_operand:VANY 3 "register_operand" "u")
+                       (match_operand:VANY 4 "register_operand" "0")
+                       (match_operand 5 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 6 "register_operand" "r")]
+                      VECTOR_INT_ARITH_ACCUM_VX_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vx\t%0,%2,%3,%5.t");
+   }
+)
+
+;; Vector widening arithmetic instructions: sources use the
+;; VWIDEN_DSHQ iterator, the destination the wider <VWIDEN_attr> mode.
+;; The splitter configures vsetvli with <MODE>mode (the narrow source
+;; mode); destinations are earlyclobbered ("=&v"/"=&u") since they span
+;; a different register group than the sources.
+
+;; Widening arithmetic, vector-vector (pre-split).
+(define_insn_and_split "riscv_<vop>vv_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:VWIDEN_DSHQ 1 "register_operand" "v")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "v")
+                                (match_operand:SI 3 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_VV))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vv_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Widening arithmetic, vector-vector (post-split).
+(define_insn "riscv_<vop>vv_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:VWIDEN_DSHQ 1 "register_operand" "v")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "v")
+                                (match_operand:SI 3 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vv\t%0,%1,%2");
+   }
+)
+
+;; Widening arithmetic, vector-scalar (pre-split).
+(define_insn_and_split "riscv_<vop>vx_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:VWIDEN_DSHQ 1 "register_operand" "v")
+                                (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                                (match_operand:SI 3 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_VX))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vx_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Widening arithmetic, vector-scalar (post-split).
+(define_insn "riscv_<vop>vx_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:VWIDEN_DSHQ 1 "register_operand" "v")
+                                (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                                (match_operand:SI 3 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_VX))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vx\t%0,%1,%2");
+   }
+)
+
+;; Masked widening arithmetic, vector-vector (pre-split); operand 1 is
+;; the wide merge value tied to the destination.
+(define_insn_and_split "riscv_<vop_mask>vv_mask_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "0")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "u")
+                                (match_operand:VWIDEN_DSHQ 3 "register_operand" "u")
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 5 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_VV_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vv_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Masked widening arithmetic, vector-vector (post-split).
+(define_insn "riscv_<vop_mask>vv_mask_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "0")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "u")
+                                (match_operand:VWIDEN_DSHQ 3 "register_operand" "u")
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 5 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vv\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Masked widening arithmetic, vector-scalar (pre-split).
+(define_insn_and_split "riscv_<vop_mask>vx_mask_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "0")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "u")
+                                 (match_operand:<VANY_SCALAR_attr> 3 "register_operand" "r")
+                                 (match_operand 4 "vmask_mode_register_operand" "w")
+                                 (match_operand:SI 5 "register_operand" "r")]
+                                VECTOR_INT_WIDENARITH_VX_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vx_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Masked widening arithmetic, vector-scalar (post-split).
+(define_insn "riscv_<vop_mask>vx_mask_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "0")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "u")
+                                 (match_operand:<VANY_SCALAR_attr> 3 "register_operand" "r")
+                                 (match_operand 4 "vmask_mode_register_operand" "w")
+                                 (match_operand:SI 5 "register_operand" "r")]
+                                VECTOR_INT_WIDENARITH_VX_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vx\t%0,%2,%3,%4.t");
+   }
+)
+
+(define_insn_and_split "riscv_<vop>wv_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "v")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "v")
+                                (match_operand:SI 3 "register_operand" "r")]
+                               VECTOR_INT_WIDENWIDENARITH_VV))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>wv_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+(define_insn "riscv_<vop>wv_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "v")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "v")
+                                (match_operand:SI 3 "register_operand" "r")]
+                               VECTOR_INT_WIDENWIDENARITH_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.wv\t%0,%1,%2");
+   }
+)
+
+;; Widening integer arithmetic, .wx form (wide vector op scalar).  Operand 1
+;; is the wide source, operand 2 the scalar (GPR) source, operand 3 the
+;; vector length.  The UNSPEC_TYPE marker carries the narrow element mode so
+;; the right vsetvli SEW/LMUL is chosen.
+(define_insn_and_split "riscv_<vop>wx_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "v")
+                                (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                                (match_operand:SI 3 "register_operand" "r")
+                                (unspec:VWIDEN_DSHQ [(const_int 0)] UNSPEC_TYPE)]
+                               VECTOR_INT_WIDENWIDENARITH_VX))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>wx_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop>wx_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "v")
+                                (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                                (match_operand:SI 3 "register_operand" "r")
+                                (unspec:VWIDEN_DSHQ [(const_int 0)] UNSPEC_TYPE)]
+                               VECTOR_INT_WIDENWIDENARITH_VX))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.wx\t%0,%1,%2");
+   }
+)
+
+;; Masked widening .wv form.  Operand 1 (tied to the output) supplies the
+;; merge value for inactive elements, operand 4 is the v0 mask, operand 5 the
+;; vector length.  Splits into vsetvli + *_internal under TARGET_VECTOR_VSPLIT.
+(define_insn_and_split "riscv_<vop_mask>wv_mask_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "0")
+                                (match_operand:<VWIDEN_attr> 2 "register_operand" "u")
+                                (match_operand:VWIDEN_DSHQ 3 "register_operand" "u")
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 5 "register_operand" "r")]
+                               VECTOR_INT_WIDENWIDENARITH_VV_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>wv_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop_mask>wv_mask_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "0")
+                                (match_operand:<VWIDEN_attr> 2 "register_operand" "u")
+                                (match_operand:VWIDEN_DSHQ 3 "register_operand" "u")
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 5 "register_operand" "r")]
+                               VECTOR_INT_WIDENWIDENARITH_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.wv\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Masked widening .wx form.  Operand 1 (tied to the output) is the merge
+;; value, operand 3 the scalar source, operand 4 the v0 mask, operand 5 the
+;; vector length.  UNSPEC_TYPE carries the narrow element mode for vsetvli.
+(define_insn_and_split "riscv_<vop_mask>wx_mask_<mode>"
+    [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+          (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "0")
+                                 (match_operand:<VWIDEN_attr> 2 "register_operand" "u")
+                                 (match_operand:<VANY_SCALAR_attr> 3 "register_operand" "r")
+                                 (unspec:VWIDEN_DSHQ [(const_int 0)] UNSPEC_TYPE)
+                                 (match_operand 4 "vmask_mode_register_operand" "w")
+                                 (match_operand:SI 5 "register_operand" "r")]
+                                VECTOR_INT_WIDENWIDENARITH_VX_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>wx_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop_mask>wx_mask_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "0")
+                                (match_operand:<VWIDEN_attr> 2 "register_operand" "u")
+                                (match_operand:<VANY_SCALAR_attr> 3 "register_operand" "r")
+                                (unspec:VWIDEN_DSHQ [(const_int 0)] UNSPEC_TYPE)
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 5 "register_operand" "r")]
+                               VECTOR_INT_WIDENWIDENARITH_VX_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.wx\t%0,%2,%3,%4.t");
+   }
+)
+
+;;vector widen arithmetic accumulation instruction
+
+;; Widening multiply-accumulate, .vv form.  Operands 1 and 2 are the narrow
+;; multiplicands, operand 3 (tied to the output) is the wide accumulator,
+;; operand 4 the vector length.
+(define_insn_and_split "riscv_<vop>vv_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:VWIDEN_DSHQ 1 "register_operand" "v")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "v")
+                                (match_operand:<VWIDEN_attr> 3 "register_operand" "0")
+                                (match_operand:SI 4 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_ACCUM_VV))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+     emit_insn(gen_riscv_<vop>vv_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3], operands[4]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop>vv_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:VWIDEN_DSHQ 1 "register_operand" "v")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "v")
+                                (match_operand:<VWIDEN_attr> 3 "register_operand" "0")
+                                (match_operand:SI 4 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_ACCUM_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vv\t%0,%1,%2");
+   }
+)
+
+;; Widening multiply-accumulate, .vx form.  Operand 1 is the scalar
+;; multiplicand, operand 2 the narrow vector multiplicand, operand 3 (tied to
+;; the output) the wide accumulator, operand 4 the vector length.
+(define_insn_and_split "riscv_<vop>vx_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:<VANY_SCALAR_attr> 1 "register_operand" "r")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "v")
+                                (match_operand:<VWIDEN_attr> 3 "register_operand" "0")
+                                (match_operand:SI 4 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_ACCUM_VX))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+     emit_insn(gen_riscv_<vop>vx_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3], operands[4]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop>vx_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:<VANY_SCALAR_attr> 1 "register_operand" "r")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "v")
+                                (match_operand:<VWIDEN_attr> 3 "register_operand" "0")
+                                (match_operand:SI 4 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_ACCUM_VX))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vx\t%0,%1,%2");
+   }
+)
+
+;; Masked widening multiply-accumulate, .vv form.  Operand 1 is the merge
+;; value for inactive elements, operands 2/3 the narrow multiplicands,
+;; operand 4 (tied to the output) the accumulator, operand 5 the v0 mask and
+;; operand 6 the vector length.  The split performs the masked operation and,
+;; when the destination register differs from the merge source, emits a
+;; trailing vmerge to write the merge value back into inactive lanes.
+(define_insn_and_split "riscv_<vop_mask>vv_mask_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "u")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "u")
+                                (match_operand:VWIDEN_DSHQ 3 "register_operand" "u")
+                                (match_operand:<VWIDEN_attr> 4 "register_operand" "0")
+                                (match_operand 5 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 6 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_ACCUM_VV_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[6], operands[6]);
+     emit_insn(gen_riscv_<vop_mask>vv_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5], operands[6]));
+    if (REGNO(operands[0]) != REGNO(operands[1]))
+      emit_insn(gen_riscv_vmergevvm_mask_<vwiden_attr>(operands[0], operands[1], operands[0], operands[5], operands[6]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop_mask>vv_mask_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "u")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "u")
+                                (match_operand:VWIDEN_DSHQ 3 "register_operand" "u")
+                                (match_operand:<VWIDEN_attr> 4 "register_operand" "0")
+                                (match_operand 5 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 6 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_ACCUM_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vv\t%0,%2,%3,%5.t");
+   }
+)
+
+;; Masked widening multiply-accumulate, .vx form.  Operand 1 is the merge
+;; value, operand 2 the scalar multiplicand, operand 3 the narrow vector
+;; multiplicand, operand 4 (tied to the output) the accumulator, operand 5
+;; the v0 mask, operand 6 the vector length.  As in the .vv variant, a
+;; trailing vmerge restores inactive lanes when dest != merge source.
+(define_insn_and_split "riscv_<vop_mask>vx_mask_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "u")
+                                (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                                (match_operand:VWIDEN_DSHQ 3 "register_operand" "u")
+                                (match_operand:<VWIDEN_attr> 4 "register_operand" "0")
+                                (match_operand 5 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 6 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_ACCUM_VX_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[6], operands[6]);
+     emit_insn(gen_riscv_<vop_mask>vx_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5], operands[6]));
+    if (REGNO(operands[0]) != REGNO(operands[1]))
+      emit_insn(gen_riscv_vmergevvm_mask_<vwiden_attr>(operands[0], operands[1], operands[0], operands[5], operands[6]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop_mask>vx_mask_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "u")
+                                (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                                (match_operand:VWIDEN_DSHQ 3 "register_operand" "u")
+                                (match_operand:<VWIDEN_attr> 4 "register_operand" "0")
+                                (match_operand 5 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 6 "register_operand" "r")]
+                               VECTOR_INT_WIDENARITH_ACCUM_VX_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vx\t%0,%2,%3,%5.t");
+   }
+)
+
+;; vector reduction instruction
+
+;; Binary operation
+
+;; Vector reduction, .vs form.  Operand 1 is the vector source, operand 2
+;; the vector holding the scalar start value, operand 3 the vector length.
+(define_insn_and_split "riscv_<vop>vs_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:VANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_RED_VS))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vs_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop>vs_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:VANY 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_RED_VS))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vs\t%0,%1,%2");
+   }
+)
+
+;; Masked vector reduction, .vs form.  Operand 1 (tied to the output) is the
+;; merge value, operand 2 the vector source, operand 3 the start-value
+;; vector, operand 4 the v0 mask, operand 5 the vector length.
(define_insn_and_split "riscv_<vop_mask>vs_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:VANY 3 "register_operand" "u")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_RED_VS_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vs_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop_mask>vs_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand:VANY 3 "register_operand" "u")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_RED_VS_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vs\t%0,%2,%3,%4.t");
+   }
+)
+
+;; vector widen reduction instruction
+
+;; Widening vector reduction, .vs form.  Operand 1 is the narrow vector
+;; source, operand 2 the wide start-value vector, operand 3 the vector
+;; length; the result is a wide vector.
+(define_insn_and_split "riscv_<vop>vs_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:VWIDEN_DSHQ 1 "register_operand" "v")
+                                (match_operand:<VWIDEN_attr> 2 "register_operand" "v")
+                                (match_operand:SI 3 "register_operand" "r")]
+                               VECTOR_INT_WIDENWIDENRED_VS))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vs_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop>vs_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&v")
+         (unspec:<VWIDEN_attr> [(match_operand:VWIDEN_DSHQ 1 "register_operand" "v")
+                                (match_operand:<VWIDEN_attr> 2 "register_operand" "v")
+                                (match_operand:SI 3 "register_operand" "r")]
+                               VECTOR_INT_WIDENWIDENRED_VS))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vs\t%0,%1,%2");
+   }
+)
+
+;; Masked widening vector reduction, .vs form.  Operand 1 (tied to the
+;; output) is the merge value, operand 2 the narrow vector source, operand 3
+;; the wide start-value vector, operand 4 the v0 mask, operand 5 the length.
+(define_insn_and_split "riscv_<vop_mask>vs_mask_<mode>"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "0")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "u")
+                                (match_operand:<VWIDEN_attr> 3 "register_operand" "u")
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 5 "register_operand" "r")]
+                               VECTOR_INT_WIDENWIDENRED_VS_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vs_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop_mask>vs_mask_<mode>_internal"
+   [(set (match_operand:<VWIDEN_attr>                 0 "register_operand" "=&u")
+         (unspec:<VWIDEN_attr> [(match_operand:<VWIDEN_attr> 1 "register_operand" "0")
+                                (match_operand:VWIDEN_DSHQ 2 "register_operand" "u")
+                                (match_operand:<VWIDEN_attr> 3 "register_operand" "u")
+                                (match_operand 4 "vmask_mode_register_operand" "w")
+                                (match_operand:SI 5 "register_operand" "r")]
+                               VECTOR_INT_WIDENWIDENRED_VS_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vs\t%0,%2,%3,%4.t");
+   }
+)
+
+;; vector slide instruction and gather instruction
+
+;; Register gather, .vv form.  Operand 1 is the data vector, operand 2 the
+;; index vector (deliberately modeless: its element mode may differ from the
+;; data mode), operand 3 the vector length.
+(define_insn_and_split "riscv_<vop>vv_<mode>"
+   [(set (match_operand:VALL                 0 "register_operand" "=&v")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "v")
+                       (match_operand 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_GATHER_VV))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vv_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop>vv_<mode>_internal"
+   [(set (match_operand:VALL                 0 "register_operand" "=&v")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "v")
+                       (match_operand 2 "register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_GATHER_VV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vv\t%0,%1,%2");
+   }
+)
+
+;; Masked register gather, .vv form.  Operand 1 (tied to the output) is the
+;; merge value, operand 2 the data vector, operand 3 the modeless index
+;; vector, operand 4 the v0 mask, operand 5 the vector length.
+(define_insn_and_split "riscv_<vop_mask>vv_mask_<mode>"
+   [(set (match_operand:VALL                 0 "register_operand" "=&u")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "0")
+                       (match_operand:VALL 2 "register_operand" "u")
+                       (match_operand 3 "register_operand" "u")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_GATHER_VV_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vv_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop_mask>vv_mask_<mode>_internal"
+   [(set (match_operand:VALL                 0 "register_operand" "=&u")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "0")
+                       (match_operand:VALL 2 "register_operand" "u")
+                       (match_operand 3 "register_operand" "u")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_GATHER_VV_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vv\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Slide/gather, .vx form.  Operand 1 is the data vector, operand 2 the
+;; scalar offset/index in a GPR (modeless), operand 3 the vector length.
+(define_insn_and_split "riscv_<vop>vx_<mode>"
+   [(set (match_operand:VALL                 0 "register_operand" "=&v")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "v")
+                       (match_operand 2 "register_operand" "r")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_SLIDEGATHER_VX))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vx_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop>vx_<mode>_internal"
+   [(set (match_operand:VALL                 0 "register_operand" "=&v")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "v")
+                       (match_operand 2 "register_operand" "r")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_SLIDEGATHER_VX))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vx\t%0,%1,%2");
+   }
+)
+
+;; Masked slide/gather, .vx form.  Operand 1 (tied to the output) is the
+;; merge value, operand 2 the data vector, operand 3 the scalar
+;; offset/index, operand 4 the v0 mask, operand 5 the vector length.
+(define_insn_and_split "riscv_<vop_mask>vx_mask_<mode>"
+   [(set (match_operand:VALL                 0 "register_operand" "=&u")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "0")
+                       (match_operand:VALL 2 "register_operand" "u")
+                       (match_operand 3 "register_operand" "r")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_SLIDEGATHER_VX_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vx_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop_mask>vx_mask_<mode>_internal"
+   [(set (match_operand:VALL                 0 "register_operand" "=&u")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "0")
+                       (match_operand:VALL 2 "register_operand" "u")
+                       (match_operand 3 "register_operand" "r")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_SLIDEGATHER_VX_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vx\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Slide, .vi form.  Operand 2 is an immediate offset restricted by the
+;; const_K_operand predicate, with the matching "K" constraint; operand 3 is
+;; the vector length.
+(define_insn_and_split "riscv_<vop>vi_<mode>"
+   [(set (match_operand:VALL                 0 "register_operand" "=&v")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "v")
+                       (match_operand 2 "const_K_operand" "K")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_SLIDE_VI))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vi_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop>vi_<mode>_internal"
+   [(set (match_operand:VALL                 0 "register_operand" "=&v")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "v")
+                       (match_operand 2 "const_K_operand" "K")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_SLIDE_VI))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vi\t%0,%1,%2");
+   }
+)
+
+;; Masked slide, .vi form.  Operand 1 (tied to the output) is the merge
+;; value, operand 2 the data vector, operand 3 the immediate offset
+;; (const_K_operand / "K"), operand 4 the v0 mask, operand 5 the length.
+(define_insn_and_split "riscv_<vop_mask>vi_mask_<mode>"
+   [(set (match_operand:VALL                 0 "register_operand" "=&u")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "0")
+                       (match_operand:VALL 2 "register_operand" "u")
+                       (match_operand 3 "const_K_operand" "K")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_SLIDE_VI_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vi_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop_mask>vi_mask_<mode>_internal"
+   [(set (match_operand:VALL                 0 "register_operand" "=&u")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "0")
+                       (match_operand:VALL 2 "register_operand" "u")
+                       (match_operand 3 "const_K_operand" "K")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_SLIDE_VI_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vi\t%0,%2,%3,%4.t");
+   }
+)
+
+;; Register gather, .vi form.  Operand 2 is the immediate index; it must
+;; satisfy the const_K_operand predicate, so use the matching "K" constraint
+;; (as the slide .vi patterns do) instead of the broader "i", which would
+;; also admit constants the predicate rejects.  Operand 3 is the length.
+(define_insn_and_split "riscv_<vop>vi_<mode>"
+   [(set (match_operand:VALL                 0 "register_operand" "=&v")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "v")
+                       (match_operand 2 "const_K_operand" "K")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_GATHER_VI))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_<vop>vi_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop>vi_<mode>_internal"
+   [(set (match_operand:VALL                 0 "register_operand" "=&v")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "v")
+                       (match_operand 2 "const_K_operand" "K")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      VECTOR_INT_GATHER_VI))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop>.vi\t%0,%1,%2");
+   }
+)
+
+;; Masked register gather, .vi form.  Operand 1 (tied to the output) is the
+;; merge value, operand 3 the immediate index.  As in the unmasked pattern,
+;; the constraint is tightened from "i" to "K" to agree with the
+;; const_K_operand predicate and the slide .vi patterns.
+(define_insn_and_split "riscv_<vop_mask>vi_mask_<mode>"
+   [(set (match_operand:VALL                 0 "register_operand" "=&u")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "0")
+                       (match_operand:VALL 2 "register_operand" "u")
+                       (match_operand 3 "const_K_operand" "K")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_GATHER_VI_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[5], operands[5]);
+     emit_insn(gen_riscv_<vop_mask>vi_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                           operands[3], operands[4], operands[5]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_<vop_mask>vi_mask_<mode>_internal"
+   [(set (match_operand:VALL                 0 "register_operand" "=&u")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "0")
+                       (match_operand:VALL 2 "register_operand" "u")
+                       (match_operand 3 "const_K_operand" "K")
+                       (match_operand 4 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 5 "register_operand" "r")]
+                      VECTOR_INT_GATHER_VI_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vi\t%0,%2,%3,%4.t");
+   }
+)
+
+;; vector compress instruction
+
+;; vcompress.vm: pack the elements of operand 1 selected by mask operand 2
+;; into the low elements of the destination.  Operand 3 is the vector
+;; length.  The mask here may live in any vector register ("v"), unlike the
+;; masked-operation patterns which require the v0 class ("w").
+(define_insn_and_split "riscv_vcompressvm_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=&v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand 2 "vmask_mode_register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      UNSPEC_VCOMPRESS))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_vcompressvm_<mode>_internal(operands[0], operands[1], operands[2],
+                                                     operands[3]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_vcompressvm_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=&v")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand 2 "vmask_mode_register_operand" "v")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      UNSPEC_VCOMPRESS))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "vcompress.vm\t%0,%1,%2");
+   }
+)
+
+;; vector iota and id instruction
+
+;; viota.m: write into each destination element the count of set mask bits
+;; below it.  Operand 1 is the source mask, operand 2 the vector length.
+(define_insn_and_split "riscv_viotam_<mode>"
+    [(set (match_operand:VANY                 0 "register_operand" "=&v")
+          (unspec:VANY [(match_operand 1 "vmask_mode_register_operand" "v")
+                        (match_operand:SI 2 "register_operand" "r")]
+                       UNSPEC_VIOTA))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+     emit_insn(gen_riscv_viotam_<mode>_internal(operands[0], operands[1], operands[2]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_viotam_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=&v")
+         (unspec:VANY [(match_operand 1 "vmask_mode_register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+                      UNSPEC_VIOTA))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "viota.m\t%0,%1");
+   }
+)
+
+;; Masked viota.m.  Operand 1 (tied to the output) is the merge value,
+;; operand 2 the source mask, operand 3 the v0 execution mask, operand 4 the
+;; vector length.
+(define_insn_and_split "riscv_viotam_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=&u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand 2 "vmask_mode_register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      UNSPEC_VIOTA_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+     emit_insn(gen_riscv_viotam_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                     operands[3], operands[4]));
+     DONE;
+   }
+)
+
+;; Emitted by the split above; assumes vl/vtype were already set by vsetvli.
+(define_insn "riscv_viotam_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=&u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand 2 "vmask_mode_register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      UNSPEC_VIOTA_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "viota.m\t%0,%2,%3.t");
+   }
+)
+
+;; Unmasked vid.v (write element indices).  Operand 1 is the AVL used
+;; for the vsetvli emitted at split time.
+(define_insn_and_split "riscv_vidv_<mode>"
+    [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:SI 1 "register_operand" "r")]
+                      UNSPEC_VID))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[1], operands[1]);
+     emit_insn(gen_riscv_vidv_<mode>_internal(operands[0], operands[1]));
+     DONE;
+   }
+)
+
+;; Post-split form; (clobber (const_int 0)) marks it as already split.
+(define_insn "riscv_vidv_<mode>_internal"
+    [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:SI 1 "register_operand" "r")]
+                      UNSPEC_VID))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "vid.v\t%0");
+   }
+)
+
+;; Masked vid.v.  Operand 1 ("0") is the tied merge input, operand 2 ("w")
+;; the governing mask printed as "%2.t", operand 3 the AVL for vsetvli.
+(define_insn_and_split "riscv_vidv_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand 2 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      UNSPEC_VID_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_vidv_mask_<mode>_internal(operands[0], operands[1], operands[2], operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split form of the masked vid.v above.
+(define_insn "riscv_vidv_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "0")
+                       (match_operand 2 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 3 "register_operand" "r")]
+                      UNSPEC_VID_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "vid.v\t%0,%2.t");
+   }
+)
+
+;; vector vmerge (vmv) instructions
+
+;; vmv.v.v (vector-vector move).  Uses the VALL iterator rather than VANY;
+;; split emits a vsetvli with AVL = operand 2, then the *_internal insn.
+(define_insn_and_split "riscv_vmvvv_<mode>"
+   [(set (match_operand:VALL                 0 "register_operand" "=v")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+                      UNSPEC_VMVVV))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+     emit_insn(gen_riscv_vmvvv_<mode>_internal(operands[0], operands[1], operands[2]));
+     DONE;
+   }
+)
+
+;; Post-split form; (clobber (const_int 0)) marks it as already split.
+(define_insn "riscv_vmvvv_<mode>_internal"
+   [(set (match_operand:VALL                 0 "register_operand" "=v")
+         (unspec:VALL [(match_operand:VALL 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+                      UNSPEC_VMVVV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "vmv.v.v\t%0,%1");
+   }
+)
+
+;; vmerge.vvm: select elements from %1/%2 under mask %3 ("w").  Split
+;; emits a vsetvli with AVL = operand 4, then the *_internal insn.
+(define_insn_and_split "riscv_vmergevvm_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      UNSPEC_VMERGEVVM_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+     emit_insn(gen_riscv_vmergevvm_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                     operands[3], operands[4]));
+     DONE;
+   }
+)
+
+;; Post-split form of vmerge.vvm above.
+(define_insn "riscv_vmergevvm_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      UNSPEC_VMERGEVVM_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "vmerge.vvm\t%0,%1,%2,%3");
+   }
+)
+
+;; vmv.v.x (scalar-to-vector broadcast).  Operand 1 has the element's
+;; scalar mode via <VANY_SCALAR_attr>; split emits a vsetvli with
+;; AVL = operand 2, then the *_internal insn.
+(define_insn_and_split "riscv_vmvvx_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:<VANY_SCALAR_attr> 1 "register_operand" "r")
+                       (match_operand:SI 2 "register_operand" "r")]
+                      UNSPEC_VMVVX))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+     emit_insn(gen_riscv_vmvvx_<mode>_internal(operands[0], operands[1], operands[2]));
+     DONE;
+   }
+)
+
+;; Post-split form; (clobber (const_int 0)) marks it as already split.
+(define_insn "riscv_vmvvx_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:<VANY_SCALAR_attr> 1 "register_operand" "r")
+                       (match_operand:SI 2 "register_operand" "r")]
+                      UNSPEC_VMVVX))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "vmv.v.x\t%0,%1");
+   }
+)
+
+;; vmerge.vxm: vector-scalar merge under mask %3.  Same expand/split
+;; scheme as vmerge.vvm; AVL is operand 4.
+(define_insn_and_split "riscv_vmergevxm_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      UNSPEC_VMERGEVXM_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+     emit_insn(gen_riscv_vmergevxm_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                        operands[3], operands[4]));
+     DONE;
+   }
+)
+
+;; Post-split form of vmerge.vxm above.
+(define_insn "riscv_vmergevxm_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      UNSPEC_VMERGEVXM_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "vmerge.vxm\t%0,%1,%2,%3");
+   }
+)
+
+;; vmv.v.i (immediate broadcast).  Operand 1 is constrained by
+;; const_M_operand -- NOTE(review): presumably the 5-bit simm range of
+;; RVV immediates; confirm against the predicate's definition.
+(define_insn_and_split "riscv_vmvvi_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:<VANY_SCALAR_attr> 1 "const_M_operand" "i")
+                       (match_operand:SI 2 "register_operand" "r")]
+                      UNSPEC_VMVVI))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+     emit_insn(gen_riscv_vmvvi_<mode>_internal(operands[0], operands[1], operands[2]));
+     DONE;
+   }
+)
+
+;; Post-split form; (clobber (const_int 0)) marks it as already split.
+(define_insn "riscv_vmvvi_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:<VANY_SCALAR_attr> 1 "const_M_operand" "i")
+                       (match_operand:SI 2 "register_operand" "r")]
+                      UNSPEC_VMVVI))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "vmv.v.i\t%0,%1");
+   }
+)
+
+;; vmerge.vim: vector-immediate merge under mask %3.  Same expand/split
+;; scheme as vmerge.vvm/vmerge.vxm; AVL is operand 4.
+(define_insn_and_split "riscv_vmergevim_mask_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 2 "const_M_operand" "i")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      UNSPEC_VMERGEVIM_MASK))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+     emit_insn(gen_riscv_vmergevim_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                        operands[3], operands[4]));
+     DONE;
+   }
+)
+
+;; Post-split form of vmerge.vim above.
+(define_insn "riscv_vmergevim_mask_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 2 "const_M_operand" "i")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+                      UNSPEC_VMERGEVIM_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "vmerge.vim\t%0,%1,%2,%3");
+   }
+)
+
+;; vector permutation instructions
+
+;; vext.x.v: extract element %2 of vector %1 into scalar GPR %0.
+;; Operand 3 is only the AVL for the vsetvli emitted at split time; it
+;; is not printed in the assembly.
+(define_insn_and_split "riscv_vextxv_<mode>"
+   [(set (match_operand:<VANY_SCALAR_attr>                 0 "register_operand" "=r")
+         (unspec:<VANY_SCALAR_attr> [(match_operand:VANY 1 "register_operand" "v")
+                                     (match_operand:SI 2 "register_operand" "r")
+                                     (match_operand:SI 3 "register_operand" "r")]
+                                    UNSPEC_VEXTXV))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[3], operands[3]);
+     emit_insn(gen_riscv_vextxv_<mode>_internal(operands[0], operands[1],operands[2],
+                                                operands[3]));
+     DONE;
+   }
+)
+
+;; Post-split form; (clobber (const_int 0)) marks it as already split.
+(define_insn "riscv_vextxv_<mode>_internal"
+   [(set (match_operand:<VANY_SCALAR_attr>                 0 "register_operand" "=r")
+         (unspec:<VANY_SCALAR_attr> [(match_operand:VANY 1 "register_operand" "v")
+                                     (match_operand:SI 2 "register_operand" "r")
+                                     (match_operand:SI 3 "register_operand" "r")]
+                                    UNSPEC_VEXTXV))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "vext.x.v\t%0,%1,%2");
+   }
+)
+
+;; vmv.s.x: move scalar %1 into element 0 of vector %0.  Operand 2 is
+;; the AVL for the vsetvli emitted at split time.
+(define_insn_and_split "riscv_vmvsx_<mode>"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:<VANY_SCALAR_attr> 1 "register_operand" "r")
+                       (match_operand:SI 2 "register_operand" "r")]
+                       UNSPEC_VMVSX))]
+   "TARGET_VECTOR"
+   "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+     emit_insn(gen_riscv_vmvsx_<mode>_internal(operands[0], operands[1], operands[2]));
+     DONE;
+   }
+)
+
+;; Post-split form; (clobber (const_int 0)) marks it as already split.
+(define_insn "riscv_vmvsx_<mode>_internal"
+   [(set (match_operand:VANY                 0 "register_operand" "=v")
+         (unspec:VANY [(match_operand:<VANY_SCALAR_attr> 1 "register_operand" "r")
+                       (match_operand:SI 2 "register_operand" "r")]
+                       UNSPEC_VMVSX))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "vmv.s.x\t%0,%1");
+   }
+)
+
+;; vmv.x.s: move element 0 of vector %1 into scalar GPR %0.  Operand 2
+;; is the AVL for the vsetvli emitted at split time.
+(define_insn_and_split "riscv_vmvxs_<mode>"
+   [(set (match_operand:<VANY_SCALAR_attr> 0 "register_operand" "=r")
+         (unspec:<VANY_SCALAR_attr> [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+                       UNSPEC_VMVXS))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[2], operands[2]);
+    emit_insn(gen_riscv_vmvxs_<mode>_internal(operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+;; Post-split form; (clobber (const_int 0)) marks it as already split.
+(define_insn "riscv_vmvxs_<mode>_internal"
+   [(set (match_operand:<VANY_SCALAR_attr> 0 "register_operand" "=r")
+         (unspec:<VANY_SCALAR_attr> [(match_operand:VANY 1 "register_operand" "v")
+                       (match_operand:SI 2 "register_operand" "r")]
+                       UNSPEC_VMVXS))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+    return riscv_output_vector_insn(<MODE>mode, "vmv.x.s\t%0,%1");
+  }
+)
+
+;; carry and borrow
+
+;; Carry-use vector-vector ops (<vop_mask>.vvm, e.g. vadc/vsbc family via
+;; the VECTOR_CARRYUSE_VVM_MASK iterator).  %3 ("w") is the carry/borrow
+;; mask; AVL is operand 4.
+(define_insn_and_split "riscv_<vop_mask>vvm_mask_<mode>"
+  [(set (match_operand:VANY 0 "register_operand" "=u")
+        (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                      (match_operand:VANY 2 "register_operand" "u")
+                      (match_operand 3 "vmask_mode_register_operand" "w")
+                      (match_operand:SI 4 "register_operand" "r")]
+         VECTOR_CARRYUSE_VVM_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_<vop_mask>vvm_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3], operands[4]));
+    DONE;
+   }
+)
+
+;; Post-split form of the carry-use .vvm ops above.
+(define_insn "riscv_<vop_mask>vvm_mask_<mode>_internal"
+   [(set (match_operand:VANY 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:VANY 2 "register_operand" "u")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+          VECTOR_CARRYUSE_VVM_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vvm\t%0,%1,%2,%3");
+   }
+)
+
+;; Carry-use vector-scalar ops (<vop_mask>.vxm).  Same scheme as the
+;; .vvm variant; operand 2 is the scalar source.
+(define_insn_and_split "riscv_<vop_mask>vxm_mask_<mode>"
+  [(set (match_operand:VANY 0 "register_operand" "=u")
+        (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                      (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                      (match_operand 3 "vmask_mode_register_operand" "w")
+                      (match_operand:SI 4 "register_operand" "r")]
+         VECTOR_CARRYUSE_VXM_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_<vop_mask>vxm_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3], operands[4]));
+    DONE;
+   }
+)
+
+;; Post-split form of the carry-use .vxm ops above.
+(define_insn "riscv_<vop_mask>vxm_mask_<mode>_internal"
+   [(set (match_operand:VANY 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 2 "register_operand" "r")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+          VECTOR_CARRYUSE_VXM_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "<vop_mask>.vxm\t%0,%1,%2,%3");
+   }
+)
+
+;; vadc.vim: add-with-carry, vector-immediate form (immediate-taking
+;; carry-use op, so it has its own unspec instead of an iterator).
+(define_insn_and_split "riscv_vadcvim_mask_<mode>"
+  [(set (match_operand:VANY 0 "register_operand" "=u")
+        (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                      (match_operand:<VANY_SCALAR_attr> 2 "const_M_operand" "i")
+                      (match_operand 3 "vmask_mode_register_operand" "w")
+                      (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VADCVIM_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vadcvim_mask_<mode>_internal(operands[0], operands[1], operands[2],
+                                                 operands[3], operands[4]));
+    DONE;
+   }
+)
+
+;; Post-split form of vadc.vim above.
+(define_insn "riscv_vadcvim_mask_<mode>_internal"
+   [(set (match_operand:VANY 0 "register_operand" "=u")
+         (unspec:VANY [(match_operand:VANY 1 "register_operand" "u")
+                       (match_operand:<VANY_SCALAR_attr> 2 "const_M_operand" "i")
+                       (match_operand 3 "vmask_mode_register_operand" "w")
+                       (match_operand:SI 4 "register_operand" "r")]
+          UNSPEC_VADCVIM_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<MODE>mode, "vadc.vim\t%0,%1,%2,%3");
+   }
+)
+
+;; Carry-out vector-vector ops (e.g. vmadc/vmsbc via the
+;; VECTOR_CARRYOUT_VVM_MASK iterator): result is a mask register (VMASK),
+;; hence the two-iterator pattern name.  vsetvli uses the data mode
+;; <VANY:MODE>, not the mask mode.
+(define_insn_and_split "riscv_<vop_mask>vvm_mask_<VANY:mode>_<VMASK:mode>"
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+        (unspec:VMASK [(match_operand:VANY 1 "register_operand" "u")
+                 (match_operand:VANY 2 "register_operand" "u")
+                 (match_operand:VMASK 3 "vmask_mode_register_operand" "w")
+                 (match_operand:SI 4 "register_operand" "r")]
+         VECTOR_CARRYOUT_VVM_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VANY:MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_<vop_mask>vvm_mask_<VANY:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2],
+                                                 operands[3], operands[4]));
+    DONE;
+   }
+)
+
+;; Post-split form of the carry-out .vvm ops above.
+(define_insn "riscv_<vop_mask>vvm_mask_<VANY:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+         (unspec:VMASK [(match_operand:VANY 1 "register_operand" "u")
+                  (match_operand:VANY 2 "register_operand" "u")
+                  (match_operand:VMASK 3 "vmask_mode_register_operand" "w")
+                  (match_operand:SI 4 "register_operand" "r")]
+          VECTOR_CARRYOUT_VVM_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VANY:MODE>mode, "<vop_mask>.vvm\t%0,%1,%2,%3");
+   }
+)
+
+;; Carry-out vector-scalar ops (.vxm), mask-register destination.  Same
+;; two-iterator scheme as the carry-out .vvm pattern above.
+(define_insn_and_split "riscv_<vop_mask>vxm_mask_<VANY:mode>_<VMASK:mode>"
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+        (unspec:VMASK [(match_operand:VANY 1 "register_operand" "u")
+                 (match_operand:<VANY:VANY_SCALAR_attr> 2 "register_operand" "r")
+                 (match_operand:VMASK 3 "vmask_mode_register_operand" "w")
+                 (match_operand:SI 4 "register_operand" "r")]
+         VECTOR_CARRYOUT_VXM_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VANY:MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_<vop_mask>vxm_mask_<VANY:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2],
+                                                 operands[3], operands[4]));
+    DONE;
+   }
+)
+
+;; Post-split form of the carry-out .vxm ops above.
+(define_insn "riscv_<vop_mask>vxm_mask_<VANY:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+         (unspec:VMASK [(match_operand:VANY 1 "register_operand" "u")
+                  (match_operand:<VANY:VANY_SCALAR_attr> 2 "register_operand" "r")
+                  (match_operand:VMASK 3 "vmask_mode_register_operand" "w")
+                  (match_operand:SI 4 "register_operand" "r")]
+          VECTOR_CARRYOUT_VXM_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VANY:MODE>mode, "<vop_mask>.vxm\t%0,%1,%2,%3");
+   }
+)
+
+;; vmadc.vim: carry-out add, vector-immediate form; mask-register
+;; destination.  Same two-iterator scheme as the carry-out ops above.
+(define_insn_and_split "riscv_vmadcvim_mask_<VANY:mode>_<VMASK:mode>"
+  [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+        (unspec:VMASK [(match_operand:VANY 1 "register_operand" "u")
+                 (match_operand:<VANY:VANY_SCALAR_attr> 2 "const_M_operand" "i")
+                 (match_operand:VMASK 3 "vmask_mode_register_operand" "w")
+                 (match_operand:SI 4 "register_operand" "r")]
+         UNSPEC_VMADCVIM_MASK))]
+  "TARGET_VECTOR"
+  "#"
+  "&& TARGET_VECTOR_VSPLIT"
+  [(clobber (const_int 0))]
+  {
+    riscv_emit_vsetvli(<VANY:MODE>mode, operands[4], operands[4]);
+    emit_insn(gen_riscv_vmadcvim_mask_<VANY:mode>_internal_<VMASK:mode>(operands[0], operands[1], operands[2],
+                                                 operands[3], operands[4]));
+    DONE;
+   }
+)
+
+;; Post-split form of vmadc.vim above.
+(define_insn "riscv_vmadcvim_mask_<VANY:mode>_internal_<VMASK:mode>"
+   [(set (match_operand:VMASK 0 "vmask_mode_register_operand" "=&v")
+         (unspec:VMASK [(match_operand:VANY 1 "register_operand" "u")
+                  (match_operand:<VANY:VANY_SCALAR_attr> 2 "const_M_operand" "i")
+                  (match_operand:VMASK 3 "vmask_mode_register_operand" "w")
+                  (match_operand:SI 4 "register_operand" "r")]
+          UNSPEC_VMADCVIM_MASK))
+   (clobber (const_int 0))]
+  "TARGET_VECTOR"
+  {
+     return riscv_output_vector_insn(<VANY:MODE>mode, "vmadc.vim\t%0,%1,%2,%3");
+   }
+)
diff --git a/gcc/config/riscv/riscv.c b/gcc/config/riscv/riscv.c
index b3297a38114..5ad5cff249b 100644
--- a/gcc/config/riscv/riscv.c
+++ b/gcc/config/riscv/riscv.c
@@ -271,6 +271,14 @@ const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
   FP_REGS, FP_REGS, FP_REGS, FP_REGS,
   FP_REGS, FP_REGS, FP_REGS, FP_REGS,
   FRAME_REGS, FRAME_REGS,
+  V_REGS,       V_REGS,         V_REGS,         V_REGS,
+  V_REGS,       V_REGS,         V_REGS,         V_REGS,
+  V_REGS,       V_REGS,         V_REGS,         V_REGS,
+  V_REGS,       V_REGS,         V_REGS,         V_REGS,
+  V_REGS,       V_REGS,         V_REGS,         V_REGS,
+  V_REGS,       V_REGS,         V_REGS,         V_REGS,
+  V_REGS,       V_REGS,         V_REGS,         V_REGS,
+  V_REGS,       V_REGS,         V_REGS,         V_REGS,
 };

 /* Costs to use when optimizing for rocket.  */
@@ -355,6 +363,149 @@ riscv_parse_cpu (const char *cpu_string)
   return riscv_cpu_info_table;
 }

+/* Look up vector MODE in the static RVV mode table.  Return a pointer
+   to the matching table entry, or NULL if the mode is unsupported.  On
+   a hit, *SEW and *LMUL (when non-null) receive the entry's element
+   width and LMUL.  ENABLED forces the lookup even when TARGET_VECTOR is
+   off or the entry itself is disabled.  NOTE(review): every entry's
+   lmul is -1; presumably LMUL is derived elsewhere -- confirm.  */
+static void *
+riscv_vector_vmode_supported_p(machine_mode mode, int *sew, int *lmul,
+                               bool enabled)
+{
+  typedef struct {
+    int lmul;
+    int sew;
+    machine_mode mode;
+    int enabled;
+  } vmode_t;
+
+  static vmode_t vmode[] = {
+    { -1, RVV_E8,  V8QImode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E8,  V16QImode , TARGET_VECTOR },
+    { -1, RVV_E8,  V32QImode , TARGET_VECTOR },
+    { -1, RVV_E8,  V64QImode , TARGET_VECTOR },
+    { -1, RVV_E8,  V128QImode, TARGET_VECTOR },
+
+    { -1, RVV_E16, V4HImode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E16, V8HImode  , TARGET_VECTOR },
+    { -1, RVV_E16, V16HImode , TARGET_VECTOR },
+    { -1, RVV_E16, V32HImode , TARGET_VECTOR },
+    { -1, RVV_E16, V64HImode , TARGET_VECTOR },
+
+    { -1, RVV_E32, V2SImode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E32, V4SImode  , TARGET_VECTOR },
+    { -1, RVV_E32, V8SImode  , TARGET_VECTOR },
+    { -1, RVV_E32, V16SImode , TARGET_VECTOR },
+    { -1, RVV_E32, V32SImode , TARGET_VECTOR },
+
+    { -1, RVV_E64, V1DImode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E64, V2DImode  , TARGET_VECTOR },
+    { -1, RVV_E64, V4DImode  , TARGET_VECTOR },
+    { -1, RVV_E64, V8DImode  , TARGET_VECTOR },
+    { -1, RVV_E64, V16DImode , TARGET_VECTOR },
+
+    /* TImode vectors are never enabled; kept so ENABLED lookups can
+       still find them.  */
+    { -1, -1, V1TImode , 0 },
+    { -1, -1, V2TImode , 0 },
+    { -1, -1, V4TImode , 0 },
+    { -1, -1, V8TImode , 0 },
+
+    { -1, RVV_E16, V4HFmode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E16, V8HFmode  , TARGET_VECTOR },
+    { -1, RVV_E16, V16HFmode , TARGET_VECTOR },
+    { -1, RVV_E16, V32HFmode , TARGET_VECTOR },
+    { -1, RVV_E16, V64HFmode , TARGET_VECTOR },
+
+    { -1, RVV_E32, V2SFmode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E32, V4SFmode  , TARGET_VECTOR },
+    { -1, RVV_E32, V8SFmode  , TARGET_VECTOR },
+    { -1, RVV_E32, V16SFmode , TARGET_VECTOR },
+    { -1, RVV_E32, V32SFmode , TARGET_VECTOR },
+
+    { -1, RVV_E64, V1DFmode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E64, V2DFmode  , TARGET_VECTOR },
+    { -1, RVV_E64, V4DFmode  , TARGET_VECTOR },
+    { -1, RVV_E64, V8DFmode  , TARGET_VECTOR },
+    { -1, RVV_E64, V16DFmode , TARGET_VECTOR },
+
+    /* Non-power-of-two element counts (segment/tuple-style modes).  */
+    { -1, RVV_E8, V24QImode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E8, V40QImode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E8, V48QImode  , TARGET_VECTOR },
+    { -1, RVV_E8, V56QImode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E8, V80QImode  , TARGET_VECTOR },
+    { -1, RVV_E8, V96QImode  , TARGET_VECTOR },
+    { -1, RVV_E8, V112QImode , TARGET_VECTOR },
+
+    { -1, RVV_E16, V12HImode , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E16, V20HImode , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E16, V24HImode , TARGET_VECTOR },
+    { -1, RVV_E16, V28HImode , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E16, V40HImode , TARGET_VECTOR },
+    { -1, RVV_E16, V48HImode , TARGET_VECTOR },
+    { -1, RVV_E16, V56HImode , TARGET_VECTOR },
+
+    { -1, RVV_E32, V6SImode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E32, V10SImode , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E32, V12SImode , TARGET_VECTOR },
+    { -1, RVV_E32, V14SImode , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E32, V20SImode , TARGET_VECTOR },
+    { -1, RVV_E32, V24SImode , TARGET_VECTOR },
+    { -1, RVV_E32, V28SImode , TARGET_VECTOR },
+
+    { -1, RVV_E64, V3DImode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E64, V5DImode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E64, V6DImode  , TARGET_VECTOR },
+    { -1, RVV_E64, V7DImode  , TARGET_VECTOR_VLEN(64) },
+    { -1, RVV_E64, V10DImode , TARGET_VECTOR },
+    { -1, RVV_E64, V12DImode , TARGET_VECTOR },
+    { -1, RVV_E64, V14DImode , TARGET_VECTOR },
+
+    { -1, RVV_E16, V12HFmode , TARGET_VECTOR },
+    { -1, RVV_E16, V20HFmode , TARGET_VECTOR },
+    { -1, RVV_E16, V24HFmode , TARGET_VECTOR },
+    { -1, RVV_E16, V28HFmode , TARGET_VECTOR },
+    { -1, RVV_E16, V40HFmode , TARGET_VECTOR },
+    { -1, RVV_E16, V48HFmode , TARGET_VECTOR },
+    { -1, RVV_E16, V56HFmode , TARGET_VECTOR },
+
+    { -1, RVV_E32, V6SFmode  , TARGET_VECTOR },
+    { -1, RVV_E32, V10SFmode , TARGET_VECTOR },
+    { -1, RVV_E32, V12SFmode , TARGET_VECTOR },
+    { -1, RVV_E32, V14SFmode , TARGET_VECTOR },
+    { -1, RVV_E32, V20SFmode , TARGET_VECTOR },
+    { -1, RVV_E32, V24SFmode , TARGET_VECTOR },
+    { -1, RVV_E32, V28SFmode , TARGET_VECTOR },
+
+    { -1, RVV_E64, V3DFmode  , TARGET_VECTOR },
+    { -1, RVV_E64, V5DFmode  , TARGET_VECTOR },
+    { -1, RVV_E64, V6DFmode  , TARGET_VECTOR },
+    { -1, RVV_E64, V7DFmode  , TARGET_VECTOR },
+    { -1, RVV_E64, V10DFmode , TARGET_VECTOR },
+    { -1, RVV_E64, V12DFmode , TARGET_VECTOR },
+    { -1, RVV_E64, V14DFmode , TARGET_VECTOR },
+  };
+
+  if (!(TARGET_VECTOR || enabled))
+    return NULL;
+
+  /* size_t index: ARRAY_SIZE is unsigned, and a signed comparison here
+     trips -Wsign-compare under GCC's -Werror bootstrap.  */
+  for (size_t i = 0; i < ARRAY_SIZE(vmode); i++)
+    {
+      if (!(vmode[i].enabled || enabled))
+        continue;
+      if (vmode[i].mode != mode)
+        continue;
+      if (sew)
+        *sew = vmode[i].sew;
+      if (lmul)
+        *lmul = vmode[i].lmul;
+      return &vmode[i];
+    }
+
+  return NULL;
+}
+
+/* Implement TARGET_VECTOR_MODE_SUPPORTED_P: true iff MODE has an
+   enabled entry in the RVV mode table for the current target.  */
+static bool
+riscv_vector_mode_supported_p (machine_mode mode)
+{
+  return riscv_vector_vmode_supported_p (mode, NULL, NULL, false) != NULL;
+}
+
 /* Helper function for riscv_build_integer; arguments are as for
    riscv_build_integer.  */

@@ -785,6 +936,41 @@ riscv_valid_lo_sum_p (enum riscv_symbol_type sym_type, machine_mode mode,
   return true;
 }

+/* Return true if X is a valid address for vector mode MODE.  If it is,
+   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
+   effect.  */
+
+static bool
+riscv_classify_address_vector (struct riscv_address_info *info, rtx x,
+  machine_mode mode, bool strict_p)
+{
+  /* After reload, accept label-based constant addresses (constant-pool
+     style references).  */
+  if (reload_completed
+      && (GET_CODE (x) == LABEL_REF
+          || (GET_CODE (x) == CONST
+              && GET_CODE (XEXP (x, 0)) == PLUS
+              && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
+              && CONST_INT_P (XEXP (XEXP (x, 0), 1)))))
+    return true;
+
+  switch (GET_CODE (x))
+    {
+    case REG:
+    case SUBREG:
+      info->type = ADDRESS_REG;
+      info->reg = x;
+      info->offset = const0_rtx;
+      return riscv_valid_base_register_p (x, mode, strict_p);
+
+    case PLUS:
+      /* Vector accesses support no displacement: only BASE + 0 with a
+         valid base register is acceptable.  Check CONST_INT_P before
+         INTVAL -- the second operand may be a REG (reg+reg address).  */
+      info->type = ADDRESS_REG;
+      info->reg = XEXP (x, 0);
+      info->offset = XEXP (x, 1);
+      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
+              && CONST_INT_P (info->offset)
+              && INTVAL (info->offset) == 0);
+
+    default:
+      return false;
+    }
+}
+
 /* Return true if X is a valid address for machine mode MODE.  If it is,
    fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
    effect.  */
@@ -793,6 +979,10 @@ static bool
 riscv_classify_address (struct riscv_address_info *info, rtx x,
  machine_mode mode, bool strict_p)
 {
+  if (riscv_vector_mode_supported_p(mode)
+      && !riscv_classify_address_vector(info, x, mode, strict_p))
+    return false;
+
   switch (GET_CODE (x))
     {
     case REG:
@@ -908,6 +1098,8 @@ riscv_const_insns (rtx x)

     case CONST_DOUBLE:
     case CONST_VECTOR:
+      if (GET_CODE (x) == CONST_VECTOR && TARGET_VECTOR)
+        return 0;
       /* We can use x0 to load floating-point zero.  */
       return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;

@@ -1305,6 +1497,33 @@ riscv_force_address (rtx x, machine_mode mode)
   return x;
 }

+/* Legitimize X as an address for vector mode MODE.  Vector addressing
+   accepts only a bare base register, so any BASE + OFFSET form is
+   folded into a single register.  */
+static rtx
+riscv_legitimize_vector_address (rtx x, machine_mode mode)
+{
+  /* Handle BASE + OFFSET.  */
+  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
+      && INTVAL (XEXP (x, 1)) != 0)
+    {
+      rtx base = XEXP (x, 0);
+      HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
+
+      if (!riscv_valid_base_register_p (base, mode, false))
+        base = copy_to_mode_reg (Pmode, base);
+
+      /* Always fold the displacement into a fresh base register (the
+         original code did this unconditionally despite indentation
+         suggesting otherwise); the result then needs no offset.  */
+      rtx base_reg = gen_reg_rtx (Pmode);
+      emit_move_insn (base_reg,
+                      force_operand (plus_constant (Pmode, base, offset),
+                                     NULL_RTX));
+
+      return riscv_force_address (riscv_add_offset (NULL, base_reg, 0),
+                                  mode);
+    }
+  return x;
+}
+
 /* This function is used to implement LEGITIMIZE_ADDRESS.  If X can
    be legitimized in a way that the generic machinery might not expect,
    return a new address, otherwise return NULL.  MODE is the mode of
@@ -1316,6 +1535,9 @@ riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
 {
   rtx addr;

+  if (riscv_vector_mode_supported_p(mode))
+    return riscv_legitimize_vector_address(x, mode);
+
   if (riscv_tls_symbol_p (x))
     return riscv_legitimize_tls_address (x);

@@ -1865,6 +2087,17 @@ riscv_split_doubleword_move (rtx dest, rtx src)
        riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
      }
 }
+
+/* Return TRUE if X is a legitimate vector address for an access in
+   MODE.  Uses non-strict (pre-reload) checking.  */
+
+bool
+riscv_legitimize_address_vector_p (rtx x, machine_mode mode)
+{
+  struct riscv_address_info addr;
+
+  /* Only vector modes can have vector addresses; the classifier makes
+     the final decision.  */
+  return (riscv_vector_mode_supported_p (mode)
+          && riscv_classify_address_vector (&addr, x, mode, false));
+}
+

 /* Return the appropriate instructions to move SRC into DEST.  Assume
    that SRC is operand 1 and DEST is operand 0.  */
@@ -2635,6 +2868,9 @@ riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
  }
     }

+  if (riscv_vector_mode_supported_p(mode))
+    return NULL_RTX;
+
   /* Work out the size of the argument.  */
   num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
   num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
@@ -2746,6 +2982,9 @@ riscv_pass_by_reference (cumulative_args_t cum_v, machine_mode mode,
  return false;
     }

+  if (riscv_vector_mode_supported_p(mode))
+    return true;
+
   /* Pass by reference if the data do not fit in two integer registers.  */
   return !IN_RANGE (size, 0, 2 * UNITS_PER_WORD);
 }
@@ -4227,6 +4466,22 @@ static bool
 riscv_secondary_memory_needed (machine_mode mode, reg_class_t class1,
         reg_class_t class2)
 {
+  if (riscv_vector_mode_supported_p(mode))
+    {
+      if (class1 == class2 && class1 == V_REGS)
+        return false;
+
+      if (class1 == V_REGS && class2 == GR_REGS)
+        return true;
+      if (class1 == GR_REGS && class2 == V_REGS)
+        return true;
+
+      if (class1 == V_REGS && class2 == FP_REGS)
+        return true;
+      if (class1 == FP_REGS && class2 == V_REGS)
+        return true;
+    }
+
   return (GET_MODE_SIZE (mode) > UNITS_PER_WORD
    && (class1 == FP_REGS) != (class2 == FP_REGS));
 }
@@ -4248,6 +4503,9 @@ riscv_hard_regno_nregs (unsigned int regno, machine_mode mode)
   if (FP_REG_P (regno))
     return (GET_MODE_SIZE (mode) + UNITS_PER_FP_REG - 1) / UNITS_PER_FP_REG;

+  if (VR_REG_P (regno))
+    return (GET_MODE_SIZE (mode) + UNITS_PER_VR_REG - 1) / UNITS_PER_VR_REG;
+
   /* All other registers are word-sized.  */
   return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
 }
@@ -4263,12 +4521,24 @@ riscv_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
     {
       if (!GP_REG_P (regno + nregs - 1))
  return false;
+
+      if (TARGET_VECTOR)
+        {
+          if (riscv_vector_mode_supported_p(mode))
+            return false;
+        }
     }
   else if (FP_REG_P (regno))
     {
       if (!FP_REG_P (regno + nregs - 1))
  return false;

+      if (TARGET_VECTOR)
+        {
+          if (riscv_vector_mode_supported_p(mode))
+            return false;
+        }
+
       if (GET_MODE_CLASS (mode) != MODE_FLOAT
    && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
  return false;
@@ -4280,6 +4550,20 @@ riscv_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
        && GET_MODE_UNIT_SIZE (mode) > UNITS_PER_FP_ARG))
  return false;
     }
+  else if (VR_REG_P (regno))
+    {
+      int sew = 0;
+      int mul = 0;
+      if (riscv_vector_mode_supported_p(mode)
+          && VR_REG_P (regno + nregs - 1)
+          && (!((regno - V_REG_FIRST) % nregs)
+              || (riscv_vector_vmode_supported_p(mode, &sew, &mul, false)
+                  && sew < 0
+                  && mul > 0
+                  && !((regno - V_REG_FIRST) % (nregs / mul)))))
+        return true;
+      return false;
+    }
   else
     return false;

@@ -4315,6 +4599,9 @@ riscv_class_max_nregs (reg_class_t rclass, machine_mode mode)
   if (reg_class_subset_p (GR_REGS, rclass))
     return riscv_hard_regno_nregs (GP_REG_FIRST, mode);

+  if (reg_class_subset_p (V_REGS, rclass))
+    return riscv_hard_regno_nregs (V_REG_FIRST, mode);
+
   return 0;
 }

@@ -4580,6 +4867,12 @@ riscv_conditional_register_usage (void)
       for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
  call_used_regs[regno] = 1;
     }
+
+  if (!TARGET_VECTOR)
+    {
+      for (int regno = V_REG_FIRST; regno <= V_REG_LAST; regno++)
+        fixed_regs[regno] = call_used_regs[regno] = 1;
+    }
 }

 /* Return a register priority for hard reg REGNO.  */
@@ -4865,8 +5158,11 @@ riscv_slow_unaligned_access (machine_mode, unsigned int)
 /* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */

 static bool
-riscv_can_change_mode_class (machine_mode, machine_mode, reg_class_t rclass)
+riscv_can_change_mode_class (machine_mode from, machine_mode to, reg_class_t rclass)
 {
+  if (riscv_vector_mode_supported_p(from) && !riscv_vector_mode_supported_p(to))
+    return false;
+
   return !reg_classes_intersect_p (FP_REGS, rclass);
 }

@@ -4920,6 +5216,109 @@ riscv_hard_regno_rename_ok (unsigned from_regno ATTRIBUTE_UNUSED,
   return !cfun->machine->interrupt_handler_p || df_regs_ever_live_p (to_regno);
 }

+/* Emit a vsetvli instruction configuring the vector unit for MODE.
+   GVL receives the granted vector length.  AVL is the requested
+   application vector length, or NULL_RTX/x0 to request the maximum.
+   NF, if nonnull, is a CONST_INT segment count that scales LMUL down
+   accordingly.  Returns GVL.  */
+
+rtx
+riscv_emit_vsetvli_base (machine_mode mode, rtx gvl, rtx avl, rtx nf)
+{
+  /* Derive the SEW encoding from MODE; MODE must be a supported
+     vector mode.  */
+  int isew = -1;
+  riscv_vector_vmode_supported_p (mode, &isew, NULL, false);
+  gcc_assert (isew != -1);
+  rtx sew = GEN_INT (isew);
+
+  /* LMUL is the number of vector registers MODE occupies.  */
+  int mul = (GET_MODE_SIZE (mode) + UNITS_PER_VR_REG - 1) / UNITS_PER_VR_REG;
+
+  if (nf)
+    {
+      /* For segment accesses each of the NF fields uses MUL / NF
+         registers.  */
+      int nf_value = INTVAL (nf);
+      mul /= nf_value;
+    }
+
+  /* The instruction encodes log2 (LMUL), so LMUL must be a power
+     of two.  */
+  gcc_assert (exact_log2 (mul) != -1);
+  rtx lmul = GEN_INT (exact_log2 (mul));
+
+  /* An AVL of x0 (or no AVL at all) selects the maximum length.  */
+  if (avl == NULL_RTX
+      || (REG_P (avl) && REGNO (avl) == 0))
+    emit_insn (gen_riscv_vsetvli_max (gvl, sew, lmul));
+  else
+    emit_insn (gen_riscv_vsetvli (gvl, avl, sew, lmul));
+
+  return gvl;
+}
+
+/* Emit a vsetvli for MODE with requested length AVL; the granted
+   length is written to GVL.  Convenience wrapper around
+   riscv_emit_vsetvli_base with no segment count.  */
+
+rtx
+riscv_emit_vsetvli (machine_mode mode, rtx gvl, rtx avl)
+{
+  return riscv_emit_vsetvli_base (mode, gvl, avl, NULL);
+}
+
+/* Emit a vsetvli for MODE requesting the maximum vector length.
+   The granted length is discarded by writing it to register x0.  */
+
+rtx
+riscv_emit_vsetvli_max (machine_mode mode)
+{
+  rtx gvl = gen_rtx_REG (SImode, 0);  /* x0: result discarded.  */
+  return riscv_emit_vsetvli (mode, gvl, NULL_RTX);
+}
+
+/* Return the assembly SEW suffix for encoding INDEX
+   (0 -> e8, 1 -> e16, 2 -> e32, 3 -> e64).  */
+
+const char *
+riscv_output_vector_sew (int index)
+{
+  static const char *const sew[] = {"e8", "e16", "e32", "e64"};
+  /* Reject negative indices too; ARRAY_SIZE alone would compare
+     signed against unsigned.  */
+  gcc_assert (index >= 0 && index < (int) ARRAY_SIZE (sew));
+  return sew[index];
+}
+
+/* Return the assembly LMUL suffix for encoding INDEX
+   (0 -> m1, 1 -> m2, 2 -> m4, 3 -> m8).  */
+
+const char *
+riscv_output_vector_lmul (int index)
+{
+  static const char *const lmul[] = {"m1", "m2", "m4", "m8"};
+  /* Reject negative indices too; ARRAY_SIZE alone would compare
+     signed against unsigned.  */
+  gcc_assert (index >= 0 && index < (int) ARRAY_SIZE (lmul));
+  return lmul[index];
+}
+
+/* Return the assembly template for vector instruction INSN in MODE.
+   The template is currently mode-independent and returned unchanged;
+   MODE is kept for future per-mode rewriting.  */
+
+const char *
+riscv_output_vector_insn (machine_mode mode ATTRIBUTE_UNUSED,
+                          const char *insn)
+{
+  return insn;
+}
+
+/* Nonzero once the post-reload scheduling pass has finished for the
+   current function; cleared again when the next function's assembly
+   prologue is emitted.  */
+int sched_finish_global = 0;
+/* Implement TARGET_SCHED_FINISH_GLOBAL.  Record that scheduling after
+   reload has completed.  */
+static void
+riscv_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
+                           int sched_verbose ATTRIBUTE_UNUSED)
+{
+  if (reload_completed)
+    sched_finish_global = 1;
+}
+
+/* Implement TARGET_ASM_FUNCTION_PROLOGUE.  Reset the scheduling-done
+   flag at the start of each function's output.  */
+
+static void
+riscv_asm_function_prologue (FILE *file ATTRIBUTE_UNUSED)
+{
+  sched_finish_global = 0;
+}
+
+/* Implement TARGET_ARRAY_MODE_SUPPORTED_P.  Allow arrays of supported
+   vector modes; NELEMS is not consulted.  */
+
+static bool
+riscv_array_mode_supported_p (machine_mode mode,
+                              unsigned HOST_WIDE_INT nelems ATTRIBUTE_UNUSED)
+{
+  return riscv_vector_mode_supported_p (mode);
+}
+
+/* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  Return FALSE if MODE is
+   a vector mode, and punt to the generic implementation otherwise.  */
+
+static bool
+riscv_scalar_mode_supported_p (scalar_mode mode)
+{
+  int sew = 0;
+  int mul = 0;
+  /* NOTE(review): the SEW < 0 && MUL > 0 test mirrors the one in
+     riscv_hard_regno_mode_ok; confirm that a negative SEW is really
+     the marker for vector-only modes.  */
+  if (riscv_vector_vmode_supported_p (mode, &sew, &mul, true)
+      && sew < 0
+      && mul > 0)
+    return false;
+
+  return default_scalar_mode_supported_p (mode);
+}
+
 /* Initialize the GCC target structure.  */
 #undef TARGET_ASM_ALIGNED_HI_OP
 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
@@ -5094,6 +5493,22 @@ riscv_hard_regno_rename_ok (unsigned from_regno ATTRIBUTE_UNUSED,
 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P riscv_scalar_mode_supported_p
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P riscv_vector_mode_supported_p
+
+#undef TARGET_ARRAY_MODE_SUPPORTED_P
+#define TARGET_ARRAY_MODE_SUPPORTED_P riscv_array_mode_supported_p
+
+#undef TARGET_SCHED_FINISH_GLOBAL
+#define TARGET_SCHED_FINISH_GLOBAL riscv_sched_finish_global
+
+#undef  TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE riscv_asm_function_prologue
+
 struct gcc_target targetm = TARGET_INITIALIZER;

 #include "gt-riscv.h"
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index e40535c9e40..0d249a4430f 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -83,6 +83,7 @@

 (include "predicates.md")
 (include "constraints.md")
+(include "iterators.md")

 ;; ....................
 ;;
@@ -2463,3 +2464,6 @@
 (include "pic.md")
 (include "generic.md")
 (include "sifive-7.md")
+
+(include "riscv-v.md")
+(include "riscv-v-float.md")
\ No newline at end of file
-- 
2.24.3 (Apple Git-128)


More information about the Gcc-patches mailing list