RISC-V: Support segment intrinsics
author     Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
           Fri, 28 Apr 2023 10:17:46 +0000 (18:17 +0800)
committer  Jeff Law <jlaw@ventanamicro.com>
           Thu, 13 Jul 2023 18:02:06 +0000 (12:02 -0600)
Add segment load/store intrinsics:
https://github.com/riscv-non-isa/rvv-intrinsic-doc/pull/198
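
As an illustrative usage sketch (editorial, not part of the commit; the
intrinsic names follow the rvv-intrinsic-doc scheme and the loop is an
ordinary strip-mined AVL loop):

    #include <stddef.h>
    #include <riscv_vector.h>

    /* Copy n (x, y) float pairs using the new 2-field segment
       load/store intrinsics; field 0 holds the x values, field 1
       the y values.  */
    void
    copy_xy_pairs (float *dst, const float *src, size_t n)
    {
      while (n > 0)
        {
          size_t vl = __riscv_vsetvl_e32m1 (n);
          vfloat32m1x2_t v = __riscv_vlseg2e32_v_f32m1x2 (src, vl);
          __riscv_vsseg2e32_v_f32m1x2 (dst, v, vl);
          src += 2 * vl;
          dst += 2 * vl;
          n -= vl;
        }
    }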

gcc/ChangeLog:

* config/riscv/riscv-vector-builtins-bases.cc (fold_fault_load):
New function.
(class vlseg): New class.
(class vsseg): Ditto.
(class vlsseg): Ditto.
(class vssseg): Ditto.
(class seg_indexed_load): Ditto.
(class seg_indexed_store): Ditto.
(class vlsegff): Ditto.
(BASE): Ditto.
* config/riscv/riscv-vector-builtins-bases.h: Ditto.
* config/riscv/riscv-vector-builtins-functions.def (vlseg):
Ditto.
(vsseg): Ditto.
(vlsseg): Ditto.
(vssseg): Ditto.
(vluxseg): Ditto.
(vloxseg): Ditto.
(vsuxseg): Ditto.
(vsoxseg): Ditto.
(vlsegff): Ditto.
* config/riscv/riscv-vector-builtins-shapes.cc (struct
seg_loadstore_def): Ditto.
(struct seg_indexed_loadstore_def): Ditto.
(struct seg_fault_load_def): Ditto.
(SHAPE): Ditto.
* config/riscv/riscv-vector-builtins-shapes.h: Ditto.
* config/riscv/riscv-vector-builtins.cc
(function_builder::append_nf): New function.
* config/riscv/riscv-vector-builtins.def (vfloat32m1x2_t):
Change ptr from double to float.
(vfloat32m1x3_t): Ditto.
(vfloat32m1x4_t): Ditto.
(vfloat32m1x5_t): Ditto.
(vfloat32m1x6_t): Ditto.
(vfloat32m1x7_t): Ditto.
(vfloat32m1x8_t): Ditto.
(vfloat32m2x2_t): Ditto.
(vfloat32m2x3_t): Ditto.
(vfloat32m2x4_t): Ditto.
(vfloat32m4x2_t): Ditto.
* config/riscv/riscv-vector-builtins.h: Add segment intrinsics.
* config/riscv/riscv-vsetvl.cc (fault_first_load_p): Adapt for
segment ff load.
* config/riscv/riscv.md: Add segment instructions.
* config/riscv/vector-iterators.md: Support segment intrinsics.
* config/riscv/vector.md (@pred_unit_strided_load<mode>): New
pattern.
(@pred_unit_strided_store<mode>): Ditto.
(@pred_strided_load<mode>): Ditto.
(@pred_strided_store<mode>): Ditto.
(@pred_fault_load<mode>): Ditto.
(@pred_indexed_<order>load<V1T:mode><V1I:mode>): Ditto.
(@pred_indexed_<order>load<V2T:mode><V2I:mode>): Ditto.
(@pred_indexed_<order>load<V4T:mode><V4I:mode>): Ditto.
(@pred_indexed_<order>load<V8T:mode><V8I:mode>): Ditto.
(@pred_indexed_<order>load<V16T:mode><V16I:mode>): Ditto.
(@pred_indexed_<order>load<V32T:mode><V32I:mode>): Ditto.
(@pred_indexed_<order>load<V64T:mode><V64I:mode>): Ditto.
(@pred_indexed_<order>store<V1T:mode><V1I:mode>): Ditto.
(@pred_indexed_<order>store<V2T:mode><V2I:mode>): Ditto.
(@pred_indexed_<order>store<V4T:mode><V4I:mode>): Ditto.
(@pred_indexed_<order>store<V8T:mode><V8I:mode>): Ditto.
(@pred_indexed_<order>store<V16T:mode><V16I:mode>): Ditto.
(@pred_indexed_<order>store<V32T:mode><V32I:mode>): Ditto.
(@pred_indexed_<order>store<V64T:mode><V64I:mode>): Ditto.

Signed-off-by: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
12 files changed:
gcc/config/riscv/riscv-vector-builtins-bases.cc
gcc/config/riscv/riscv-vector-builtins-bases.h
gcc/config/riscv/riscv-vector-builtins-functions.def
gcc/config/riscv/riscv-vector-builtins-shapes.cc
gcc/config/riscv/riscv-vector-builtins-shapes.h
gcc/config/riscv/riscv-vector-builtins.cc
gcc/config/riscv/riscv-vector-builtins.def
gcc/config/riscv/riscv-vector-builtins.h
gcc/config/riscv/riscv-vsetvl.cc
gcc/config/riscv/riscv.md
gcc/config/riscv/vector-iterators.md
gcc/config/riscv/vector.md

diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc
index 8693b2887fb54093f2155a6fa6150ffbae308d62..ab5b4dc951565f2885ee05d2623707be35e53460 100644
@@ -58,6 +58,54 @@ enum lst_type
   LST_INDEXED,
 };
 
+/* Helper function to fold vleff and vlsegff.  */
+static gimple *
+fold_fault_load (gimple_folder &f)
+{
+  /* fold fault_load (const *base, size_t *new_vl, size_t vl)
+
+     ====> fault_load (const *base, size_t vl)
+          new_vl = MEM_REF[read_vl ()].  */
+
+  auto_vec<tree> vargs (gimple_call_num_args (f.call) - 1);
+
+  for (unsigned i = 0; i < gimple_call_num_args (f.call); i++)
+    {
+      /* Exclude size_t *new_vl argument.  */
+      if (i == gimple_call_num_args (f.call) - 2)
+       continue;
+
+      vargs.quick_push (gimple_call_arg (f.call, i));
+    }
+
+  gimple *repl = gimple_build_call_vec (gimple_call_fn (f.call), vargs);
+  gimple_call_set_lhs (repl, f.lhs);
+
+  /* Handle size_t *new_vl by read_vl.  */
+  tree new_vl = gimple_call_arg (f.call, gimple_call_num_args (f.call) - 2);
+  if (integer_zerop (new_vl))
+    {
+      /* This case happens when the user passes a nullptr as the new_vl
+        argument.  In that case, we just ignore the new_vl argument and
+        return the fault_load instruction directly.  */
+      return repl;
+    }
+
+  tree tmp_var = create_tmp_var (size_type_node, "new_vl");
+  tree decl = get_read_vl_decl ();
+  gimple *g = gimple_build_call (decl, 0);
+  gimple_call_set_lhs (g, tmp_var);
+  tree indirect
+    = fold_build2 (MEM_REF, size_type_node,
+                  gimple_call_arg (f.call, gimple_call_num_args (f.call) - 2),
+                  build_int_cst (build_pointer_type (size_type_node), 0));
+  gassign *assign = gimple_build_assign (indirect, tmp_var);
+
+  gsi_insert_after (f.gsi, assign, GSI_SAME_STMT);
+  gsi_insert_after (f.gsi, g, GSI_SAME_STMT);
+  return repl;
+}
+
 /* Implements vsetvl<mode> && vsetvlmax<mode>.  */
 template<bool VLMAX_P>
 class vsetvl : public function_base
@@ -1655,49 +1703,7 @@ public:
 
   gimple *fold (gimple_folder &f) const override
   {
-    /* fold vleff (const *base, size_t *new_vl, size_t vl)
-
-       ====> vleff (const *base, size_t vl)
-            new_vl = MEM_REF[read_vl ()].  */
-
-    auto_vec<tree> vargs (gimple_call_num_args (f.call) - 1);
-
-    for (unsigned i = 0; i < gimple_call_num_args (f.call); i++)
-      {
-       /* Exclude size_t *new_vl argument.  */
-       if (i == gimple_call_num_args (f.call) - 2)
-         continue;
-
-       vargs.quick_push (gimple_call_arg (f.call, i));
-      }
-
-    gimple *repl = gimple_build_call_vec (gimple_call_fn (f.call), vargs);
-    gimple_call_set_lhs (repl, f.lhs);
-
-    /* Handle size_t *new_vl by read_vl.  */
-    tree new_vl = gimple_call_arg (f.call, gimple_call_num_args (f.call) - 2);
-    if (integer_zerop (new_vl))
-      {
-       /* This case happens when the user passes a nullptr as the new_vl
-          argument.  In that case, we just ignore the new_vl argument and
-          return the vleff instruction directly.  */
-       return repl;
-      }
-
-    tree tmp_var = create_tmp_var (size_type_node, "new_vl");
-    tree decl = get_read_vl_decl ();
-    gimple *g = gimple_build_call (decl, 0);
-    gimple_call_set_lhs (g, tmp_var);
-    tree indirect
-      = fold_build2 (MEM_REF, size_type_node,
-                    gimple_call_arg (f.call,
-                                     gimple_call_num_args (f.call) - 2),
-                    build_int_cst (build_pointer_type (size_type_node), 0));
-    gassign *assign = gimple_build_assign (indirect, tmp_var);
-
-    gsi_insert_after (f.gsi, assign, GSI_SAME_STMT);
-    gsi_insert_after (f.gsi, g, GSI_SAME_STMT);
-    return repl;
+    return fold_fault_load (f);
   }
 
   rtx expand (function_expander &e) const override
@@ -1722,6 +1728,166 @@ public:
   }
 };
 
+/* Implements vlseg.v.  */
+class vlseg : public function_base
+{
+public:
+  unsigned int call_properties (const function_instance &) const override
+  {
+    return CP_READ_MEMORY;
+  }
+
+  bool can_be_overloaded_p (enum predication_type_index pred) const override
+  {
+    return pred != PRED_TYPE_none && pred != PRED_TYPE_mu;
+  }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn (
+      code_for_pred_unit_strided_load (e.vector_mode ()));
+  }
+};
+
+/* Implements vsseg.v.  */
+class vsseg : public function_base
+{
+public:
+  bool apply_tail_policy_p () const override { return false; }
+  bool apply_mask_policy_p () const override { return false; }
+
+  unsigned int call_properties (const function_instance &) const override
+  {
+    return CP_WRITE_MEMORY;
+  }
+
+  bool can_be_overloaded_p (enum predication_type_index) const override
+  {
+    return true;
+  }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn (
+      code_for_pred_unit_strided_store (e.vector_mode ()));
+  }
+};
+
+/* Implements vlsseg.v.  */
+class vlsseg : public function_base
+{
+public:
+  unsigned int call_properties (const function_instance &) const override
+  {
+    return CP_READ_MEMORY;
+  }
+
+  bool can_be_overloaded_p (enum predication_type_index pred) const override
+  {
+    return pred != PRED_TYPE_none && pred != PRED_TYPE_mu;
+  }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn (
+      code_for_pred_strided_load (e.vector_mode ()));
+  }
+};
+
+/* Implements vssseg.v.  */
+class vssseg : public function_base
+{
+public:
+  bool apply_tail_policy_p () const override { return false; }
+  bool apply_mask_policy_p () const override { return false; }
+
+  unsigned int call_properties (const function_instance &) const override
+  {
+    return CP_WRITE_MEMORY;
+  }
+
+  bool can_be_overloaded_p (enum predication_type_index) const override
+  {
+    return true;
+  }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn (
+      code_for_pred_strided_store (e.vector_mode ()));
+  }
+};
+
+template<int UNSPEC>
+class seg_indexed_load : public function_base
+{
+public:
+  unsigned int call_properties (const function_instance &) const override
+  {
+    return CP_READ_MEMORY;
+  }
+
+  bool can_be_overloaded_p (enum predication_type_index) const override
+  {
+    return true;
+  }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn (
+      code_for_pred_indexed_load (UNSPEC, e.vector_mode (), e.index_mode ()));
+  }
+};
+
+template<int UNSPEC>
+class seg_indexed_store : public function_base
+{
+public:
+  bool apply_tail_policy_p () const override { return false; }
+  bool apply_mask_policy_p () const override { return false; }
+
+  unsigned int call_properties (const function_instance &) const override
+  {
+    return CP_WRITE_MEMORY;
+  }
+
+  bool can_be_overloaded_p (enum predication_type_index) const override
+  {
+    return true;
+  }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn (
+      code_for_pred_indexed_store (UNSPEC, e.vector_mode (), e.index_mode ()));
+  }
+};
+
+/* Implements vlsegff.v.  */
+class vlsegff : public function_base
+{
+public:
+  unsigned int call_properties (const function_instance &) const override
+  {
+    return CP_READ_MEMORY | CP_WRITE_CSR;
+  }
+
+  bool can_be_overloaded_p (enum predication_type_index pred) const override
+  {
+    return pred != PRED_TYPE_none && pred != PRED_TYPE_mu;
+  }
+
+  gimple *fold (gimple_folder &f) const override
+  {
+    return fold_fault_load (f);
+  }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn (code_for_pred_fault_load (e.vector_mode ()));
+  }
+};
+
 static CONSTEXPR const vsetvl<false> vsetvl_obj;
 static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
 static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
@@ -1933,6 +2099,15 @@ static CONSTEXPR const vget vget_obj;
 static CONSTEXPR const read_vl read_vl_obj;
 static CONSTEXPR const vleff vleff_obj;
 static CONSTEXPR const vlenb vlenb_obj;
+static CONSTEXPR const vlseg vlseg_obj;
+static CONSTEXPR const vsseg vsseg_obj;
+static CONSTEXPR const vlsseg vlsseg_obj;
+static CONSTEXPR const vssseg vssseg_obj;
+static CONSTEXPR const seg_indexed_load<UNSPEC_UNORDERED> vluxseg_obj;
+static CONSTEXPR const seg_indexed_load<UNSPEC_ORDERED> vloxseg_obj;
+static CONSTEXPR const seg_indexed_store<UNSPEC_UNORDERED> vsuxseg_obj;
+static CONSTEXPR const seg_indexed_store<UNSPEC_ORDERED> vsoxseg_obj;
+static CONSTEXPR const vlsegff vlsegff_obj;
 
 /* Declare the function base NAME, pointing it to an instance
    of class <NAME>_obj.  */
@@ -2150,5 +2325,14 @@ BASE (vget)
 BASE (read_vl)
 BASE (vleff)
 BASE (vlenb)
+BASE (vlseg)
+BASE (vsseg)
+BASE (vlsseg)
+BASE (vssseg)
+BASE (vluxseg)
+BASE (vloxseg)
+BASE (vsuxseg)
+BASE (vsoxseg)
+BASE (vlsegff)
 
 } // end namespace riscv_vector
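
Editorial note on fold_fault_load above: at source level, the fold rewrites a
fault-only-first call so that the size_t *new_vl output pointer is serviced by
a separate read_vl () CSR read (read_vl is a GCC-internal built-in; the GIMPLE
after the fold is sketched here as C-like pseudocode):

    /* Before the fold, as the user wrote it:  */
    ret = __riscv_vlseg2e32ff_v_f32m1x2 (base, &new_vl, vl);

    /* After the fold: the pointer operand is dropped from the call and
       the written-back VL is read explicitly.  When the user passes a
       nullptr for new_vl, the read and store are omitted entirely.  */
    ret = __riscv_vlseg2e32ff_v_f32m1x2 (base, vl);
    tmp = read_vl ();
    new_vl = tmp;    /* MEM_REF store through the original pointer */
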
diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.h b/gcc/config/riscv/riscv-vector-builtins-bases.h
index 0196f80b69e21a4a7c5aa17fe1ba8880a03cfd5a..62ff38a2811a518072b54ce434a090ce96aa77b0 100644
@@ -241,6 +241,15 @@ extern const function_base *const vget;
 extern const function_base *const read_vl;
 extern const function_base *const vleff;
 extern const function_base *const vlenb;
+extern const function_base *const vlseg;
+extern const function_base *const vsseg;
+extern const function_base *const vlsseg;
+extern const function_base *const vssseg;
+extern const function_base *const vluxseg;
+extern const function_base *const vloxseg;
+extern const function_base *const vsuxseg;
+extern const function_base *const vsoxseg;
+extern const function_base *const vlsegff;
 }
 
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins-functions.def b/gcc/config/riscv/riscv-vector-builtins-functions.def
index ed3f5583fc6aef97a8e87d1cf999bb2b368a9d67..7200036d85373598ec685c0895fb142cc394f940 100644
@@ -536,5 +536,26 @@ DEF_RVV_FUNCTION (vget, vget, none_preds, all_v_vget_lmul4_x2_ops)
 // Tuple types
 DEF_RVV_FUNCTION (vset, vset, none_preds, all_v_vset_tuple_ops)
 DEF_RVV_FUNCTION (vget, vget, none_preds, all_v_vget_tuple_ops)
+DEF_RVV_FUNCTION (vlseg, seg_loadstore, full_preds, tuple_v_scalar_const_ptr_ops)
+DEF_RVV_FUNCTION (vsseg, seg_loadstore, none_m_preds, tuple_v_scalar_ptr_ops)
+DEF_RVV_FUNCTION (vlsseg, seg_loadstore, full_preds, tuple_v_scalar_const_ptr_ptrdiff_ops)
+DEF_RVV_FUNCTION (vssseg, seg_loadstore, none_m_preds, tuple_v_scalar_ptr_ptrdiff_ops)
+DEF_RVV_FUNCTION (vluxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew8_index_ops)
+DEF_RVV_FUNCTION (vluxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew16_index_ops)
+DEF_RVV_FUNCTION (vluxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew32_index_ops)
+DEF_RVV_FUNCTION (vluxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew64_index_ops)
+DEF_RVV_FUNCTION (vloxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew8_index_ops)
+DEF_RVV_FUNCTION (vloxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew16_index_ops)
+DEF_RVV_FUNCTION (vloxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew32_index_ops)
+DEF_RVV_FUNCTION (vloxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew64_index_ops)
+DEF_RVV_FUNCTION (vsuxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew8_index_ops)
+DEF_RVV_FUNCTION (vsuxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew16_index_ops)
+DEF_RVV_FUNCTION (vsuxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew32_index_ops)
+DEF_RVV_FUNCTION (vsuxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew64_index_ops)
+DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew8_index_ops)
+DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew16_index_ops)
+DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew32_index_ops)
+DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew64_index_ops)
+DEF_RVV_FUNCTION (vlsegff, seg_fault_load, full_preds, tuple_v_scalar_const_ptr_size_ptr_ops)
 
 #undef DEF_RVV_FUNCTION
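
For reference (editorial; per the rvv-intrinsic-doc naming, and ignoring the
predication variants _m/_tu/... that full_preds and none_m_preds add), the
registrations above yield prototypes such as these for the f32m1x2 tuple type:

    #include <stddef.h>
    #include <riscv_vector.h>

    vfloat32m1x2_t __riscv_vlseg2e32_v_f32m1x2 (const float *base, size_t vl);
    void __riscv_vsseg2e32_v_f32m1x2 (float *base, vfloat32m1x2_t v, size_t vl);
    vfloat32m1x2_t __riscv_vlsseg2e32_v_f32m1x2 (const float *base,
                                                 ptrdiff_t bstride, size_t vl);
    void __riscv_vssseg2e32_v_f32m1x2 (float *base, ptrdiff_t bstride,
                                       vfloat32m1x2_t v, size_t vl);
    vfloat32m1x2_t __riscv_vluxseg2ei32_v_f32m1x2 (const float *base,
                                                   vuint32m1_t bindex, size_t vl);
    vfloat32m1x2_t __riscv_vlseg2e32ff_v_f32m1x2 (const float *base,
                                                  size_t *new_vl, size_t vl);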
diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
index 0682f81400ac4343001b69b4f9e2452b8b6ca28c..822588c7b6e2f265edefbfc58f4545474efb5496 100644
@@ -577,6 +577,142 @@ struct vlenb_def : public function_shape
   }
 };
 
+/* seg_loadstore_def class.  */
+struct seg_loadstore_def : public build_base
+{
+  char *get_name (function_builder &b, const function_instance &instance,
+                 bool overloaded_p) const override
+  {
+    /* Return nullptr if it can not be overloaded.  */
+    if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
+      return nullptr;
+
+    b.append_base_name (instance.base_name);
+
+    tree type = builtin_types[instance.type.index].vector;
+    machine_mode mode = TYPE_MODE (type);
+
+    int nf = get_nf (mode);
+    /* vop --> vop<nf>.  */
+    b.append_nf (nf);
+
+    /* vop<nf> --> vop<nf>e.  */
+    b.append_name ("e");
+
+    int sew = GET_MODE_BITSIZE (GET_MODE_INNER (mode));
+    /* vop<nf>e --> vop<nf>e<sew>.  */
+    b.append_sew (sew);
+
+    if (!overloaded_p)
+      {
+       /* vop<nf>e<sew> --> vop<nf>e<sew>_v.  */
+       b.append_name (operand_suffixes[instance.op_info->op]);
+       /* vop<nf>e<sew>_v --> vop<nf>e<sew>_v_<type>.  */
+       b.append_name (type_suffixes[instance.type.index].vector);
+      }
+
+    /* According to rvv-intrinsic-doc, it does not add the "_m" suffix
+       for the vop_m C++ overloaded API.  */
+    if (overloaded_p && instance.pred == PRED_TYPE_m)
+      return b.finish_name ();
+    b.append_name (predication_suffixes[instance.pred]);
+    return b.finish_name ();
+  }
+};
+
+/* seg_indexed_loadstore_def class.  */
+struct seg_indexed_loadstore_def : public indexed_loadstore_def
+{
+  char *get_name (function_builder &b, const function_instance &instance,
+                 bool overloaded_p) const override
+  {
+    /* Return nullptr if it can not be overloaded.  */
+    if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
+      return nullptr;
+
+    b.append_base_name (instance.base_name);
+
+    tree type = builtin_types[instance.type.index].vector;
+    machine_mode mode = TYPE_MODE (type);
+
+    int nf = get_nf (mode);
+    /* vop --> vop<nf>.  */
+    b.append_nf (nf);
+
+    /* vop<nf> --> vop<nf>ei.  */
+    b.append_name ("ei");
+
+    /* vop<nf>ei --> vop<nf>ei<eew>.  */
+    vector_type_index arg1_type_idx
+      = instance.op_info->args[1].get_function_type_index (instance.type.index);
+    tree index_type = builtin_types[arg1_type_idx].vector;
+    machine_mode index_mode = TYPE_MODE (index_type);
+    int eew = GET_MODE_BITSIZE (GET_MODE_INNER (index_mode));
+    b.append_sew (eew);
+
+    if (!overloaded_p)
+      {
+       /* vop<nf>ei<eew> --> vop<nf>ei<eew>_v.  */
+       b.append_name (operand_suffixes[instance.op_info->op]);
+       /* vop<nf>ei<eew>_v --> vop<nf>ei<eew>_v_<type>.  */
+       b.append_name (type_suffixes[instance.type.index].vector);
+      }
+
+    /* According to rvv-intrinsic-doc, it does not add the "_m" suffix
+       for the vop_m C++ overloaded API.  */
+    if (overloaded_p && instance.pred == PRED_TYPE_m)
+      return b.finish_name ();
+    b.append_name (predication_suffixes[instance.pred]);
+    return b.finish_name ();
+  }
+};
+
+/* seg_fault_load_def class.  */
+struct seg_fault_load_def : public build_base
+{
+  char *get_name (function_builder &b, const function_instance &instance,
+                 bool overloaded_p) const override
+  {
+    /* Return nullptr if it can not be overloaded.  */
+    if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
+      return nullptr;
+
+    b.append_name ("__riscv_vlseg");
+
+    tree type = builtin_types[instance.type.index].vector;
+    machine_mode mode = TYPE_MODE (type);
+
+    int nf = get_nf (mode);
+    /* vop --> vop<nf>.  */
+    b.append_nf (nf);
+
+    /* vop<nf> --> vop<nf>e.  */
+    b.append_name ("e");
+
+    int sew = GET_MODE_BITSIZE (GET_MODE_INNER (mode));
+    /* vop<nf>e --> vop<nf>e<sew>.  */
+    b.append_sew (sew);
+
+    /* vop<nf>e<sew> --> vop<nf>e<sew>ff.  */
+    b.append_name ("ff");
+
+    if (!overloaded_p)
+      {
+       /* vop<nf>e<sew>ff --> vop<nf>e<sew>ff_v.  */
+       b.append_name (operand_suffixes[instance.op_info->op]);
+       /* vop<nf>e<sew>ff_v --> vop<nf>e<sew>ff_v_<type>.  */
+       b.append_name (type_suffixes[instance.type.index].vector);
+      }
+
+    /* According to rvv-intrinsic-doc, it does not add the "_m" suffix
+       for the vop_m C++ overloaded API.  */
+    if (overloaded_p && instance.pred == PRED_TYPE_m)
+      return b.finish_name ();
+    b.append_name (predication_suffixes[instance.pred]);
+    return b.finish_name ();
+  }
+};
+
 SHAPE(vsetvl, vsetvl)
 SHAPE(vsetvl, vsetvlmax)
 SHAPE(loadstore, loadstore)
@@ -597,5 +733,8 @@ SHAPE(vget, vget)
 SHAPE(read_vl, read_vl)
 SHAPE(fault_load, fault_load)
 SHAPE(vlenb, vlenb)
+SHAPE(seg_loadstore, seg_loadstore)
+SHAPE(seg_indexed_loadstore, seg_indexed_loadstore)
+SHAPE(seg_fault_load, seg_fault_load)
 
 } // end namespace riscv_vector
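
Editorial sketch of the suffix assembly these shapes perform (a hypothetical
standalone helper, not GCC code; the append_* steps above do the equivalent
piecewise):

    #include <stdio.h>

    /* Mimics seg_loadstore_def::get_name: base name, <nf>, "e", <sew>,
       operand suffix, type suffix, predication suffix.  */
    static void
    build_seg_name (char *buf, size_t len, const char *base, int nf,
                    int sew, const char *op, const char *type,
                    const char *pred)
    {
      snprintf (buf, len, "__riscv_%s%de%d%s%s%s",
                base, nf, sew, op, type, pred);
    }

    int
    main (void)
    {
      char name[64];
      build_seg_name (name, sizeof name, "vlseg", 2, 32,
                      "_v", "_f32m1x2", "_tu");
      puts (name);  /* prints __riscv_vlseg2e32_v_f32m1x2_tu */
      return 0;
    }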
diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.h b/gcc/config/riscv/riscv-vector-builtins-shapes.h
index aee2f94b04c433000207d11f8b00b9e30b44ab7d..6a51713c12c74efa76c50dd4856d5817d89ddc76 100644
@@ -44,6 +44,9 @@ extern const function_shape *const vget;
 extern const function_shape *const read_vl;
 extern const function_shape *const fault_load;
 extern const function_shape *const vlenb;
+extern const function_shape *const seg_loadstore;
+extern const function_shape *const seg_indexed_loadstore;
+extern const function_shape *const seg_fault_load;
 }
 
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc
index e3cdbfe890a0c8656597e19cff63fbd045af9324..4117897c6c9151ea78aca60aab0bf29b997f53f4 100644
@@ -2170,6 +2170,110 @@ static CONSTEXPR const rvv_op_info all_v_vget_tuple_ops
      rvv_arg_type_info (RVV_BASE_tuple_subpart), /* Return type */
      v_size_args /* Args */};
 
+/* A static operand information for vector_type func (const scalar_type *)
+ * function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_const_ptr_ops
+  = {tuple_ops,                                  /* Types */
+     OP_TYPE_v,                                  /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     scalar_const_ptr_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_ptr_ops
+  = {tuple_ops,                                /* Types */
+     OP_TYPE_v,                                /* Suffix */
+     rvv_arg_type_info (RVV_BASE_void), /* Return type */
+     scalar_ptr_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * ptrdiff_t) function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_const_ptr_ptrdiff_ops
+  = {tuple_ops,                                  /* Types */
+     OP_TYPE_v,                                  /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     scalar_const_ptr_ptrdiff_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, ptrdiff_t,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_ptr_ptrdiff_ops
+  = {tuple_ops,                                /* Types */
+     OP_TYPE_v,                                /* Suffix */
+     rvv_arg_type_info (RVV_BASE_void), /* Return type */
+     scalar_ptr_ptrdiff_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * eew8_index_type) function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_const_ptr_eew8_index_ops
+  = {tuple_ops,                                  /* Types */
+     OP_TYPE_v,                                  /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     scalar_const_ptr_eew8_index_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * eew16_index_type) function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_const_ptr_eew16_index_ops
+  = {tuple_ops,                                  /* Types */
+     OP_TYPE_v,                                  /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     scalar_const_ptr_eew16_index_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * eew32_index_type) function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_const_ptr_eew32_index_ops
+  = {tuple_ops,                                  /* Types */
+     OP_TYPE_v,                                  /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     scalar_const_ptr_eew32_index_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * eew64_index_type) function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_const_ptr_eew64_index_ops
+  = {tuple_ops,                                  /* Types */
+     OP_TYPE_v,                                  /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     scalar_const_ptr_eew64_index_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, eew8_index_type,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_ptr_eew8_index_ops
+  = {tuple_ops,                                /* Types */
+     OP_TYPE_v,                                /* Suffix */
+     rvv_arg_type_info (RVV_BASE_void), /* Return type */
+     scalar_ptr_eew8_index_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, eew16_index_type,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_ptr_eew16_index_ops
+  = {tuple_ops,                                /* Types */
+     OP_TYPE_v,                                /* Suffix */
+     rvv_arg_type_info (RVV_BASE_void), /* Return type */
+     scalar_ptr_eew16_index_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, eew32_index_type,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_ptr_eew32_index_ops
+  = {tuple_ops,                                /* Types */
+     OP_TYPE_v,                                /* Suffix */
+     rvv_arg_type_info (RVV_BASE_void), /* Return type */
+     scalar_ptr_eew32_index_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, eew64_index_type,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_ptr_eew64_index_ops
+  = {tuple_ops,                                /* Types */
+     OP_TYPE_v,                                /* Suffix */
+     rvv_arg_type_info (RVV_BASE_void), /* Return type */
+     scalar_ptr_eew64_index_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * size_t *) function registration. */
+static CONSTEXPR const rvv_op_info tuple_v_scalar_const_ptr_size_ptr_ops
+  = {tuple_ops,                                  /* Types */
+     OP_TYPE_v,                                  /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     scalar_const_ptr_size_ptr_args /* Args */};
+
 /* A list of all RVV base function types.  */
 static CONSTEXPR const function_type_info function_types[] = {
 #define DEF_RVV_TYPE_INDEX(                                                    \
@@ -2911,6 +3015,38 @@ function_builder::append_sew (int sew)
     }
 }
 
+/* Add NF into function name.  */
+void
+function_builder::append_nf (int nf)
+{
+  switch (nf)
+    {
+    case 2:
+      append_name ("2");
+      break;
+    case 3:
+      append_name ("3");
+      break;
+    case 4:
+      append_name ("4");
+      break;
+    case 5:
+      append_name ("5");
+      break;
+    case 6:
+      append_name ("6");
+      break;
+    case 7:
+      append_name ("7");
+      break;
+    case 8:
+      append_name ("8");
+      break;
+    default:
+      gcc_unreachable ();
+    }
+}
+
 /* Zero-terminate and complete the function name being built.  */
 char *
 function_builder::finish_name ()
diff --git a/gcc/config/riscv/riscv-vector-builtins.def b/gcc/config/riscv/riscv-vector-builtins.def
index 78b3c7e33fd413ee167341bfada9efbffbfe4eba..0a387fd1617ab160847dacb0ea3901f165790b22 100644
@@ -496,13 +496,13 @@ DEF_RVV_TUPLE_TYPE (vfloat32mf2x8_t, 20, __rvv_float32mf2x8_t, vfloat32mf2_t, fl
 DEF_RVV_TYPE (vfloat32m1_t, 17, __rvv_float32m1_t, float, VNx4SF, VNx2SF, VNx1SF,
              _f32m1, _f32, _e32m1)
 /* Define tuple types for SEW = 32, LMUL = M1.  */
-DEF_RVV_TUPLE_TYPE (vfloat32m1x2_t, 19, __rvv_float32m1x2_t, vfloat32m1_t, double, 2, _f32m1x2)
-DEF_RVV_TUPLE_TYPE (vfloat32m1x3_t, 19, __rvv_float32m1x3_t, vfloat32m1_t, double, 3, _f32m1x3)
-DEF_RVV_TUPLE_TYPE (vfloat32m1x4_t, 19, __rvv_float32m1x4_t, vfloat32m1_t, double, 4, _f32m1x4)
-DEF_RVV_TUPLE_TYPE (vfloat32m1x5_t, 19, __rvv_float32m1x5_t, vfloat32m1_t, double, 5, _f32m1x5)
-DEF_RVV_TUPLE_TYPE (vfloat32m1x6_t, 19, __rvv_float32m1x6_t, vfloat32m1_t, double, 6, _f32m1x6)
-DEF_RVV_TUPLE_TYPE (vfloat32m1x7_t, 19, __rvv_float32m1x7_t, vfloat32m1_t, double, 7, _f32m1x7)
-DEF_RVV_TUPLE_TYPE (vfloat32m1x8_t, 19, __rvv_float32m1x8_t, vfloat32m1_t, double, 8, _f32m1x8)
+DEF_RVV_TUPLE_TYPE (vfloat32m1x2_t, 19, __rvv_float32m1x2_t, vfloat32m1_t, float, 2, _f32m1x2)
+DEF_RVV_TUPLE_TYPE (vfloat32m1x3_t, 19, __rvv_float32m1x3_t, vfloat32m1_t, float, 3, _f32m1x3)
+DEF_RVV_TUPLE_TYPE (vfloat32m1x4_t, 19, __rvv_float32m1x4_t, vfloat32m1_t, float, 4, _f32m1x4)
+DEF_RVV_TUPLE_TYPE (vfloat32m1x5_t, 19, __rvv_float32m1x5_t, vfloat32m1_t, float, 5, _f32m1x5)
+DEF_RVV_TUPLE_TYPE (vfloat32m1x6_t, 19, __rvv_float32m1x6_t, vfloat32m1_t, float, 6, _f32m1x6)
+DEF_RVV_TUPLE_TYPE (vfloat32m1x7_t, 19, __rvv_float32m1x7_t, vfloat32m1_t, float, 7, _f32m1x7)
+DEF_RVV_TUPLE_TYPE (vfloat32m1x8_t, 19, __rvv_float32m1x8_t, vfloat32m1_t, float, 8, _f32m1x8)
 /* LMUL = 2:
    Machine mode = VNx8SFmode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx4SFmode when TARGET_MIN_VLEN > 32.
@@ -510,9 +510,9 @@ DEF_RVV_TUPLE_TYPE (vfloat32m1x8_t, 19, __rvv_float32m1x8_t, vfloat32m1_t, doubl
 DEF_RVV_TYPE (vfloat32m2_t, 17, __rvv_float32m2_t, float, VNx8SF, VNx4SF, VNx2SF,
              _f32m2, _f32, _e32m2)
 /* Define tuple types for SEW = 32, LMUL = M2.  */
-DEF_RVV_TUPLE_TYPE (vfloat32m2x2_t, 19, __rvv_float32m2x2_t, vfloat32m2_t, double, 2, _f32m2x2)
-DEF_RVV_TUPLE_TYPE (vfloat32m2x3_t, 19, __rvv_float32m2x3_t, vfloat32m2_t, double, 3, _f32m2x3)
-DEF_RVV_TUPLE_TYPE (vfloat32m2x4_t, 19, __rvv_float32m2x4_t, vfloat32m2_t, double, 4, _f32m2x4)
+DEF_RVV_TUPLE_TYPE (vfloat32m2x2_t, 19, __rvv_float32m2x2_t, vfloat32m2_t, float, 2, _f32m2x2)
+DEF_RVV_TUPLE_TYPE (vfloat32m2x3_t, 19, __rvv_float32m2x3_t, vfloat32m2_t, float, 3, _f32m2x3)
+DEF_RVV_TUPLE_TYPE (vfloat32m2x4_t, 19, __rvv_float32m2x4_t, vfloat32m2_t, float, 4, _f32m2x4)
 /* LMUL = 4:
    Machine mode = VNx16SFmode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx8SFmode when TARGET_MIN_VLEN > 32.
@@ -520,7 +520,7 @@ DEF_RVV_TUPLE_TYPE (vfloat32m2x4_t, 19, __rvv_float32m2x4_t, vfloat32m2_t, doubl
 DEF_RVV_TYPE (vfloat32m4_t, 17, __rvv_float32m4_t, float, VNx16SF, VNx8SF, VNx4SF,
              _f32m4, _f32, _e32m4)
 /* Define tuple types for SEW = 32, LMUL = M4.  */
-DEF_RVV_TUPLE_TYPE (vfloat32m4x2_t, 19, __rvv_float32m4x2_t, vfloat32m4_t, double, 2, _f32m4x2)
+DEF_RVV_TUPLE_TYPE (vfloat32m4x2_t, 19, __rvv_float32m4x2_t, vfloat32m4_t, float, 2, _f32m4x2)
 /* LMUL = 8:
    Machine mode = VNx32SFmode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx16SFmode when TARGET_MIN_VLEN > 32.
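
Editorial note on the double -> float correction above: the tuple's scalar
type determines the pointer type used by the segment intrinsics, so the
vfloat32m1x*_t family must pair with float rather than double, e.g.:

    #include <stddef.h>
    #include <riscv_vector.h>

    /* With the corrected scalar type, the base pointer is const float *,
       matching the 32-bit element type of the tuple.  */
    vfloat32m1x2_t
    load_pair (const float *base, size_t vl)
    {
      return __riscv_vlseg2e32_v_f32m1x2 (base, vl);
    }
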
diff --git a/gcc/config/riscv/riscv-vector-builtins.h b/gcc/config/riscv/riscv-vector-builtins.h
index 9acfa035027092b09b7023be1490a9d1df20b539..b024731e37af78d18ed5bcf9cd26bd80a2c22d8f 100644
@@ -280,6 +280,7 @@ public:
   void append_name (const char *);
   void append_base_name (const char *);
   void append_sew (int);
+  void append_nf (int);
   char *finish_name ();
 
 private:
diff --git a/gcc/config/riscv/riscv-vsetvl.cc b/gcc/config/riscv/riscv-vsetvl.cc
index 51109e963c14b35b5bdc5336aa6a445b662312e0..2275bc7d8fdcfbda1e98b3c03742cebaf00e47df 100644
@@ -200,7 +200,9 @@ scalar_move_insn_p (rtx_insn *rinsn)
 static bool
 fault_first_load_p (rtx_insn *rinsn)
 {
-  return recog_memoized (rinsn) >= 0 && get_attr_type (rinsn) == TYPE_VLDFF;
+  return recog_memoized (rinsn) >= 0
+        && (get_attr_type (rinsn) == TYPE_VLDFF
+            || get_attr_type (rinsn) == TYPE_VLSEGDFF);
 }
 
 /* Return true if the instruction is read vl instruction.  */
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index 6f2c5962ec46745fa7ed57c0d84d8b67e54c7bc2..7065e68c0b72012f113f603578587a80ea32e483 100644
 ;; vldff       vector unit-stride fault-only-first load instructions
 ;; vldr        vector whole register load instructions
 ;; vstr        vector whole register store instructions
+;; vlsegde     vector segment unit-stride load instructions
+;; vssegte     vector segment unit-stride store instructions
+;; vlsegds     vector segment strided load instructions
+;; vssegts     vector segment strided store instructions
+;; vlsegdux    vector segment unordered indexed load instructions
+;; vlsegdox    vector segment ordered indexed load instructions
+;; vssegtux    vector segment unordered indexed store instructions
+;; vssegtox    vector segment ordered indexed store instructions
+;; vlsegdff    vector segment unit-stride fault-only-first load instructions
 ;; 11. Vector integer arithmetic instructions
;; vialu       vector single-width integer add and subtract and logical instructions
 ;; viwalu      vector widening integer add/subtract
    clmul,min,max,minu,maxu,clz,ctz,cpop,
    atomic,condmove,crypto,rdvlenb,rdvl,vsetvl,vlde,vste,vldm,vstm,vlds,vsts,
    vldux,vldox,vstux,vstox,vldff,vldr,vstr,
+   vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,vssegtux,vssegtox,vlsegdff,
    vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,
    vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,
    vsalu,vaalu,vsmul,vsshift,vnclip,
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index 20c87515e069c4ee18c708a6e5018ad00fc218eb..29c9d77674bca0f8b888d323eae29da83569ca5c 100644
@@ -23,6 +23,7 @@
   UNSPEC_VUNDEF
   UNSPEC_VPREDICATE
   UNSPEC_VLMAX
+  UNSPEC_UNIT_STRIDED
   UNSPEC_STRIDED
 
;; It's used to specify ordered/unordered operation.
   (VNx8x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
 ])
 
+(define_mode_iterator V1I [
+  (VNx1QI "TARGET_MIN_VLEN < 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+])
+
+(define_mode_iterator V2I [
+  VNx2QI
+  VNx2HI
+  VNx2SI
+  (VNx2DI "TARGET_VECTOR_ELEN_64")
+])
+
+(define_mode_iterator V4I [
+  VNx4QI
+  VNx4HI
+  VNx4SI
+  (VNx4DI "TARGET_VECTOR_ELEN_64")
+])
+
+(define_mode_iterator V8I [
+  VNx8QI
+  VNx8HI
+  VNx8SI
+  (VNx8DI "TARGET_VECTOR_ELEN_64")
+])
+
+(define_mode_iterator V16I [
+  VNx16QI
+  VNx16HI
+  (VNx16SI "TARGET_MIN_VLEN > 32")
+  (VNx16DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V32I [
+  VNx32QI
+  (VNx32HI "TARGET_MIN_VLEN > 32")
+  (VNx32SI "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V64I [
+  (VNx64QI "TARGET_MIN_VLEN > 32")
+  (VNx64HI "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V1T [
+  (VNx2x1QI "TARGET_MIN_VLEN < 128")
+  (VNx3x1QI "TARGET_MIN_VLEN < 128")
+  (VNx4x1QI "TARGET_MIN_VLEN < 128")
+  (VNx5x1QI "TARGET_MIN_VLEN < 128")
+  (VNx6x1QI "TARGET_MIN_VLEN < 128")
+  (VNx7x1QI "TARGET_MIN_VLEN < 128")
+  (VNx8x1QI "TARGET_MIN_VLEN < 128")
+  (VNx2x1HI "TARGET_MIN_VLEN < 128")
+  (VNx3x1HI "TARGET_MIN_VLEN < 128")
+  (VNx4x1HI "TARGET_MIN_VLEN < 128")
+  (VNx5x1HI "TARGET_MIN_VLEN < 128")
+  (VNx6x1HI "TARGET_MIN_VLEN < 128")
+  (VNx7x1HI "TARGET_MIN_VLEN < 128")
+  (VNx8x1HI "TARGET_MIN_VLEN < 128")
+  (VNx2x1SI "TARGET_MIN_VLEN < 128")
+  (VNx3x1SI "TARGET_MIN_VLEN < 128")
+  (VNx4x1SI "TARGET_MIN_VLEN < 128")
+  (VNx5x1SI "TARGET_MIN_VLEN < 128")
+  (VNx6x1SI "TARGET_MIN_VLEN < 128")
+  (VNx7x1SI "TARGET_MIN_VLEN < 128")
+  (VNx8x1SI "TARGET_MIN_VLEN < 128")
+  (VNx2x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+  (VNx3x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+  (VNx4x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+  (VNx5x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+  (VNx6x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+  (VNx7x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+  (VNx8x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+  (VNx2x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+  (VNx3x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+  (VNx4x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+  (VNx5x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+  (VNx6x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+  (VNx7x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+  (VNx8x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+  (VNx2x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+  (VNx3x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+  (VNx4x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+  (VNx5x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+  (VNx6x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+  (VNx7x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+  (VNx8x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+])
+
+(define_mode_iterator V2T [
+  VNx2x2QI
+  VNx3x2QI
+  VNx4x2QI
+  VNx5x2QI
+  VNx6x2QI
+  VNx7x2QI
+  VNx8x2QI
+  VNx2x2HI
+  VNx3x2HI
+  VNx4x2HI
+  VNx5x2HI
+  VNx6x2HI
+  VNx7x2HI
+  VNx8x2HI
+  VNx2x2SI
+  VNx3x2SI
+  VNx4x2SI
+  (VNx5x2SI "TARGET_MIN_VLEN >= 64")
+  (VNx6x2SI "TARGET_MIN_VLEN >= 64")
+  (VNx7x2SI "TARGET_MIN_VLEN >= 64")
+  (VNx8x2SI "TARGET_MIN_VLEN >= 64")
+  (VNx2x2DI "TARGET_VECTOR_ELEN_64")
+  (VNx3x2DI "TARGET_VECTOR_ELEN_64")
+  (VNx4x2DI "TARGET_VECTOR_ELEN_64")
+  (VNx5x2DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx6x2DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx7x2DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx8x2DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx2x2SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx3x2SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx4x2SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx5x2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+  (VNx6x2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+  (VNx7x2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+  (VNx8x2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+  (VNx2x2DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx3x2DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx4x2DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx5x2DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+  (VNx6x2DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+  (VNx7x2DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+  (VNx8x2DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V4T [
+  VNx2x4QI
+  VNx3x4QI
+  VNx4x4QI
+  VNx5x4QI
+  VNx6x4QI
+  VNx7x4QI
+  VNx8x4QI
+  VNx2x4HI
+  VNx3x4HI
+  VNx4x4HI
+  (VNx5x4HI "TARGET_MIN_VLEN >= 64")
+  (VNx6x4HI "TARGET_MIN_VLEN >= 64")
+  (VNx7x4HI "TARGET_MIN_VLEN >= 64")
+  (VNx8x4HI "TARGET_MIN_VLEN >= 64")
+  VNx2x4SI
+  (VNx3x4SI "TARGET_MIN_VLEN >= 64")
+  (VNx4x4SI "TARGET_MIN_VLEN >= 64")
+  (VNx5x4SI "TARGET_MIN_VLEN >= 128")
+  (VNx6x4SI "TARGET_MIN_VLEN >= 128")
+  (VNx7x4SI "TARGET_MIN_VLEN >= 128")
+  (VNx8x4SI "TARGET_MIN_VLEN >= 128")
+  (VNx2x4DI "TARGET_VECTOR_ELEN_64")
+  (VNx3x4DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx4x4DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx2x4SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx3x4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+  (VNx4x4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+  (VNx5x4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx6x4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx7x4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx8x4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx2x4DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx3x4DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+  (VNx4x4DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V8T [
+  VNx2x8QI
+  VNx3x8QI
+  VNx4x8QI
+  (VNx5x8QI "TARGET_MIN_VLEN >= 64")
+  (VNx6x8QI "TARGET_MIN_VLEN >= 64")
+  (VNx7x8QI "TARGET_MIN_VLEN >= 64")
+  (VNx8x8QI "TARGET_MIN_VLEN >= 64")
+  VNx2x8HI
+  (VNx3x8HI "TARGET_MIN_VLEN >= 64")
+  (VNx4x8HI "TARGET_MIN_VLEN >= 64")
+  (VNx5x8HI "TARGET_MIN_VLEN >= 128")
+  (VNx6x8HI "TARGET_MIN_VLEN >= 128")
+  (VNx7x8HI "TARGET_MIN_VLEN >= 128")
+  (VNx8x8HI "TARGET_MIN_VLEN >= 128")
+  (VNx2x8SI "TARGET_MIN_VLEN >= 64")
+  (VNx3x8SI "TARGET_MIN_VLEN >= 128")
+  (VNx4x8SI "TARGET_MIN_VLEN >= 128")
+  (VNx2x8DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx2x8SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+  (VNx3x8SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx4x8SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx2x8DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V16T [
+  VNx2x16QI
+  (VNx3x16QI "TARGET_MIN_VLEN >= 64")
+  (VNx4x16QI "TARGET_MIN_VLEN >= 64")
+  (VNx5x16QI "TARGET_MIN_VLEN >= 128")
+  (VNx6x16QI "TARGET_MIN_VLEN >= 128")
+  (VNx7x16QI "TARGET_MIN_VLEN >= 128")
+  (VNx8x16QI "TARGET_MIN_VLEN >= 128")
+  (VNx2x16HI "TARGET_MIN_VLEN >= 64")
+  (VNx3x16HI "TARGET_MIN_VLEN >= 128")
+  (VNx4x16HI "TARGET_MIN_VLEN >= 128")
+  (VNx2x16SI "TARGET_MIN_VLEN >= 128")
+  (VNx2x16SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_iterator V32T [
+  (VNx2x32QI "TARGET_MIN_VLEN >= 64")
+  (VNx3x32QI "TARGET_MIN_VLEN >= 128")
+  (VNx4x32QI "TARGET_MIN_VLEN >= 128")
+  (VNx2x32HI "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V64T [
+  (VNx2x64QI "TARGET_MIN_VLEN >= 128")
+])
+
 (define_mode_attr VLMULX2 [
   (VNx1QI "VNx2QI") (VNx2QI "VNx4QI") (VNx4QI "VNx8QI") (VNx8QI "VNx16QI") (VNx16QI "VNx32QI") (VNx32QI "VNx64QI") (VNx64QI "VNx128QI")
   (VNx1HI "VNx2HI") (VNx2HI "VNx4HI") (VNx4HI "VNx8HI") (VNx8HI "VNx16HI") (VNx16HI "VNx32HI") (VNx32HI "VNx64HI")
   (VNx1DF "SF") (VNx2DF "SF") (VNx4DF "SF") (VNx8DF "SF") (VNx16DF "SF")
 ])
 
+(define_mode_attr nf [
+  (VNx2x64QI "2") (VNx2x32QI "2") (VNx3x32QI "3") (VNx4x32QI "4")
+  (VNx2x16QI "2") (VNx3x16QI "3") (VNx4x16QI "4") (VNx5x16QI "5") (VNx6x16QI "6") (VNx7x16QI "7") (VNx8x16QI "8")
+  (VNx2x8QI "2") (VNx3x8QI "3") (VNx4x8QI "4") (VNx5x8QI "5") (VNx6x8QI "6") (VNx7x8QI "7") (VNx8x8QI "8")
+  (VNx2x4QI "2") (VNx3x4QI "3") (VNx4x4QI "4") (VNx5x4QI "5") (VNx6x4QI "6") (VNx7x4QI "7") (VNx8x4QI "8")
+  (VNx2x2QI "2") (VNx3x2QI "3") (VNx4x2QI "4") (VNx5x2QI "5") (VNx6x2QI "6") (VNx7x2QI "7") (VNx8x2QI "8")
+  (VNx2x1QI "2") (VNx3x1QI "3") (VNx4x1QI "4") (VNx5x1QI "5") (VNx6x1QI "6") (VNx7x1QI "7") (VNx8x1QI "8")
+  (VNx2x32HI "2") (VNx2x16HI "2") (VNx3x16HI "3") (VNx4x16HI "4")
+  (VNx2x8HI "2") (VNx3x8HI "3") (VNx4x8HI "4") (VNx5x8HI "5") (VNx6x8HI "6") (VNx7x8HI "7") (VNx8x8HI "8")
+  (VNx2x4HI "2") (VNx3x4HI "3") (VNx4x4HI "4") (VNx5x4HI "5") (VNx6x4HI "6") (VNx7x4HI "7") (VNx8x4HI "8")
+  (VNx2x2HI "2") (VNx3x2HI "3") (VNx4x2HI "4") (VNx5x2HI "5") (VNx6x2HI "6") (VNx7x2HI "7") (VNx8x2HI "8")
+  (VNx2x1HI "2") (VNx3x1HI "3") (VNx4x1HI "4") (VNx5x1HI "5") (VNx6x1HI "6") (VNx7x1HI "7") (VNx8x1HI "8")
+  (VNx2x16SI "2") (VNx2x8SI "2") (VNx3x8SI "3") (VNx4x8SI "4")
+  (VNx2x4SI "2") (VNx3x4SI "3") (VNx4x4SI "4") (VNx5x4SI "5") (VNx6x4SI "6") (VNx7x4SI "7") (VNx8x4SI "8")
+  (VNx2x2SI "2") (VNx3x2SI "3") (VNx4x2SI "4") (VNx5x2SI "5") (VNx6x2SI "6") (VNx7x2SI "7") (VNx8x2SI "8")
+  (VNx2x1SI "2") (VNx3x1SI "3") (VNx4x1SI "4") (VNx5x1SI "5") (VNx6x1SI "6") (VNx7x1SI "7") (VNx8x1SI "8")
+  (VNx2x8DI "2") (VNx2x4DI "2") (VNx3x4DI "3") (VNx4x4DI "4")
+  (VNx2x2DI "2") (VNx3x2DI "3") (VNx4x2DI "4") (VNx5x2DI "5") (VNx6x2DI "6") (VNx7x2DI "7") (VNx8x2DI "8")
+  (VNx2x1DI "2") (VNx3x1DI "3") (VNx4x1DI "4") (VNx5x1DI "5") (VNx6x1DI "6") (VNx7x1DI "7") (VNx8x1DI "8")
+  (VNx2x16SF "2") (VNx2x8SF "2") (VNx3x8SF "3") (VNx4x8SF "4")
+  (VNx2x4SF "2") (VNx3x4SF "3") (VNx4x4SF "4") (VNx5x4SF "5") (VNx6x4SF "6") (VNx7x4SF "7") (VNx8x4SF "8")
+  (VNx2x2SF "2") (VNx3x2SF "3") (VNx4x2SF "4") (VNx5x2SF "5") (VNx6x2SF "6") (VNx7x2SF "7") (VNx8x2SF "8")
+  (VNx2x1SF "2") (VNx3x1SF "3") (VNx4x1SF "4") (VNx5x1SF "5") (VNx6x1SF "6") (VNx7x1SF "7") (VNx8x1SF "8")
+  (VNx2x8DF "2")
+  (VNx2x4DF "2") (VNx3x4DF "3") (VNx4x4DF "4")
+  (VNx2x2DF "2") (VNx3x2DF "3") (VNx4x2DF "4") (VNx5x2DF "5") (VNx6x2DF "6") (VNx7x2DF "7") (VNx8x2DF "8")
+  (VNx2x1DF "2") (VNx3x1DF "3") (VNx4x1DF "4") (VNx5x1DF "5") (VNx6x1DF "6") (VNx7x1DF "7") (VNx8x1DF "8")
+])
+
 (define_mode_attr sew [
   (VNx1QI "8") (VNx2QI "8") (VNx4QI "8") (VNx8QI "8") (VNx16QI "8") (VNx32QI "8") (VNx64QI "8") (VNx128QI "8")
   (VNx1HI "16") (VNx2HI "16") (VNx4HI "16") (VNx8HI "16") (VNx16HI "16") (VNx32HI "16") (VNx64HI "16")
   (VNx1DI "64") (VNx2DI "64") (VNx4DI "64") (VNx8DI "64") (VNx16DI "64")
   (VNx1SF "32") (VNx2SF "32") (VNx4SF "32") (VNx8SF "32") (VNx16SF "32") (VNx32SF "32")
   (VNx1DF "64") (VNx2DF "64") (VNx4DF "64") (VNx8DF "64") (VNx16DF "64")
+  (VNx2x64QI "8") (VNx2x32QI "8") (VNx3x32QI "8") (VNx4x32QI "8")
+  (VNx2x16QI "8") (VNx3x16QI "8") (VNx4x16QI "8") (VNx5x16QI "8") (VNx6x16QI "8") (VNx7x16QI "8") (VNx8x16QI "8")
+  (VNx2x8QI "8") (VNx3x8QI "8") (VNx4x8QI "8") (VNx5x8QI "8") (VNx6x8QI "8") (VNx7x8QI "8") (VNx8x8QI "8")
+  (VNx2x4QI "8") (VNx3x4QI "8") (VNx4x4QI "8") (VNx5x4QI "8") (VNx6x4QI "8") (VNx7x4QI "8") (VNx8x4QI "8")
+  (VNx2x2QI "8") (VNx3x2QI "8") (VNx4x2QI "8") (VNx5x2QI "8") (VNx6x2QI "8") (VNx7x2QI "8") (VNx8x2QI "8")
+  (VNx2x1QI "8") (VNx3x1QI "8") (VNx4x1QI "8") (VNx5x1QI "8") (VNx6x1QI "8") (VNx7x1QI "8") (VNx8x1QI "8")
+  (VNx2x32HI "16") (VNx2x16HI "16") (VNx3x16HI "16") (VNx4x16HI "16")
+  (VNx2x8HI "16") (VNx3x8HI "16") (VNx4x8HI "16") (VNx5x8HI "16") (VNx6x8HI "16") (VNx7x8HI "16") (VNx8x8HI "16")
+  (VNx2x4HI "16") (VNx3x4HI "16") (VNx4x4HI "16") (VNx5x4HI "16") (VNx6x4HI "16") (VNx7x4HI "16") (VNx8x4HI "16")
+  (VNx2x2HI "16") (VNx3x2HI "16") (VNx4x2HI "16") (VNx5x2HI "16") (VNx6x2HI "16") (VNx7x2HI "16") (VNx8x2HI "16")
+  (VNx2x1HI "16") (VNx3x1HI "16") (VNx4x1HI "16") (VNx5x1HI "16") (VNx6x1HI "16") (VNx7x1HI "16") (VNx8x1HI "16")
+  (VNx2x16SI "32") (VNx2x8SI "32") (VNx3x8SI "32") (VNx4x8SI "32")
+  (VNx2x4SI "32") (VNx3x4SI "32") (VNx4x4SI "32") (VNx5x4SI "32") (VNx6x4SI "32") (VNx7x4SI "32") (VNx8x4SI "32")
+  (VNx2x2SI "32") (VNx3x2SI "32") (VNx4x2SI "32") (VNx5x2SI "32") (VNx6x2SI "32") (VNx7x2SI "32") (VNx8x2SI "32")
+  (VNx2x1SI "32") (VNx3x1SI "32") (VNx4x1SI "32") (VNx5x1SI "32") (VNx6x1SI "32") (VNx7x1SI "32") (VNx8x1SI "32")
+  (VNx2x8DI "64") (VNx2x4DI "64") (VNx3x4DI "64") (VNx4x4DI "64")
+  (VNx2x2DI "64") (VNx3x2DI "64") (VNx4x2DI "64") (VNx5x2DI "64") (VNx6x2DI "64") (VNx7x2DI "64") (VNx8x2DI "64")
+  (VNx2x1DI "64") (VNx3x1DI "64") (VNx4x1DI "64") (VNx5x1DI "64") (VNx6x1DI "64") (VNx7x1DI "64") (VNx8x1DI "64")
+  (VNx2x16SF "32") (VNx2x8SF "32") (VNx3x8SF "32") (VNx4x8SF "32")
+  (VNx2x4SF "32") (VNx3x4SF "32") (VNx4x4SF "32") (VNx5x4SF "32") (VNx6x4SF "32") (VNx7x4SF "32") (VNx8x4SF "32")
+  (VNx2x2SF "32") (VNx3x2SF "32") (VNx4x2SF "32") (VNx5x2SF "32") (VNx6x2SF "32") (VNx7x2SF "32") (VNx8x2SF "32")
+  (VNx2x1SF "32") (VNx3x1SF "32") (VNx4x1SF "32") (VNx5x1SF "32") (VNx6x1SF "32") (VNx7x1SF "32") (VNx8x1SF "32")
+  (VNx2x8DF "64")
+  (VNx2x4DF "64") (VNx3x4DF "64") (VNx4x4DF "64")
+  (VNx2x2DF "64") (VNx3x2DF "64") (VNx4x2DF "64") (VNx5x2DF "64") (VNx6x2DF "64") (VNx7x2DF "64") (VNx8x2DF "64")
+  (VNx2x1DF "64") (VNx3x1DF "64") (VNx4x1DF "64") (VNx5x1DF "64") (VNx6x1DF "64") (VNx7x1DF "64") (VNx8x1DF "64")
 ])
 
 (define_mode_attr double_trunc_sew [
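
Editorial worked example of the new <nf> and <sew> mode attributes, which
simply decompose the tuple mode name VNx<nf>x<n><elt>:

    VNx3x8SI  -> nf = 3, sew = 32  (3 fields, each a VNx8SI vector of 32-bit elements)
    VNx2x64QI -> nf = 2, sew = 8   (2 fields, each a VNx64QI vector of 8-bit elements)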
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index 4775e188358371ad007fc2c9218ed77bbb6a52da..0a05b2a913ffb68e5ed60361cdbccb2223f62c76 100644
@@ -51,7 +51,8 @@
                          vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
                          vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,\
                          vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
-                         vgather,vcompress")
+                         vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
+                         vssegtux,vssegtox,vlsegdff")
         (const_string "true")]
        (const_string "false")))
 
@@ -72,7 +73,8 @@
                          vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
                          vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovxv,vfmovfv,\
                          vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
-                         vgather,vcompress")
+                         vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
+                         vssegtux,vssegtox,vlsegdff")
         (const_string "true")]
        (const_string "false")))
 
 (define_attr "sew" ""
   (cond [(eq_attr "mode" "VNx1QI,VNx2QI,VNx4QI,VNx8QI,VNx16QI,VNx32QI,VNx64QI,\
                          VNx1BI,VNx2BI,VNx4BI,VNx8BI,VNx16BI,VNx32BI,VNx64BI,\
-                         VNx128QI,VNx128BI")
+                         VNx128QI,VNx128BI,VNx2x64QI,VNx2x32QI,VNx3x32QI,VNx4x32QI,\
+                         VNx2x16QI,VNx3x16QI,VNx4x16QI,VNx5x16QI,VNx6x16QI,VNx7x16QI,VNx8x16QI,\
+                         VNx2x8QI,VNx3x8QI,VNx4x8QI,VNx5x8QI,VNx6x8QI,VNx7x8QI,VNx8x8QI,\
+                         VNx2x4QI,VNx3x4QI,VNx4x4QI,VNx5x4QI,VNx6x4QI,VNx7x4QI,VNx8x4QI,\
+                         VNx2x2QI,VNx3x2QI,VNx4x2QI,VNx5x2QI,VNx6x2QI,VNx7x2QI,VNx8x2QI,\
+                         VNx2x1QI,VNx3x1QI,VNx4x1QI,VNx5x1QI,VNx6x1QI,VNx7x1QI,VNx8x1QI")
         (const_int 8)
-        (eq_attr "mode" "VNx1HI,VNx2HI,VNx4HI,VNx8HI,VNx16HI,VNx32HI,VNx64HI")
+        (eq_attr "mode" "VNx1HI,VNx2HI,VNx4HI,VNx8HI,VNx16HI,VNx32HI,VNx64HI,\
+                         VNx2x32HI,VNx2x16HI,VNx3x16HI,VNx4x16HI,\
+                         VNx2x8HI,VNx3x8HI,VNx4x8HI,VNx5x8HI,VNx6x8HI,VNx7x8HI,VNx8x8HI,\
+                         VNx2x4HI,VNx3x4HI,VNx4x4HI,VNx5x4HI,VNx6x4HI,VNx7x4HI,VNx8x4HI,\
+                         VNx2x2HI,VNx3x2HI,VNx4x2HI,VNx5x2HI,VNx6x2HI,VNx7x2HI,VNx8x2HI,\
+                         VNx2x1HI,VNx3x1HI,VNx4x1HI,VNx5x1HI,VNx6x1HI,VNx7x1HI,VNx8x1HI")
         (const_int 16)
         (eq_attr "mode" "VNx1SI,VNx2SI,VNx4SI,VNx8SI,VNx16SI,VNx32SI,\
-                         VNx1SF,VNx2SF,VNx4SF,VNx8SF,VNx16SF,VNx32SF")
+                         VNx1SF,VNx2SF,VNx4SF,VNx8SF,VNx16SF,VNx32SF,\
+                         VNx2x16SI,VNx2x8SI,VNx3x8SI,VNx4x8SI,\
+                         VNx2x4SI,VNx3x4SI,VNx4x4SI,VNx5x4SI,VNx6x4SI,VNx7x4SI,VNx8x4SI,\
+                         VNx2x2SI,VNx3x2SI,VNx4x2SI,VNx5x2SI,VNx6x2SI,VNx7x2SI,VNx8x2SI,\
+                         VNx2x1SI,VNx3x1SI,VNx4x1SI,VNx5x1SI,VNx6x1SI,VNx7x1SI,VNx8x1SI,\
+                         VNx2x16SF,VNx2x8SF,VNx3x8SF,VNx4x8SF,\
+                         VNx2x4SF,VNx3x4SF,VNx4x4SF,VNx5x4SF,VNx6x4SF,VNx7x4SF,VNx8x4SF,\
+                         VNx2x2SF,VNx3x2SF,VNx4x2SF,VNx5x2SF,VNx6x2SF,VNx7x2SF,VNx8x2SF,\
+                         VNx2x1SF,VNx3x1SF,VNx4x1SF,VNx5x1SF,VNx6x1SF,VNx7x1SF,VNx8x1SF")
         (const_int 32)
         (eq_attr "mode" "VNx1DI,VNx2DI,VNx4DI,VNx8DI,VNx16DI,\
-                         VNx1DF,VNx2DF,VNx4DF,VNx8DF,VNx16DF")
+                         VNx1DF,VNx2DF,VNx4DF,VNx8DF,VNx16DF,\
+                         VNx2x8DI,VNx2x4DI,VNx3x4DI,VNx4x4DI,\
+                         VNx2x2DI,VNx3x2DI,VNx4x2DI,VNx5x2DI,VNx6x2DI,VNx7x2DI,VNx8x2DI,\
+                         VNx2x1DI,VNx3x1DI,VNx4x1DI,VNx5x1DI,VNx6x1DI,VNx7x1DI,VNx8x1DI,\
+                         VNx2x8DF,VNx2x4DF,VNx3x4DF,VNx4x4DF,\
+                         VNx2x2DF,VNx3x2DF,VNx4x2DF,VNx5x2DF,VNx6x2DF,VNx7x2DF,VNx8x2DF,\
+                         VNx2x1DF,VNx3x1DF,VNx4x1DF,VNx5x1DF,VNx6x1DF,VNx7x1DF,VNx8x1DF")
         (const_int 64)]
        (const_int INVALID_ATTRIBUTE)))
 
 ;; Ditto to LMUL.
 (define_attr "vlmul" ""
-  (cond [(eq_attr "mode" "VNx1QI,VNx1BI")
+  (cond [(eq_attr "mode" "VNx1QI,VNx1BI,VNx2x1QI,VNx3x1QI,VNx4x1QI,VNx5x1QI,VNx6x1QI,VNx7x1QI,VNx8x1QI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx1QImode)")
-        (eq_attr "mode" "VNx2QI,VNx2BI")
+        (eq_attr "mode" "VNx2QI,VNx2BI,VNx2x2QI,VNx3x2QI,VNx4x2QI,VNx5x2QI,VNx6x2QI,VNx7x2QI,VNx8x2QI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx2QImode)")
-        (eq_attr "mode" "VNx4QI,VNx4BI")
+        (eq_attr "mode" "VNx4QI,VNx4BI,VNx2x4QI,VNx3x4QI,VNx4x4QI,VNx5x4QI,VNx6x4QI,VNx7x4QI,VNx8x4QI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx4QImode)")
-        (eq_attr "mode" "VNx8QI,VNx8BI")
+        (eq_attr "mode" "VNx8QI,VNx8BI,VNx2x8QI,VNx3x8QI,VNx4x8QI,VNx5x8QI,VNx6x8QI,VNx7x8QI,VNx8x8QI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx8QImode)")
-        (eq_attr "mode" "VNx16QI,VNx16BI")
+        (eq_attr "mode" "VNx16QI,VNx16BI,VNx2x16QI,VNx3x16QI,VNx4x16QI,VNx5x16QI,VNx6x16QI,VNx7x16QI,VNx8x16QI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx16QImode)")
-        (eq_attr "mode" "VNx32QI,VNx32BI")
+        (eq_attr "mode" "VNx32QI,VNx32BI,VNx2x32QI,VNx3x32QI,VNx4x32QI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx32QImode)")
-        (eq_attr "mode" "VNx64QI,VNx64BI")
+        (eq_attr "mode" "VNx64QI,VNx64BI,VNx2x64QI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx64QImode)")
         (eq_attr "mode" "VNx128QI,VNx128BI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx128QImode)")
-        (eq_attr "mode" "VNx1HI")
+        (eq_attr "mode" "VNx1HI,VNx2x1HI,VNx3x1HI,VNx4x1HI,VNx5x1HI,VNx6x1HI,VNx7x1HI,VNx8x1HI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx1HImode)")
-        (eq_attr "mode" "VNx2HI")
+        (eq_attr "mode" "VNx2HI,VNx2x2HI,VNx3x2HI,VNx4x2HI,VNx5x2HI,VNx6x2HI,VNx7x2HI,VNx8x2HI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx2HImode)")
-        (eq_attr "mode" "VNx4HI")
+        (eq_attr "mode" "VNx4HI,VNx2x4HI,VNx3x4HI,VNx4x4HI,VNx5x4HI,VNx6x4HI,VNx7x4HI,VNx8x4HI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx4HImode)")
-        (eq_attr "mode" "VNx8HI")
+        (eq_attr "mode" "VNx8HI,VNx2x8HI,VNx3x8HI,VNx4x8HI,VNx5x8HI,VNx6x8HI,VNx7x8HI,VNx8x8HI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx8HImode)")
-        (eq_attr "mode" "VNx16HI")
+        (eq_attr "mode" "VNx16HI,VNx2x16HI,VNx3x16HI,VNx4x16HI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx16HImode)")
-        (eq_attr "mode" "VNx32HI")
+        (eq_attr "mode" "VNx32HI,VNx2x32HI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx32HImode)")
         (eq_attr "mode" "VNx64HI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx64HImode)")
-        (eq_attr "mode" "VNx1SI,VNx1SF")
+        (eq_attr "mode" "VNx1SI,VNx1SF,VNx2x1SI,VNx3x1SI,VNx4x1SI,VNx5x1SI,VNx6x1SI,VNx7x1SI,VNx8x1SI,\
+                         VNx2x1SF,VNx3x1SF,VNx4x1SF,VNx5x1SF,VNx6x1SF,VNx7x1SF,VNx8x1SF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx1SImode)")
-        (eq_attr "mode" "VNx2SI,VNx2SF")
+        (eq_attr "mode" "VNx2SI,VNx2SF,VNx2x2SI,VNx3x2SI,VNx4x2SI,VNx5x2SI,VNx6x2SI,VNx7x2SI,VNx8x2SI,\
+                         VNx2x2SF,VNx3x2SF,VNx4x2SF,VNx5x2SF,VNx6x2SF,VNx7x2SF,VNx8x2SF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx2SImode)")
-        (eq_attr "mode" "VNx4SI,VNx4SF")
+        (eq_attr "mode" "VNx4SI,VNx4SF,VNx2x4SI,VNx3x4SI,VNx4x4SI,VNx5x4SI,VNx6x4SI,VNx7x4SI,VNx8x4SI,\
+                         VNx2x4SF,VNx3x4SF,VNx4x4SF,VNx5x4SF,VNx6x4SF,VNx7x4SF,VNx8x4SF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx4SImode)")
-        (eq_attr "mode" "VNx8SI,VNx8SF")
+        (eq_attr "mode" "VNx8SI,VNx8SF,VNx2x8SI,VNx3x8SI,VNx4x8SI,VNx2x8SF,VNx3x8SF,VNx4x8SF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx8SImode)")
-        (eq_attr "mode" "VNx16SI,VNx16SF")
+        (eq_attr "mode" "VNx16SI,VNx16SF,VNx2x16SI,VNx2x16SF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx16SImode)")
         (eq_attr "mode" "VNx32SI,VNx32SF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx32SImode)")
-        (eq_attr "mode" "VNx1DI,VNx1DF")
+        (eq_attr "mode" "VNx1DI,VNx1DF,VNx2x1DI,VNx3x1DI,VNx4x1DI,VNx5x1DI,VNx6x1DI,VNx7x1DI,VNx8x1DI,\
+                         VNx2x1DF,VNx3x1DF,VNx4x1DF,VNx5x1DF,VNx6x1DF,VNx7x1DF,VNx8x1DF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx1DImode)")
-        (eq_attr "mode" "VNx2DI,VNx2DF")
+        (eq_attr "mode" "VNx2DI,VNx2DF,VNx2x2DI,VNx3x2DI,VNx4x2DI,VNx5x2DI,VNx6x2DI,VNx7x2DI,VNx8x2DI,\
+                         VNx2x2DF,VNx3x2DF,VNx4x2DF,VNx5x2DF,VNx6x2DF,VNx7x2DF,VNx8x2DF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx2DImode)")
-        (eq_attr "mode" "VNx4DI,VNx4DF")
+        (eq_attr "mode" "VNx4DI,VNx4DF,VNx2x4DI,VNx3x4DI,VNx4x4DI,VNx2x4DF,VNx3x4DF,VNx4x4DF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx4DImode)")
-        (eq_attr "mode" "VNx8DI,VNx8DF")
+        (eq_attr "mode" "VNx8DI,VNx8DF,VNx2x8DI,VNx2x8DF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx8DImode)")
         (eq_attr "mode" "VNx16DI,VNx16DF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx16DImode)")]
                          viwred,vfredu,vfredo,vfwredu,vfwredo,vimovvx,\
                          vimovxv,vfmovvf,vfmovfv,vslideup,vslidedown,\
                          vislide1up,vislide1down,vfslide1up,vfslide1down,\
-                         vgather,vcompress")
+                         vgather,vcompress,vlsegdux,vlsegdox,vssegtux,vssegtox")
           (const_int INVALID_ATTRIBUTE)
-        (eq_attr "mode" "VNx1QI,VNx1BI")
+        (eq_attr "mode" "VNx1QI,VNx1BI,VNx2x1QI,VNx3x1QI,VNx4x1QI,VNx5x1QI,VNx6x1QI,VNx7x1QI,VNx8x1QI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx1QImode)")
-        (eq_attr "mode" "VNx2QI,VNx2BI")
+        (eq_attr "mode" "VNx2QI,VNx2BI,VNx2x2QI,VNx3x2QI,VNx4x2QI,VNx5x2QI,VNx6x2QI,VNx7x2QI,VNx8x2QI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx2QImode)")
-        (eq_attr "mode" "VNx4QI,VNx4BI")
+        (eq_attr "mode" "VNx4QI,VNx4BI,VNx2x4QI,VNx3x4QI,VNx4x4QI,VNx5x4QI,VNx6x4QI,VNx7x4QI,VNx8x4QI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx4QImode)")
-        (eq_attr "mode" "VNx8QI,VNx8BI")
+        (eq_attr "mode" "VNx8QI,VNx8BI,VNx2x8QI,VNx3x8QI,VNx4x8QI,VNx5x8QI,VNx6x8QI,VNx7x8QI,VNx8x8QI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx8QImode)")
-        (eq_attr "mode" "VNx16QI,VNx16BI")
+        (eq_attr "mode" "VNx16QI,VNx16BI,VNx2x16QI,VNx3x16QI,VNx4x16QI,VNx5x16QI,VNx6x16QI,VNx7x16QI,VNx8x16QI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx16QImode)")
-        (eq_attr "mode" "VNx32QI,VNx32BI")
+        (eq_attr "mode" "VNx32QI,VNx32BI,VNx2x32QI,VNx3x32QI,VNx4x32QI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx32QImode)")
-        (eq_attr "mode" "VNx64QI,VNx64BI")
+        (eq_attr "mode" "VNx64QI,VNx64BI,VNx2x64QI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx64QImode)")
         (eq_attr "mode" "VNx128QI,VNx128BI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx128QImode)")
-        (eq_attr "mode" "VNx1HI")
+        (eq_attr "mode" "VNx1HI,VNx2x1HI,VNx3x1HI,VNx4x1HI,VNx5x1HI,VNx6x1HI,VNx7x1HI,VNx8x1HI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx1HImode)")
-        (eq_attr "mode" "VNx2HI")
+        (eq_attr "mode" "VNx2HI,VNx2x2HI,VNx3x2HI,VNx4x2HI,VNx5x2HI,VNx6x2HI,VNx7x2HI,VNx8x2HI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx2HImode)")
-        (eq_attr "mode" "VNx4HI")
+        (eq_attr "mode" "VNx4HI,VNx2x4HI,VNx3x4HI,VNx4x4HI,VNx5x4HI,VNx6x4HI,VNx7x4HI,VNx8x4HI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx4HImode)")
-        (eq_attr "mode" "VNx8HI")
+        (eq_attr "mode" "VNx8HI,VNx2x8HI,VNx3x8HI,VNx4x8HI,VNx5x8HI,VNx6x8HI,VNx7x8HI,VNx8x8HI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx8HImode)")
-        (eq_attr "mode" "VNx16HI")
+        (eq_attr "mode" "VNx16HI,VNx2x16HI,VNx3x16HI,VNx4x16HI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx16HImode)")
-        (eq_attr "mode" "VNx32HI")
+        (eq_attr "mode" "VNx32HI,VNx2x32HI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx32HImode)")
         (eq_attr "mode" "VNx64HI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx64HImode)")
-        (eq_attr "mode" "VNx1SI,VNx1SF")
+        (eq_attr "mode" "VNx1SI,VNx1SF,VNx2x1SI,VNx3x1SI,VNx4x1SI,VNx5x1SI,VNx6x1SI,VNx7x1SI,VNx8x1SI,\
+                         VNx2x1SF,VNx3x1SF,VNx4x1SF,VNx5x1SF,VNx6x1SF,VNx7x1SF,VNx8x1SF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx1SImode)")
-        (eq_attr "mode" "VNx2SI,VNx2SF")
+        (eq_attr "mode" "VNx2SI,VNx2SF,VNx2x2SI,VNx3x2SI,VNx4x2SI,VNx5x2SI,VNx6x2SI,VNx7x2SI,VNx8x2SI,\
+                         VNx2x2SF,VNx3x2SF,VNx4x2SF,VNx5x2SF,VNx6x2SF,VNx7x2SF,VNx8x2SF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx2SImode)")
-        (eq_attr "mode" "VNx4SI,VNx4SF")
+        (eq_attr "mode" "VNx4SI,VNx4SF,VNx2x4SI,VNx3x4SI,VNx4x4SI,VNx5x4SI,VNx6x4SI,VNx7x4SI,VNx8x4SI,\
+                         VNx2x4SF,VNx3x4SF,VNx4x4SF,VNx5x4SF,VNx6x4SF,VNx7x4SF,VNx8x4SF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx4SImode)")
-        (eq_attr "mode" "VNx8SI,VNx8SF")
+        (eq_attr "mode" "VNx8SI,VNx8SF,VNx2x8SI,VNx3x8SI,VNx4x8SI,VNx2x8SF,VNx3x8SF,VNx4x8SF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx8SImode)")
-        (eq_attr "mode" "VNx16SI,VNx16SF")
+        (eq_attr "mode" "VNx16SI,VNx16SF,VNx2x16SI,VNx2x16SF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx16SImode)")
         (eq_attr "mode" "VNx32SI,VNx32SF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx32SImode)")
-        (eq_attr "mode" "VNx1DI,VNx1DF")
+        (eq_attr "mode" "VNx1DI,VNx1DF,VNx2x1DI,VNx3x1DI,VNx4x1DI,VNx5x1DI,VNx6x1DI,VNx7x1DI,VNx8x1DI,\
+                         VNx2x1DF,VNx3x1DF,VNx4x1DF,VNx5x1DF,VNx6x1DF,VNx7x1DF,VNx8x1DF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx1DImode)")
-        (eq_attr "mode" "VNx2DI,VNx2DF")
+        (eq_attr "mode" "VNx2DI,VNx2DF,VNx2x2DI,VNx3x2DI,VNx4x2DI,VNx5x2DI,VNx6x2DI,VNx7x2DI,VNx8x2DI,\
+                         VNx2x2DF,VNx3x2DF,VNx4x2DF,VNx5x2DF,VNx6x2DF,VNx7x2DF,VNx8x2DF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx2DImode)")
-        (eq_attr "mode" "VNx4DI,VNx4DF")
+        (eq_attr "mode" "VNx4DI,VNx4DF,VNx2x4DI,VNx3x4DI,VNx4x4DI,VNx2x4DF,VNx3x4DF,VNx4x4DF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx4DImode)")
-        (eq_attr "mode" "VNx8DI,VNx8DF")
+        (eq_attr "mode" "VNx8DI,VNx8DF,VNx2x8DI,VNx2x8DF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx8DImode)")
         (eq_attr "mode" "VNx16DI,VNx16DF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx16DImode)")]
                                vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,vfclass,\
                                vired,viwred,vfredu,vfredo,vfwredu,vfwredo,vimovxv,vfmovfv,\
                                vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
-                               vgather,vldff,viwmuladd,vfwmuladd")
+                               vgather,vldff,viwmuladd,vfwmuladd,vlsegde,vlsegds,vlsegdux,vlsegdox,vlsegdff")
               (const_int 2)
 
               (eq_attr "type" "vimerge,vfmerge,vcompress")
   (cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vldm,vstm,vmalu,vsts,vstux,\
                          vstox,vext,vmsfs,vmiota,vfsqrt,vfrecp,vfcvtitof,vldff,\
                          vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,vfncvtitof,\
-                         vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,vcompress")
+                         vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,vcompress,\
+                         vlsegde,vssegts,vssegtux,vssegtox,vlsegdff")
           (const_int 4)
 
         ;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
                          vsshift,vnclip,vfalu,vfmul,vfminmax,vfdiv,vfwalu,vfwmul,\
                          vfsgnj,vfmerge,vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
                          vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
-                         vgather,viwmuladd,vfwmuladd")
+                         vgather,viwmuladd,vfwmuladd,vlsegds,vlsegdux,vlsegdox")
           (const_int 5)
 
         (eq_attr "type" "vicmp,vimuladd,vfcmp,vfmuladd")
           (const_int 6)
 
-        (eq_attr "type" "vmpop,vmffs,vmidx")
+        (eq_attr "type" "vmpop,vmffs,vmidx,vssegte")
           (const_int 3)]
   (const_int INVALID_ATTRIBUTE)))
 
   (cond [(eq_attr "type" "vlde,vimov,vfmov,vext,vmiota,vfsqrt,vfrecp,\
                          vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,\
                          vfncvtitof,vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,\
-                         vcompress,vldff")
+                         vcompress,vldff,vlsegde,vlsegdff")
           (symbol_ref "riscv_vector::get_ta(operands[5])")
 
         ;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
                          vsshift,vnclip,vfalu,vfmul,vfminmax,vfdiv,\
                          vfwalu,vfwmul,vfsgnj,vfmerge,vired,viwred,vfredu,\
                          vfredo,vfwredu,vfwredo,vslideup,vslidedown,vislide1up,\
-                         vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd")
+                         vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd,\
+                         vlsegds,vlsegdux,vlsegdox")
           (symbol_ref "riscv_vector::get_ta(operands[6])")
 
         (eq_attr "type" "vimuladd,vfmuladd")
 (define_attr "ma" ""
   (cond [(eq_attr "type" "vlde,vext,vmiota,vfsqrt,vfrecp,vfcvtitof,vfcvtftoi,\
                          vfwcvtitof,vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,\
-                         vfncvtftof,vfclass,vldff")
+                         vfncvtftof,vfclass,vldff,vlsegde,vlsegdff")
           (symbol_ref "riscv_vector::get_ma(operands[6])")
 
         ;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
                          vnclip,vicmp,vfalu,vfmul,vfminmax,vfdiv,\
                          vfwalu,vfwmul,vfsgnj,vfcmp,vslideup,vslidedown,\
                          vislide1up,vislide1down,vfslide1up,vfslide1down,vgather,\
-                         viwmuladd,vfwmuladd")
+                         viwmuladd,vfwmuladd,vlsegds,vlsegdux,vlsegdox")
           (symbol_ref "riscv_vector::get_ma(operands[7])")
 
         (eq_attr "type" "vimuladd,vfmuladd")
                          vfsqrt,vfrecp,vfmerge,vfcvtitof,vfcvtftoi,vfwcvtitof,\
                          vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,\
                          vfclass,vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
-                         vimovxv,vfmovfv")
+                         vimovxv,vfmovfv,vlsegde,vlsegdff")
           (symbol_ref "INTVAL (operands[7])")
         (eq_attr "type" "vldm,vstm,vimov,vmalu,vmalu")
           (symbol_ref "INTVAL (operands[5])")
                          viwalu,viwmul,vnshift,vaalu,vsmul,vsshift,\
                          vnclip,vicmp,vfalu,vfmul,vfminmax,vfdiv,vfwalu,vfwmul,\
                          vfsgnj,vfcmp,vfmuladd,vslideup,vslidedown,vislide1up,\
-                         vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd")
+                         vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd,\
+                         vlsegds,vlsegdux,vlsegdox")
           (symbol_ref "INTVAL (operands[8])")
-        (eq_attr "type" "vstux,vstox")
+        (eq_attr "type" "vstux,vstox,vssegts,vssegtux,vssegtox")
           (symbol_ref "INTVAL (operands[5])")
 
         (eq_attr "type" "vimuladd")
         (eq_attr "type" "vmsfs,vmidx,vcompress")
           (symbol_ref "INTVAL (operands[6])")
 
-        (eq_attr "type" "vmpop,vmffs")
+        (eq_attr "type" "vmpop,vmffs,vssegte")
           (symbol_ref "INTVAL (operands[4])")]
        (const_int INVALID_ATTRIBUTE)))
 
   [(set_attr "type" "vldff")
    (set_attr "mode" "<MODE>")])
 
+
+;; -------------------------------------------------------------------------------
+;; ---- Predicated Segment loads/stores
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 7.8.1. Vector Unit-Stride Segment Loads and Stores
+;; - 7.8.2. Vector Strided Segment Loads and Stores
+;; - 7.8.3. Vector Indexed Segment Loads and Stores
+;; -------------------------------------------------------------------------------
+
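+;; A segment access moves NF contiguous fields per element, so no
+;; single fixed-mode MEM describes it; each pattern instead wraps the
+;; access in an unspec together with (mem:BLK (scratch)).  As an
+;; illustration (assuming mode VNx2x4SI, i.e. NF = 2 and SEW = 32),
+;; the masked alternative of @pred_unit_strided_load would emit
+;; something like:
+;;   vlseg2e32.v v8,(a0),v0.t
+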
+(define_insn "@pred_unit_strided_load<mode>"
+  [(set (match_operand:VT 0 "register_operand"             "=vr,    vr,    vd")
+       (if_then_else:VT
+         (unspec:<VM>
+           [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,   Wc1,    vm")
+            (match_operand 4 "vector_length_operand"    "   rK,    rK,    rK")
+            (match_operand 5 "const_int_operand"        "    i,     i,     i")
+            (match_operand 6 "const_int_operand"        "    i,     i,     i")
+            (match_operand 7 "const_int_operand"        "    i,     i,     i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+         (unspec:VT
+           [(match_operand 3 "pmode_reg_or_0_operand"   "   rJ,    rJ,    rJ")
+            (mem:BLK (scratch))] UNSPEC_UNIT_STRIDED)
+         (match_operand:VT 2 "vector_merge_operand"     "    0,    vu,    vu")))]
+  "TARGET_VECTOR"
+  "vlseg<nf>e<sew>.v\t%0,(%z3)%p1"
+  [(set_attr "type" "vlsegde")
+   (set_attr "mode" "<MODE>")])
+
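+;; Stores take no merge operand and no tail/mask policy operands
+;; (nothing is left undisturbed in a destination register), so the
+;; mask becomes operand 0 and a single const_int operand remains;
+;; operand numbers therefore differ from the load patterns above.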
+(define_insn "@pred_unit_strided_store<mode>"
+  [(set (mem:BLK (scratch))
+       (unspec:BLK
+         [(unspec:<VM>
+            [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+             (match_operand 3 "vector_length_operand"    "   rK")
+             (match_operand 4 "const_int_operand"        "    i")
+             (reg:SI VL_REGNUM)
+             (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (match_operand 1 "pmode_reg_or_0_operand"      "   rJ")
+          (match_operand:VT 2 "register_operand"         "   vr")
+          (mem:BLK (scratch))] UNSPEC_UNIT_STRIDED))]
+  "TARGET_VECTOR"
+  "vsseg<nf>e<sew>.v\t%2,(%z1)%p0"
+  [(set_attr "type" "vssegte")
+   (set_attr "mode" "<MODE>")])
+
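+;; The strided forms take one extra scalar operand for the byte
+;; stride, printed with %z, which emits the zero register for a
+;; constant 0 as the "rJ" constraint permits.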
+(define_insn "@pred_strided_load<mode>"
+  [(set (match_operand:VT 0 "register_operand"             "=vr,    vr,    vd")
+       (if_then_else:VT
+         (unspec:<VM>
+           [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,   Wc1,    vm")
+            (match_operand 5 "vector_length_operand"    "   rK,    rK,    rK")
+            (match_operand 6 "const_int_operand"        "    i,     i,     i")
+            (match_operand 7 "const_int_operand"        "    i,     i,     i")
+            (match_operand 8 "const_int_operand"        "    i,     i,     i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+         (unspec:VT
+           [(match_operand 3 "pmode_reg_or_0_operand"   "   rJ,    rJ,    rJ")
+            (match_operand 4 "pmode_reg_or_0_operand"   "   rJ,    rJ,    rJ")
+            (mem:BLK (scratch))] UNSPEC_STRIDED)
+         (match_operand:VT 2 "vector_merge_operand"     "    0,    vu,    vu")))]
+  "TARGET_VECTOR"
+  "vlsseg<nf>e<sew>.v\t%0,(%z3),%z4%p1"
+  [(set_attr "type" "vlsegds")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_strided_store<mode>"
+  [(set (mem:BLK (scratch))
+       (unspec:BLK
+         [(unspec:<VM>
+            [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+             (match_operand 4 "vector_length_operand"    "   rK")
+             (match_operand 5 "const_int_operand"        "    i")
+             (reg:SI VL_REGNUM)
+             (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (match_operand 1 "pmode_reg_or_0_operand"      "   rJ")
+          (match_operand 2 "pmode_reg_or_0_operand"      "   rJ")
+          (match_operand:VT 3 "register_operand"         "   vr")
+          (mem:BLK (scratch))] UNSPEC_STRIDED))]
+  "TARGET_VECTOR"
+  "vssseg<nf>e<sew>.v\t%3,(%z1),%z2%p0"
+  [(set_attr "type" "vssegts")
+   (set_attr "mode" "<MODE>")])
+
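+;; Fault-only-first segment loads can shrink VL: a fault on any
+;; element after the first is suppressed and vl is truncated to the
+;; faulting index instead of trapping.  The extra set of VL_REGNUM
+;; through UNSPEC_MODIFY_VL keeps that side effect visible in the RTL.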
+(define_insn "@pred_fault_load<mode>"
+  [(set (match_operand:VT 0 "register_operand"             "=vr,    vr,    vd")
+       (if_then_else:VT
+         (unspec:<VM>
+           [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,   Wc1,    vm")
+            (match_operand 4 "vector_length_operand"    "   rK,    rK,    rK")
+            (match_operand 5 "const_int_operand"        "    i,     i,     i")
+            (match_operand 6 "const_int_operand"        "    i,     i,     i")
+            (match_operand 7 "const_int_operand"        "    i,     i,     i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+         (unspec:VT
+           [(match_operand 3 "pmode_reg_or_0_operand"   "   rJ,    rJ,    rJ")
+            (mem:BLK (scratch))] UNSPEC_VLEFF)
+         (match_operand:VT 2 "vector_merge_operand"     "    0,    vu,    vu")))
+   (set (reg:SI VL_REGNUM)
+        (unspec:SI
+          [(if_then_else:VT
+            (unspec:<VM>
+              [(match_dup 1) (match_dup 4) (match_dup 5)
+               (match_dup 6) (match_dup 7)
+               (reg:SI VL_REGNUM)
+               (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+            (unspec:VT
+               [(match_dup 3) (mem:BLK (scratch))] UNSPEC_VLEFF)
+            (match_dup 2))] UNSPEC_MODIFY_VL))]
+  "TARGET_VECTOR"
+  "vlseg<nf>e<sew>ff.v\t%0,(%z3)%p1"
+  [(set_attr "type" "vlsegdff")
+   (set_attr "mode" "<MODE>")])
+
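+;; Indexed segment accesses pair a data-tuple mode with an
+;; index-vector mode, and the two vary independently, so the pattern
+;; is instantiated once per tuple-size group (V1T/V1I ... V64T/V64I).
+;; The "=&vr" earlyclobber keeps the destination tuple from
+;; overlapping the index operand, an overlap the architecture forbids
+;; for indexed segment loads.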
+(define_insn "@pred_indexed_<order>load<V1T:mode><V1I:mode>"
+  [(set (match_operand:V1T 0 "register_operand"           "=&vr,  &vr")
+       (if_then_else:V1T
+         (unspec:<VM>
+           [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+            (match_operand 5 "vector_length_operand"    "   rK,   rK")
+            (match_operand 6 "const_int_operand"        "    i,    i")
+            (match_operand 7 "const_int_operand"        "    i,    i")
+            (match_operand 8 "const_int_operand"        "    i,    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+         (unspec:V1T
+           [(match_operand 3 "pmode_reg_or_0_operand"   "   rJ,   rJ")
+            (mem:BLK (scratch))
+            (match_operand:V1I 4 "register_operand"     "   vr,   vr")] ORDER)
+         (match_operand:V1T 2 "vector_merge_operand"    "   vu,    0")))]
+  "TARGET_VECTOR"
+  "vl<order>xseg<nf>ei<V1I:sew>.v\t%0,(%z3),%4%p1"
+  [(set_attr "type" "vlsegd<order>x")
+   (set_attr "mode" "<V1T:MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V2T:mode><V2I:mode>"
+  [(set (match_operand:V2T 0 "register_operand"           "=&vr,  &vr")
+       (if_then_else:V2T
+         (unspec:<VM>
+           [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+            (match_operand 5 "vector_length_operand"    "   rK,   rK")
+            (match_operand 6 "const_int_operand"        "    i,    i")
+            (match_operand 7 "const_int_operand"        "    i,    i")
+            (match_operand 8 "const_int_operand"        "    i,    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+         (unspec:V2T
+           [(match_operand 3 "pmode_reg_or_0_operand"   "   rJ,   rJ")
+            (mem:BLK (scratch))
+            (match_operand:V2I 4 "register_operand"     "   vr,   vr")] ORDER)
+         (match_operand:V2T 2 "vector_merge_operand"    "   vu,    0")))]
+  "TARGET_VECTOR"
+  "vl<order>xseg<nf>ei<V2I:sew>.v\t%0,(%z3),%4%p1"
+  [(set_attr "type" "vlsegd<order>x")
+   (set_attr "mode" "<V2T:MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V4T:mode><V4I:mode>"
+  [(set (match_operand:V4T 0 "register_operand"           "=&vr,  &vr")
+       (if_then_else:V4T
+         (unspec:<VM>
+           [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+            (match_operand 5 "vector_length_operand"    "   rK,   rK")
+            (match_operand 6 "const_int_operand"        "    i,    i")
+            (match_operand 7 "const_int_operand"        "    i,    i")
+            (match_operand 8 "const_int_operand"        "    i,    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+         (unspec:V4T
+           [(match_operand 3 "pmode_reg_or_0_operand"   "   rJ,   rJ")
+            (mem:BLK (scratch))
+            (match_operand:V4I 4 "register_operand"     "   vr,   vr")] ORDER)
+         (match_operand:V4T 2 "vector_merge_operand"    "   vu,    0")))]
+  "TARGET_VECTOR"
+  "vl<order>xseg<nf>ei<V4I:sew>.v\t%0,(%z3),%4%p1"
+  [(set_attr "type" "vlsegd<order>x")
+   (set_attr "mode" "<V4T:MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V8T:mode><V8I:mode>"
+  [(set (match_operand:V8T 0 "register_operand"           "=&vr,  &vr")
+       (if_then_else:V8T
+         (unspec:<VM>
+           [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+            (match_operand 5 "vector_length_operand"    "   rK,   rK")
+            (match_operand 6 "const_int_operand"        "    i,    i")
+            (match_operand 7 "const_int_operand"        "    i,    i")
+            (match_operand 8 "const_int_operand"        "    i,    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+         (unspec:V8T
+           [(match_operand 3 "pmode_reg_or_0_operand"   "   rJ,   rJ")
+            (mem:BLK (scratch))
+            (match_operand:V8I 4 "register_operand"     "   vr,   vr")] ORDER)
+         (match_operand:V8T 2 "vector_merge_operand"    "   vu,    0")))]
+  "TARGET_VECTOR"
+  "vl<order>xseg<nf>ei<V8I:sew>.v\t%0,(%z3),%4%p1"
+  [(set_attr "type" "vlsegd<order>x")
+   (set_attr "mode" "<V8T:MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V16T:mode><V16I:mode>"
+  [(set (match_operand:V16T 0 "register_operand"          "=&vr,  &vr")
+       (if_then_else:V16T
+         (unspec:<VM>
+           [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+            (match_operand 5 "vector_length_operand"    "   rK,   rK")
+            (match_operand 6 "const_int_operand"        "    i,    i")
+            (match_operand 7 "const_int_operand"        "    i,    i")
+            (match_operand 8 "const_int_operand"        "    i,    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+         (unspec:V16T
+           [(match_operand 3 "pmode_reg_or_0_operand"   "   rJ,   rJ")
+            (mem:BLK (scratch))
+            (match_operand:V16I 4 "register_operand"    "   vr,   vr")] ORDER)
+         (match_operand:V16T 2 "vector_merge_operand"   "   vu,    0")))]
+  "TARGET_VECTOR"
+  "vl<order>xseg<nf>ei<V16I:sew>.v\t%0,(%z3),%4%p1"
+  [(set_attr "type" "vlsegd<order>x")
+   (set_attr "mode" "<V16T:MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V32T:mode><V32I:mode>"
+  [(set (match_operand:V32T 0 "register_operand"          "=&vr,  &vr")
+       (if_then_else:V32T
+         (unspec:<VM>
+           [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+            (match_operand 5 "vector_length_operand"    "   rK,   rK")
+            (match_operand 6 "const_int_operand"        "    i,    i")
+            (match_operand 7 "const_int_operand"        "    i,    i")
+            (match_operand 8 "const_int_operand"        "    i,    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+         (unspec:V32T
+           [(match_operand 3 "pmode_reg_or_0_operand"   "   rJ,   rJ")
+            (mem:BLK (scratch))
+            (match_operand:V32I 4 "register_operand"    "   vr,   vr")] ORDER)
+         (match_operand:V32T 2 "vector_merge_operand"   "   vu,    0")))]
+  "TARGET_VECTOR"
+  "vl<order>xseg<nf>ei<V32I:sew>.v\t%0,(%z3),%4%p1"
+  [(set_attr "type" "vlsegd<order>x")
+   (set_attr "mode" "<V32T:MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V64T:mode><V64I:mode>"
+  [(set (match_operand:V64T 0 "register_operand"          "=&vr,  &vr")
+       (if_then_else:V64T
+         (unspec:<VM>
+           [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+            (match_operand 5 "vector_length_operand"    "   rK,   rK")
+            (match_operand 6 "const_int_operand"        "    i,    i")
+            (match_operand 7 "const_int_operand"        "    i,    i")
+            (match_operand 8 "const_int_operand"        "    i,    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+         (unspec:V64T
+           [(match_operand 3 "pmode_reg_or_0_operand"   "   rJ,   rJ")
+            (mem:BLK (scratch))
+            (match_operand:V64I 4 "register_operand"    "   vr,   vr")] ORDER)
+         (match_operand:V64T 2 "vector_merge_operand"   "   vu,    0")))]
+  "TARGET_VECTOR"
+  "vl<order>xseg<nf>ei<V64I:sew>.v\t%0,(%z3),%4%p1"
+  [(set_attr "type" "vlsegd<order>x")
+   (set_attr "mode" "<V64T:MODE>")])
+
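+;; The indexed stores only read their vector operands, so the data
+;; tuple and the index vector can use plain "vr" without earlyclobber.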
+(define_insn "@pred_indexed_<order>store<V1T:mode><V1I:mode>"
+  [(set (mem:BLK (scratch))
+       (unspec:BLK
+         [(unspec:<VM>
+           [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+            (match_operand 4 "vector_length_operand"    "   rK")
+            (match_operand 5 "const_int_operand"        "    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (match_operand 1 "pmode_reg_or_0_operand"     "   rJ")
+          (match_operand:V1I 2 "register_operand"       "   vr")
+          (match_operand:V1T 3 "register_operand"       "   vr")] ORDER))]
+  "TARGET_VECTOR"
+  "vs<order>xseg<nf>ei<V1I:sew>.v\t%3,(%z1),%2%p0"
+  [(set_attr "type" "vssegt<order>x")
+   (set_attr "mode" "<V1T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V2T:mode><V2I:mode>"
+  [(set (mem:BLK (scratch))
+       (unspec:BLK
+         [(unspec:<VM>
+           [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+            (match_operand 4 "vector_length_operand"    "   rK")
+            (match_operand 5 "const_int_operand"        "    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (match_operand 1 "pmode_reg_or_0_operand"     "   rJ")
+          (match_operand:V2I 2 "register_operand"       "   vr")
+          (match_operand:V2T 3 "register_operand"       "   vr")] ORDER))]
+  "TARGET_VECTOR"
+  "vs<order>xseg<nf>ei<V2I:sew>.v\t%3,(%z1),%2%p0"
+  [(set_attr "type" "vssegt<order>x")
+   (set_attr "mode" "<V2T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V4T:mode><V4I:mode>"
+  [(set (mem:BLK (scratch))
+       (unspec:BLK
+         [(unspec:<VM>
+           [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+            (match_operand 4 "vector_length_operand"    "   rK")
+            (match_operand 5 "const_int_operand"        "    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (match_operand 1 "pmode_reg_or_0_operand"     "   rJ")
+          (match_operand:V4I 2 "register_operand"       "   vr")
+          (match_operand:V4T 3 "register_operand"       "   vr")] ORDER))]
+  "TARGET_VECTOR"
+  "vs<order>xseg<nf>ei<V4I:sew>.v\t%3,(%z1),%2%p0"
+  [(set_attr "type" "vssegt<order>x")
+   (set_attr "mode" "<V4T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V8T:mode><V8I:mode>"
+  [(set (mem:BLK (scratch))
+       (unspec:BLK
+         [(unspec:<VM>
+           [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+            (match_operand 4 "vector_length_operand"    "   rK")
+            (match_operand 5 "const_int_operand"        "    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (match_operand 1 "pmode_reg_or_0_operand"     "   rJ")
+          (match_operand:V8I 2 "register_operand"       "   vr")
+          (match_operand:V8T 3 "register_operand"       "   vr")] ORDER))]
+  "TARGET_VECTOR"
+  "vs<order>xseg<nf>ei<V8I:sew>.v\t%3,(%z1),%2%p0"
+  [(set_attr "type" "vssegt<order>x")
+   (set_attr "mode" "<V8T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V16T:mode><V16I:mode>"
+  [(set (mem:BLK (scratch))
+       (unspec:BLK
+         [(unspec:<VM>
+           [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+            (match_operand 4 "vector_length_operand"    "   rK")
+            (match_operand 5 "const_int_operand"        "    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (match_operand 1 "pmode_reg_or_0_operand"     "   rJ")
+          (match_operand:V16I 2 "register_operand"      "   vr")
+          (match_operand:V16T 3 "register_operand"      "   vr")] ORDER))]
+  "TARGET_VECTOR"
+  "vs<order>xseg<nf>ei<V16I:sew>.v\t%3,(%z1),%2%p0"
+  [(set_attr "type" "vssegt<order>x")
+   (set_attr "mode" "<V16T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V32T:mode><V32I:mode>"
+  [(set (mem:BLK (scratch))
+       (unspec:BLK
+         [(unspec:<VM>
+           [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+            (match_operand 4 "vector_length_operand"    "   rK")
+            (match_operand 5 "const_int_operand"        "    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (match_operand 1 "pmode_reg_or_0_operand"     "   rJ")
+          (match_operand:V32I 2 "register_operand"      "   vr")
+          (match_operand:V32T 3 "register_operand"      "   vr")] ORDER))]
+  "TARGET_VECTOR"
+  "vs<order>xseg<nf>ei<V32I:sew>.v\t%3,(%z1),%2%p0"
+  [(set_attr "type" "vssegt<order>x")
+   (set_attr "mode" "<V32T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V64T:mode><V64I:mode>"
+  [(set (mem:BLK (scratch))
+       (unspec:BLK
+         [(unspec:<VM>
+           [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+            (match_operand 4 "vector_length_operand"    "   rK")
+            (match_operand 5 "const_int_operand"        "    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (match_operand 1 "pmode_reg_or_0_operand"     "   rJ")
+          (match_operand:V64I 2 "register_operand"      "   vr")
+          (match_operand:V64T 3 "register_operand"      "   vr")] ORDER))]
+  "TARGET_VECTOR"
+  "vs<order>xseg<nf>ei<V64I:sew>.v\t%3,(%z1),%2%p0"
+  [(set_attr "type" "vssegt<order>x")
+   (set_attr "mode" "<V64T:MODE>")])
+
 ;; -----------------------------------------------------------------------------
 ;; ---- Integer Compare Instructions Simplification
 ;; -----------------------------------------------------------------------------