[PATCH v2 2/9] RISC-V: Handle differences between xtheadvector and vector
Jun Sha (Joshua)
cooper.joshua@linux.alibaba.com
Sat Nov 18 04:28:39 GMT 2023
This patch handles the differences in instruction generation
between the standard vector extension and xtheadvector, mainly
adding the th. prefix to all xtheadvector instructions.
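For illustration only (not part of the patch): the new '%^' operand
punctuation added to riscv_print_operand prints the "th." prefix when
TARGET_XTHEADVECTOR is set and nothing otherwise, so a template such as
"%^vadd.vv\t%0,%3,%4%p1" assembles to th.vadd.vv under xtheadvector and to
plain vadd.vv under 'V'.  Below is a stand-alone C sketch of that
behaviour; print_caret and target_xtheadvector are invented stand-ins for
the real riscv_print_operand case and the TARGET_XTHEADVECTOR flag:

  #include <stdio.h>
  #include <stdbool.h>

  /* Stand-in for the TARGET_XTHEADVECTOR target flag.  */
  static bool target_xtheadvector;

  /* Mirrors the new '%^' case in riscv_print_operand: emit the "th."
     prefix only when XTheadVector is enabled.  */
  static void
  print_caret (FILE *file)
  {
    if (target_xtheadvector)
      fputs ("th.", file);
  }

  int
  main (void)
  {
    target_xtheadvector = false;
    print_caret (stdout);
    puts ("vadd.vv\tv8,v8,v9");   /* prints: vadd.vv     v8,v8,v9 */

    target_xtheadvector = true;
    print_caret (stdout);
    puts ("vadd.vv\tv8,v8,v9");   /* prints: th.vadd.vv  v8,v8,v9 */
    return 0;
  }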
Contributors:
Jun Sha (Joshua) <cooper.joshua@linux.alibaba.com>
Jin Ma <jinma@linux.alibaba.com>
Christoph Müllner <christoph.muellner@vrull.eu>
gcc/ChangeLog:
* config.gcc: Add header for XTheadVector intrinsics.
* config/riscv/riscv-c.cc (riscv_pragma_intrinsic):
Add XTheadVector.
* config/riscv/riscv.cc (riscv_print_operand):
Add new operand format directives.
(riscv_print_operand_punct_valid_p): Likewise.
* config/riscv/vector-iterators.md: Split any_int_unop
for not and neg.
* config/riscv/vector.md (@pred_<optab><mode>):
Add th. for xtheadvector instructions.
* config/riscv/riscv_th_vector.h: New file.
gcc/testsuite/ChangeLog:
* gcc.target/riscv/rvv/base/pragma-1.c: Add XTheadVector.
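A hypothetical usage sketch for the new header (not part of the patch;
copy_vec is an invented example function).  It assumes a toolchain whose
-march string enables XTheadVector so that __riscv_xtheadvector is
predefined; otherwise the #error in riscv_th_vector.h triggers.  The
pragma inside the header asks GCC to register the standard RVV types and
intrinsics, so a type like vint32m1_t should be usable after the include:

  /* Compiles only when __riscv_xtheadvector is defined.  */
  #include <riscv_th_vector.h>

  /* Trivial use of an RVV type; the value is passed and returned
     through the vector register file.  */
  vint32m1_t
  copy_vec (vint32m1_t v)
  {
    return v;
  }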
---
gcc/config.gcc | 2 +-
gcc/config/riscv/riscv-c.cc | 4 +-
gcc/config/riscv/riscv.cc | 11 +-
gcc/config/riscv/riscv_th_vector.h | 49 ++
gcc/config/riscv/vector-iterators.md | 4 +
gcc/config/riscv/vector.md | 777 +++++++++---------
.../gcc.target/riscv/rvv/base/pragma-1.c | 2 +-
7 files changed, 466 insertions(+), 383 deletions(-)
create mode 100644 gcc/config/riscv/riscv_th_vector.h
diff --git a/gcc/config.gcc b/gcc/config.gcc
index ba6d63e33ac..e0fc2b1a27c 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -548,7 +548,7 @@ riscv*)
extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o"
extra_objs="${extra_objs} thead.o"
d_target_objs="riscv-d.o"
- extra_headers="riscv_vector.h"
+ extra_headers="riscv_vector.h riscv_th_vector.h"
target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.cc"
target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.h"
;;
diff --git a/gcc/config/riscv/riscv-c.cc b/gcc/config/riscv/riscv-c.cc
index 184fff905b2..0a17d5f6656 100644
--- a/gcc/config/riscv/riscv-c.cc
+++ b/gcc/config/riscv/riscv-c.cc
@@ -194,8 +194,8 @@ riscv_pragma_intrinsic (cpp_reader *)
{
if (!TARGET_VECTOR)
{
- error ("%<#pragma riscv intrinsic%> option %qs needs 'V' extension "
- "enabled",
+ error ("%<#pragma riscv intrinsic%> option %qs needs 'V' or "
+ "'XTHEADVECTOR' extension enabled",
name);
return;
}
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index ecee7eb4727..754107cdaac 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -5323,7 +5323,7 @@ riscv_get_v_regno_alignment (machine_mode mode)
static void
riscv_print_operand (FILE *file, rtx op, int letter)
{
- /* `~` does not take an operand so op will be null
+ /* `~` and `^` do not take an operand so op will be null
Check for before accessing op.
*/
if (letter == '~')
@@ -5332,6 +5332,13 @@ riscv_print_operand (FILE *file, rtx op, int letter)
fputc('w', file);
return;
}
+
+ if (letter == '^')
+ {
+ if (TARGET_XTHEADVECTOR)
+ fputs ("th.", file);
+ return;
+ }
machine_mode mode = GET_MODE (op);
enum rtx_code code = GET_CODE (op);
@@ -5584,7 +5591,7 @@ riscv_print_operand (FILE *file, rtx op, int letter)
static bool
riscv_print_operand_punct_valid_p (unsigned char code)
{
- return (code == '~');
+ return (code == '~' || code == '^');
}
/* Implement TARGET_PRINT_OPERAND_ADDRESS. */
diff --git a/gcc/config/riscv/riscv_th_vector.h b/gcc/config/riscv/riscv_th_vector.h
new file mode 100644
index 00000000000..194652032bc
--- /dev/null
+++ b/gcc/config/riscv/riscv_th_vector.h
@@ -0,0 +1,49 @@
+/* RISC-V 'XTheadVector' Extension intrinsics include file.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef __RISCV_TH_VECTOR_H
+#define __RISCV_TH_VECTOR_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifndef __riscv_xtheadvector
+#error "XTheadVector intrinsics require the xtheadvector extension."
+#else
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* NOTE: This implementation of riscv_th_vector.h is intentionally short. It does
+ not define the RVV types and intrinsic functions directly in C and C++
+ code, but instead uses the following pragma to tell GCC to insert the
+ necessary type and function definitions itself. The net effect is the
+ same, and the file is a complete implementation of riscv_th_vector.h. */
+#pragma riscv intrinsic "vector"
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+#endif // __riscv_xtheadvector
+#endif // __RISCV_TH_VECTOR_H
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index f04c7fe5491..4b1ba84750c 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -3679,6 +3679,10 @@ (define_code_iterator any_int_binop [plus minus and ior xor ashift ashiftrt lshi
(define_code_iterator any_int_unop [neg not])
+(define_code_iterator neg_unop [neg])
+
+(define_code_iterator not_unop [not])
+
(define_code_iterator any_commutative_binop [plus and ior xor
smax umax smin umin mult
])
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index d1499d330ff..2af237854f9 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -1099,9 +1099,9 @@ (define_insn "*mov<mode>_whole"
(match_operand:V_WHOLE 1 "reg_or_mem_operand" " m,vr,vr"))]
"TARGET_VECTOR"
"@
- vl%m1re<sew>.v\t%0,%1
- vs%m1r.v\t%1,%0
- vmv%m1r.v\t%0,%1"
+ * return TARGET_XTHEADVECTOR ? \"th.vl%m1re.v\t%0,%1\" : \"vl%m1re<sew>.v\t%0,%1\";
+ %^vs%m1r.v\t%1,%0
+ %^vmv%m1r.v\t%0,%1"
[(set_attr "type" "vldr,vstr,vmov")
(set_attr "mode" "<MODE>")])
@@ -1109,7 +1109,7 @@ (define_insn "*mov<mode>_fract"
[(set (match_operand:V_FRACT 0 "register_operand" "=vr")
(match_operand:V_FRACT 1 "register_operand" " vr"))]
"TARGET_VECTOR"
- "vmv1r.v\t%0,%1"
+ "%^vmv1r.v\t%0,%1"
[(set_attr "type" "vmov")
(set_attr "mode" "<MODE>")])
@@ -1126,7 +1126,7 @@ (define_insn "*mov<mode>"
[(set (match_operand:VB 0 "register_operand" "=vr")
(match_operand:VB 1 "register_operand" " vr"))]
"TARGET_VECTOR"
- "vmv1r.v\t%0,%1"
+ "%^vmv1r.v\t%0,%1"
[(set_attr "type" "vmov")
(set_attr "mode" "<MODE>")])
@@ -1135,7 +1135,7 @@ (define_expand "@mov<V_FRACT:mode><P:mode>_lra"
[(set (match_operand:V_FRACT 0 "reg_or_mem_operand")
(match_operand:V_FRACT 1 "reg_or_mem_operand"))
(clobber (match_scratch:P 2))])]
- "TARGET_VECTOR && (lra_in_progress || reload_completed)"
+ "TARGET_VECTOR && (lra_in_progress || reload_completed)"
{})
(define_expand "@mov<VB:mode><P:mode>_lra"
@@ -1143,14 +1143,14 @@ (define_expand "@mov<VB:mode><P:mode>_lra"
[(set (match_operand:VB 0 "reg_or_mem_operand")
(match_operand:VB 1 "reg_or_mem_operand"))
(clobber (match_scratch:P 2))])]
- "TARGET_VECTOR && (lra_in_progress || reload_completed)"
+ "TARGET_VECTOR && (lra_in_progress || reload_completed)"
{})
(define_insn_and_split "*mov<V_FRACT:mode><P:mode>_lra"
[(set (match_operand:V_FRACT 0 "reg_or_mem_operand" "=vr, m,vr")
(match_operand:V_FRACT 1 "reg_or_mem_operand" " m,vr,vr"))
(clobber (match_scratch:P 2 "=&r,&r,X"))]
- "TARGET_VECTOR && (lra_in_progress || reload_completed)"
+ "TARGET_VECTOR && (lra_in_progress || reload_completed)"
"#"
"&& reload_completed"
[(const_int 0)]
@@ -1172,7 +1172,7 @@ (define_insn_and_split "*mov<VB:mode><P:mode>_lra"
[(set (match_operand:VB 0 "reg_or_mem_operand" "=vr, m,vr")
(match_operand:VB 1 "reg_or_mem_operand" " m,vr,vr"))
(clobber (match_scratch:P 2 "=&r,&r,X"))]
- "TARGET_VECTOR && (lra_in_progress || reload_completed)"
+ "TARGET_VECTOR && (lra_in_progress || reload_completed)"
"#"
"&& reload_completed"
[(const_int 0)]
@@ -1258,7 +1258,7 @@ (define_insn_and_split "*mov<mode>"
"@
#
#
- vmv%m1r.v\t%0,%1"
+ %^vmv%m1r.v\t%0,%1"
"&& reload_completed
&& (!register_operand (operands[0], <MODE>mode)
|| !register_operand (operands[1], <MODE>mode))"
@@ -1286,14 +1286,14 @@ (define_expand "@mov<VLS_AVL_REG:mode><P:mode>_lra"
[(set (match_operand:VLS_AVL_REG 0 "reg_or_mem_operand")
(match_operand:VLS_AVL_REG 1 "reg_or_mem_operand"))
(clobber (match_scratch:P 2))])]
- "TARGET_VECTOR && (lra_in_progress || reload_completed)"
+ "TARGET_VECTOR && (lra_in_progress || reload_completed)"
{})
(define_insn_and_split "*mov<VLS_AVL_REG:mode><P:mode>_lra"
[(set (match_operand:VLS_AVL_REG 0 "reg_or_mem_operand" "=vr, m,vr")
(match_operand:VLS_AVL_REG 1 "reg_or_mem_operand" " m,vr,vr"))
(clobber (match_scratch:P 2 "=&r,&r,X"))]
- "TARGET_VECTOR && (lra_in_progress || reload_completed)
+ "TARGET_VECTOR && (lra_in_progress || reload_completed)
&& (register_operand (operands[0], <VLS_AVL_REG:MODE>mode)
|| register_operand (operands[1], <VLS_AVL_REG:MODE>mode))"
"#"
@@ -1322,7 +1322,7 @@ (define_insn "*mov<mode>_vls"
[(set (match_operand:VLS 0 "register_operand" "=vr")
(match_operand:VLS 1 "register_operand" " vr"))]
"TARGET_VECTOR"
- "vmv%m1r.v\t%0,%1"
+ "%^vmv%m1r.v\t%0,%1"
[(set_attr "type" "vmov")
(set_attr "mode" "<MODE>")])
@@ -1330,7 +1330,7 @@ (define_insn "*mov<mode>_vls"
[(set (match_operand:VLSB 0 "register_operand" "=vr")
(match_operand:VLSB 1 "register_operand" " vr"))]
"TARGET_VECTOR"
- "vmv1r.v\t%0,%1"
+ "%^vmv1r.v\t%0,%1"
[(set_attr "type" "vmov")
(set_attr "mode" "<MODE>")])
@@ -1359,7 +1359,7 @@ (define_expand "movmisalign<mode>"
(define_expand "movmisalign<mode>"
[(set (match_operand:V 0 "nonimmediate_operand")
(match_operand:V 1 "general_operand"))]
- "TARGET_VECTOR && TARGET_VECTOR_MISALIGN_SUPPORTED"
+ "TARGET_VECTOR && TARGET_VECTOR_MISALIGN_SUPPORTED"
{
emit_move_insn (operands[0], operands[1]);
DONE;
@@ -1396,7 +1396,7 @@ (define_insn_and_split "*vec_duplicate<mode>"
[(set (match_operand:V_VLS 0 "register_operand")
(vec_duplicate:V_VLS
(match_operand:<VEL> 1 "direct_broadcast_operand")))]
- "TARGET_VECTOR && can_create_pseudo_p ()"
+ "TARGET_VECTOR && can_create_pseudo_p ()"
"#"
"&& 1"
[(const_int 0)]
@@ -1530,7 +1530,7 @@ (define_insn "@vsetvl<mode>"
(match_dup 4)
(match_dup 5)] UNSPEC_VSETVL))]
"TARGET_VECTOR"
- "vset%i1vli\t%0,%1,e%2,%m3,t%p4,m%p5"
+ "%^vset%i1vli\t%0,%1,e%2,%m3,t%p4,m%p5"
[(set_attr "type" "vsetvl")
(set_attr "mode" "<MODE>")
(set (attr "sew") (symbol_ref "INTVAL (operands[2])"))
@@ -1548,7 +1548,7 @@ (define_insn "vsetvl_vtype_change_only"
(match_operand 2 "const_int_operand" "i")
(match_operand 3 "const_int_operand" "i")] UNSPEC_VSETVL))]
"TARGET_VECTOR"
- "vsetvli\tzero,zero,e%0,%m1,t%p2,m%p3"
+ "%^vsetvli\tzero,zero,e%0,%m1,t%p2,m%p3"
[(set_attr "type" "vsetvl")
(set_attr "mode" "SI")
(set (attr "sew") (symbol_ref "INTVAL (operands[0])"))
@@ -1570,7 +1570,7 @@ (define_insn "@vsetvl_discard_result<mode>"
(match_operand 3 "const_int_operand" "i")
(match_operand 4 "const_int_operand" "i")] UNSPEC_VSETVL))]
"TARGET_VECTOR"
- "vset%i0vli\tzero,%0,e%1,%m2,t%p3,m%p4"
+ "%^vset%i0vli\tzero,%0,e%1,%m2,t%p3,m%p4"
[(set_attr "type" "vsetvl")
(set_attr "mode" "<MODE>")
(set (attr "sew") (symbol_ref "INTVAL (operands[1])"))
@@ -1720,12 +1720,12 @@ (define_insn_and_split "*pred_mov<mode>"
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[3], <MODE>mode)))"
"@
- vle<sew>.v\t%0,%3%p1
- vle<sew>.v\t%0,%3
- vle<sew>.v\t%0,%3,%1.t
- vse<sew>.v\t%3,%0%p1
- vmv.v.v\t%0,%3
- vmv.v.v\t%0,%3"
+ * return TARGET_XTHEADVECTOR ? \"th.vle.v\t%0,%3%p1\" : \"vle<sew>.v\t%0,%3%p1\";
+ * return TARGET_XTHEADVECTOR ? \"th.vle.v\t%0,%3\" : \"vle<sew>.v\t%0,%3\";
+ * return TARGET_XTHEADVECTOR ? \"th.vle.v\t%0,%3,%1.t\" : \"vle<sew>.v\t%0,%3,%1.t\";
+ * return TARGET_XTHEADVECTOR ? \"th.vse.v\t%3,%0%p1\" : \"vse<sew>.v\t%3,%0%p1\";
+ %^vmv.v.v\t%0,%3
+ %^vmv.v.v\t%0,%3"
"&& register_operand (operands[0], <MODE>mode)
&& register_operand (operands[3], <MODE>mode)
&& satisfies_constraint_vu (operands[2])
@@ -1749,7 +1749,7 @@ (define_insn "@pred_store<mode>"
(match_operand:V 2 "register_operand" " vr")
(match_dup 0)))]
"TARGET_VECTOR"
- "vse<sew>.v\t%2,%0%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vse.v\t%2,%0%p1" : "vse<sew>.v\t%2,%0%p1"; }
[(set_attr "type" "vste")
(set_attr "mode" "<MODE>")
(set (attr "avl_type_idx") (const_int 4))
@@ -1773,11 +1773,11 @@ (define_insn_and_split "@pred_mov<mode>"
(match_operand:VB_VLS 2 "vector_undef_operand" " vu, vu, vu, vu, vu")))]
"TARGET_VECTOR"
"@
- vlm.v\t%0,%3
- vsm.v\t%3,%0
- vmmv.m\t%0,%3
- vmclr.m\t%0
- vmset.m\t%0"
+ %^vlm.v\t%0,%3
+ %^vsm.v\t%3,%0
+ %^vmmv.m\t%0,%3
+ %^vmclr.m\t%0
+ %^vmset.m\t%0"
"&& register_operand (operands[0], <MODE>mode)
&& register_operand (operands[3], <MODE>mode)
&& INTVAL (operands[5]) == riscv_vector::VLMAX"
@@ -1800,7 +1800,7 @@ (define_insn "@pred_store<mode>"
(match_operand:VB 2 "register_operand" " vr")
(match_dup 0)))]
"TARGET_VECTOR"
- "vsm.v\t%2,%0"
+ "%^vsm.v\t%2,%0"
[(set_attr "type" "vstm")
(set_attr "mode" "<MODE>")
(set (attr "avl_type_idx") (const_int 4))
@@ -1821,7 +1821,7 @@ (define_insn "@pred_merge<mode>"
(match_operand:<VM> 4 "register_operand" " vm,vm,vm,vm"))
(match_operand:V_VLS 1 "vector_merge_operand" " vu, 0,vu, 0")))]
"TARGET_VECTOR"
- "vmerge.v%o3m\t%0,%2,%v3,%4"
+ "%^vmerge.v%o3m\t%0,%2,%v3,%4"
[(set_attr "type" "vimerge")
(set_attr "mode" "<MODE>")])
@@ -1841,7 +1841,7 @@ (define_insn "@pred_merge<mode>_scalar"
(match_operand:<VM> 4 "register_operand" " vm,vm"))
(match_operand:V_VLSI_QHS 1 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vmerge.vxm\t%0,%2,%3,%4"
+ "%^vmerge.vxm\t%0,%2,%3,%4"
[(set_attr "type" "vimerge")
(set_attr "mode" "<MODE>")])
@@ -1893,7 +1893,7 @@ (define_insn "*pred_merge<mode>_scalar"
(match_operand:<VM> 4 "register_operand" " vm,vm"))
(match_operand:V_VLSI_D 1 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vmerge.vxm\t%0,%2,%3,%4"
+ "%^vmerge.vxm\t%0,%2,%3,%4"
[(set_attr "type" "vimerge")
(set_attr "mode" "<MODE>")])
@@ -1914,7 +1914,7 @@ (define_insn "*pred_merge<mode>_extended_scalar"
(match_operand:<VM> 4 "register_operand" " vm,vm"))
(match_operand:V_VLSI_D 1 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vmerge.vxm\t%0,%2,%3,%4"
+ "%^vmerge.vxm\t%0,%2,%3,%4"
[(set_attr "type" "vimerge")
(set_attr "mode" "<MODE>")])
@@ -2004,14 +2004,14 @@ (define_insn_and_split "*pred_broadcast<mode>"
(match_operand:V_VLSI 2 "vector_merge_operand" "vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"@
- vmv.v.x\t%0,%3
- vmv.v.x\t%0,%3
- vlse<sew>.v\t%0,%3,zero,%1.t
- vlse<sew>.v\t%0,%3,zero,%1.t
- vlse<sew>.v\t%0,%3,zero
- vlse<sew>.v\t%0,%3,zero
- vmv.s.x\t%0,%3
- vmv.s.x\t%0,%3"
+ %^vmv.v.x\t%0,%3
+ %^vmv.v.x\t%0,%3
+ * return TARGET_XTHEADVECTOR ? \"th.vlse.v\t%0,%3,zero,%1.t\" : \"vlse<sew>.v\t%0,%3,zero,%1.t\";
+ * return TARGET_XTHEADVECTOR ? \"th.vlse.v\t%0,%3,zero,%1.t\" : \"vlse<sew>.v\t%0,%3,zero,%1.t\";
+ * return TARGET_XTHEADVECTOR ? \"th.vlse.v\t%0,%3,zero\" : \"vlse<sew>.v\t%0,%3,zero\";
+ * return TARGET_XTHEADVECTOR ? \"th.vlse.v\t%0,%3,zero\" : \"vlse<sew>.v\t%0,%3,zero\";
+ %^vmv.s.x\t%0,%3
+ %^vmv.s.x\t%0,%3"
"(register_operand (operands[3], <VEL>mode)
|| CONST_POLY_INT_P (operands[3]))
&& GET_MODE_BITSIZE (<VEL>mode) > GET_MODE_BITSIZE (Pmode)"
@@ -2065,14 +2065,14 @@ (define_insn "*pred_broadcast<mode>"
(match_operand:V_VLSF_ZVFHMIN 2 "vector_merge_operand" "vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"@
- vfmv.v.f\t%0,%3
- vfmv.v.f\t%0,%3
- vlse<sew>.v\t%0,%3,zero,%1.t
- vlse<sew>.v\t%0,%3,zero,%1.t
- vlse<sew>.v\t%0,%3,zero
- vlse<sew>.v\t%0,%3,zero
- vfmv.s.f\t%0,%3
- vfmv.s.f\t%0,%3"
+ %^vfmv.v.f\t%0,%3
+ %^vfmv.v.f\t%0,%3
+ * return TARGET_XTHEADVECTOR ? \"th.vlse.v\t%0,%3,zero,%1.t\" : \"vlse<sew>.v\t%0,%3,zero,%1.t\";
+ * return TARGET_XTHEADVECTOR ? \"th.vlse.v\t%0,%3,zero,%1.t\" : \"vlse<sew>.v\t%0,%3,zero,%1.t\";
+ * return TARGET_XTHEADVECTOR ? \"th.vlse.v\t%0,%3,zero\" : \"vlse<sew>.v\t%0,%3,zero\";
+ * return TARGET_XTHEADVECTOR ? \"th.vlse.v\t%0,%3,zero\" : \"vlse<sew>.v\t%0,%3,zero\";
+ %^vfmv.s.f\t%0,%3
+ %^vfmv.s.f\t%0,%3"
[(set_attr "type" "vfmov,vfmov,vlds,vlds,vlds,vlds,vfmovfv,vfmovfv")
(set_attr "mode" "<MODE>")])
@@ -2093,10 +2093,10 @@ (define_insn "*pred_broadcast<mode>_extended_scalar"
(match_operand:V_VLSI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
"@
- vmv.v.x\t%0,%3
- vmv.v.x\t%0,%3
- vmv.s.x\t%0,%3
- vmv.s.x\t%0,%3"
+ %^vmv.v.x\t%0,%3
+ %^vmv.v.x\t%0,%3
+ %^vmv.s.x\t%0,%3
+ %^vmv.s.x\t%0,%3"
[(set_attr "type" "vimov,vimov,vimovxv,vimovxv")
(set_attr "mode" "<MODE>")])
@@ -2114,7 +2114,7 @@ (define_insn "*pred_broadcast<mode>_zero"
(match_operand:V_VLS 3 "vector_const_0_operand" "Wc0, Wc0")
(match_operand:V_VLS 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vmv.s.x\t%0,zero"
+ "%^vmv.s.x\t%0,zero"
[(set_attr "type" "vimovxv,vimovxv")
(set_attr "mode" "<MODE>")])
@@ -2134,7 +2134,7 @@ (define_insn "*pred_broadcast<mode>_imm"
(match_operand:V_VLS 3 "vector_const_int_or_double_0_operand" "viWc0, viWc0")
(match_operand:V_VLS 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vmv.v.i\t%0,%v3"
+ "%^vmv.v.i\t%0,%v3"
[(set_attr "type" "vimov,vimov")
(set_attr "mode" "<MODE>")])
@@ -2162,12 +2162,12 @@ (define_insn "@pred_strided_load<mode>"
(match_operand:V 2 "vector_merge_operand" " 0, vu, vu, 0, vu, vu")))]
"TARGET_VECTOR"
"@
- vlse<sew>.v\t%0,%3,%z4%p1
- vlse<sew>.v\t%0,%3,%z4
- vlse<sew>.v\t%0,%3,%z4,%1.t
- vle<sew>.v\t%0,%3%p1
- vle<sew>.v\t%0,%3
- vle<sew>.v\t%0,%3,%1.t"
+ * return TARGET_XTHEADVECTOR ? \"th.vlse.v\t%0,%3,%z4%p1\" : \"vlse<sew>.v\t%0,%3,%z4%p1\";
+ * return TARGET_XTHEADVECTOR ? \"th.vlse.v\t%0,%3,%z4\" : \"vlse<sew>.v\t%0,%3,%z4\";
+ * return TARGET_XTHEADVECTOR ? \"th.vlse.v\t%0,%3,%z4,%1.t\" : \"vlse<sew>.v\t%0,%3,%z4,%1.t\";
+ * return TARGET_XTHEADVECTOR ? \"th.vle.v\t%0,%3%p1\" : \"vle<sew>.v\t%0,%3%p1\";
+ * return TARGET_XTHEADVECTOR ? \"th.vle.v\t%0,%3\" : \"vle<sew>.v\t%0,%3\";
+ * return TARGET_XTHEADVECTOR ? \"th.vle.v\t%0,%3,%1.t\" : \"vle<sew>.v\t%0,%3,%1.t\";"
[(set_attr "type" "vlds")
(set_attr "mode" "<MODE>")])
@@ -2186,8 +2186,8 @@ (define_insn "@pred_strided_store<mode>"
(match_dup 0)))]
"TARGET_VECTOR"
"@
- vsse<sew>.v\t%3,%0,%z2%p1
- vse<sew>.v\t%3,%0%p1"
+ * return TARGET_XTHEADVECTOR ? \"th.vsse.v\t%3,%0,%z2%p1\" : \"vsse<sew>.v\t%3,%0,%z2%p1\";
+ * return TARGET_XTHEADVECTOR ? \"th.vse.v\t%3,%0%p1\" : \"vse<sew>.v\t%3,%0%p1\";"
[(set_attr "type" "vsts")
(set_attr "mode" "<MODE>")
(set (attr "avl_type_idx") (const_int 5))])
@@ -2217,7 +2217,7 @@ (define_insn "@pred_indexed_<order>load<mode>_same_eew"
(match_operand:<VINDEX> 4 "register_operand" " vr, vr,vr, vr")] ORDER)
(match_operand:V 2 "vector_merge_operand" " vu, vu, 0, 0")))]
"TARGET_VECTOR"
- "vl<order>xei<sew>.v\t%0,(%z3),%4%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vlxe.v\t%0,(%z3),%4%p1" : "vl<order>xei<sew>.v\t%0,(%z3),%4%p1"; }
[(set_attr "type" "vld<order>x")
(set_attr "mode" "<MODE>")])
@@ -2498,18 +2498,18 @@ (define_insn "@pred_<optab><mode>"
(match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))]
"TARGET_VECTOR"
"@
- v<insn>.vv\t%0,%3,%4%p1
- v<insn>.vv\t%0,%3,%4%p1
- v<insn>.vv\t%0,%3,%4%p1
- v<insn>.vv\t%0,%3,%4%p1
- v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
- v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
- v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
- v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
- v<binop_reverse_vi_variant_insn>\t%0,<binop_reverse_vi_variant_op>%p1
- v<binop_reverse_vi_variant_insn>\t%0,<binop_reverse_vi_variant_op>%p1
- v<binop_reverse_vi_variant_insn>\t%0,<binop_reverse_vi_variant_op>%p1
- v<binop_reverse_vi_variant_insn>\t%0,<binop_reverse_vi_variant_op>%p1"
+ %^v<insn>.vv\t%0,%3,%4%p1
+ %^v<insn>.vv\t%0,%3,%4%p1
+ %^v<insn>.vv\t%0,%3,%4%p1
+ %^v<insn>.vv\t%0,%3,%4%p1
+ %^v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
+ %^v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
+ %^v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
+ %^v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
+ %^v<binop_reverse_vi_variant_insn>\t%0,<binop_reverse_vi_variant_op>%p1
+ %^v<binop_reverse_vi_variant_insn>\t%0,<binop_reverse_vi_variant_op>%p1
+ %^v<binop_reverse_vi_variant_insn>\t%0,<binop_reverse_vi_variant_op>%p1
+ %^v<binop_reverse_vi_variant_insn>\t%0,<binop_reverse_vi_variant_op>%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -2533,7 +2533,7 @@ (define_insn "@pred_<optab><mode>_scalar"
(match_operand 4 "pmode_reg_or_uimm5_operand" " r, r, r, r, K, K, K, K"))
(match_operand:V_VLSI 2 "vector_merge_operand" "vu, 0, vu, 0,vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.v%o4\t%0,%3,%4%p1"
+ "%^v<insn>.v%o4\t%0,%3,%4%p1"
[(set_attr "type" "vshift")
(set_attr "mode" "<MODE>")])
@@ -2555,7 +2555,7 @@ (define_insn "@pred_<optab><mode>_scalar"
(match_operand:V_VLSI_QHS 3 "register_operand" "vr,vr, vr, vr"))
(match_operand:V_VLSI_QHS 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.vx\t%0,%3,%z4%p1"
+ "%^v<insn>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -2576,7 +2576,7 @@ (define_insn "@pred_<optab><mode>_scalar"
(match_operand:<VEL> 4 "reg_or_0_operand" "rJ,rJ, rJ, rJ")))
(match_operand:V_VLSI_QHS 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.vx\t%0,%3,%z4%p1"
+ "%^v<insn>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -2597,7 +2597,7 @@ (define_insn "@pred_sub<mode>_reverse_scalar"
(match_operand:V_VLSI_QHS 3 "register_operand" "vr,vr, vr, vr"))
(match_operand:V_VLSI_QHS 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vrsub.vx\t%0,%3,%z4%p1"
+ "%^vrsub.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vialu")
(set_attr "mode" "<MODE>")])
@@ -2653,7 +2653,7 @@ (define_insn "*pred_<optab><mode>_scalar"
(match_operand:V_VLSI_D 3 "register_operand" "vr,vr, vr, vr"))
(match_operand:V_VLSI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.vx\t%0,%3,%z4%p1"
+ "%^v<insn>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -2675,7 +2675,7 @@ (define_insn "*pred_<optab><mode>_extended_scalar"
(match_operand:V_VLSI_D 3 "register_operand" "vr,vr, vr, vr"))
(match_operand:V_VLSI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.vx\t%0,%3,%z4%p1"
+ "%^v<insn>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -2729,7 +2729,7 @@ (define_insn "*pred_<optab><mode>_scalar"
(match_operand:<VEL> 4 "reg_or_0_operand" "rJ,rJ, rJ, rJ")))
(match_operand:V_VLSI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.vx\t%0,%3,%z4%p1"
+ "%^v<insn>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -2751,7 +2751,7 @@ (define_insn "*pred_<optab><mode>_extended_scalar"
(match_operand:<VSUBEL> 4 "reg_or_0_operand" "rJ,rJ, rJ, rJ"))))
(match_operand:V_VLSI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.vx\t%0,%3,%z4%p1"
+ "%^v<insn>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -2805,7 +2805,7 @@ (define_insn "*pred_sub<mode>_reverse_scalar"
(match_operand:V_VLSI_D 3 "register_operand" "vr,vr, vr, vr"))
(match_operand:V_VLSI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vrsub.vx\t%0,%3,%z4%p1"
+ "%^vrsub.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vialu")
(set_attr "mode" "<MODE>")])
@@ -2827,7 +2827,7 @@ (define_insn "*pred_sub<mode>_extended_reverse_scalar"
(match_operand:V_VLSI_D 3 "register_operand" "vr,vr, vr, vr"))
(match_operand:V_VLSI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vrsub.vx\t%0,%3,%z4%p1"
+ "%^vrsub.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vialu")
(set_attr "mode" "<MODE>")])
@@ -2848,7 +2848,7 @@ (define_insn "@pred_mulh<v_su><mode>"
(match_operand:VFULLI 4 "register_operand" "vr,vr, vr, vr")] VMULH)
(match_operand:VFULLI 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vmulh<v_su>.vv\t%0,%3,%4%p1"
+ "%^vmulh<v_su>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vimul")
(set_attr "mode" "<MODE>")])
@@ -2869,7 +2869,7 @@ (define_insn "@pred_mulh<v_su><mode>_scalar"
(match_operand:VI_QHS 3 "register_operand" "vr,vr, vr, vr")] VMULH)
(match_operand:VI_QHS 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vmulh<v_su>.vx\t%0,%3,%z4%p1"
+ "%^vmulh<v_su>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vimul")
(set_attr "mode" "<MODE>")])
@@ -2923,7 +2923,7 @@ (define_insn "*pred_mulh<v_su><mode>_scalar"
(match_operand:VFULLI_D 3 "register_operand" "vr,vr, vr, vr")] VMULH)
(match_operand:VFULLI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vmulh<v_su>.vx\t%0,%3,%z4%p1"
+ "%^vmulh<v_su>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vimul")
(set_attr "mode" "<MODE>")])
@@ -2945,7 +2945,7 @@ (define_insn "*pred_mulh<v_su><mode>_extended_scalar"
(match_operand:VFULLI_D 3 "register_operand" "vr,vr, vr, vr")] VMULH)
(match_operand:VFULLI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vmulh<v_su>.vx\t%0,%3,%z4%p1"
+ "%^vmulh<v_su>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vimul")
(set_attr "mode" "<MODE>")])
@@ -2966,7 +2966,7 @@ (define_insn "@pred_adc<mode>"
(match_operand:<VM> 4 "register_operand" "vm,vm,vm,vm")] UNSPEC_VADC)
(match_operand:VI 1 "vector_merge_operand" "vu, 0,vu, 0")))]
"TARGET_VECTOR"
- "vadc.v%o3m\t%0,%2,%v3,%4"
+ "%^vadc.v%o3m\t%0,%2,%v3,%4"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -2990,7 +2990,7 @@ (define_insn "@pred_sbc<mode>"
(match_operand:<VM> 4 "register_operand" "vm,vm")] UNSPEC_VSBC)
(match_operand:VI 1 "vector_merge_operand" "vu, 0")))]
"TARGET_VECTOR"
- "vsbc.vvm\t%0,%2,%3,%4"
+ "%^vsbc.vvm\t%0,%2,%3,%4"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -3015,7 +3015,7 @@ (define_insn "@pred_adc<mode>_scalar"
(match_operand:<VM> 4 "register_operand" "vm,vm")] UNSPEC_VADC)
(match_operand:VI_QHS 1 "vector_merge_operand" "vu, 0")))]
"TARGET_VECTOR"
- "vadc.vxm\t%0,%2,%3,%4"
+ "%^vadc.vxm\t%0,%2,%3,%4"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -3040,7 +3040,7 @@ (define_insn "@pred_sbc<mode>_scalar"
(match_operand:<VM> 4 "register_operand" "vm,vm")] UNSPEC_VSBC)
(match_operand:VI_QHS 1 "vector_merge_operand" "vu, 0")))]
"TARGET_VECTOR"
- "vsbc.vxm\t%0,%2,%z3,%4"
+ "%^vsbc.vxm\t%0,%2,%z3,%4"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -3098,7 +3098,7 @@ (define_insn "*pred_adc<mode>_scalar"
(match_operand:<VM> 4 "register_operand" "vm,vm")] UNSPEC_VADC)
(match_operand:VI_D 1 "vector_merge_operand" "vu, 0")))]
"TARGET_VECTOR"
- "vadc.vxm\t%0,%2,%z3,%4"
+ "%^vadc.vxm\t%0,%2,%z3,%4"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -3124,7 +3124,7 @@ (define_insn "*pred_adc<mode>_extended_scalar"
(match_operand:<VM> 4 "register_operand" "vm,vm")] UNSPEC_VADC)
(match_operand:VI_D 1 "vector_merge_operand" "vu, 0")))]
"TARGET_VECTOR"
- "vadc.vxm\t%0,%2,%z3,%4"
+ "%^vadc.vxm\t%0,%2,%z3,%4"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -3182,7 +3182,7 @@ (define_insn "*pred_sbc<mode>_scalar"
(match_operand:<VM> 4 "register_operand" "vm,vm")] UNSPEC_VSBC)
(match_operand:VI_D 1 "vector_merge_operand" "vu, 0")))]
"TARGET_VECTOR"
- "vsbc.vxm\t%0,%2,%z3,%4"
+ "%^vsbc.vxm\t%0,%2,%z3,%4"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -3208,7 +3208,7 @@ (define_insn "*pred_sbc<mode>_extended_scalar"
(match_operand:<VM> 4 "register_operand" "vm,vm")] UNSPEC_VSBC)
(match_operand:VI_D 1 "vector_merge_operand" "vu, 0")))]
"TARGET_VECTOR"
- "vsbc.vxm\t%0,%2,%z3,%4"
+ "%^vsbc.vxm\t%0,%2,%z3,%4"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -3229,7 +3229,7 @@ (define_insn "@pred_madc<mode>"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_VMADC))]
"TARGET_VECTOR"
- "vmadc.v%o2m\t%0,%1,%v2,%3"
+ "%^vmadc.v%o2m\t%0,%1,%v2,%3"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "4")
@@ -3248,7 +3248,7 @@ (define_insn "@pred_msbc<mode>"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_VMSBC))]
"TARGET_VECTOR"
- "vmsbc.vvm\t%0,%1,%2,%3"
+ "%^vmsbc.vvm\t%0,%1,%2,%3"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "4")
@@ -3268,7 +3268,7 @@ (define_insn "@pred_madc<mode>_scalar"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_VMADC))]
"TARGET_VECTOR"
- "vmadc.vxm\t%0,%1,%2,%3"
+ "%^vmadc.vxm\t%0,%1,%2,%3"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "4")
@@ -3288,7 +3288,7 @@ (define_insn "@pred_msbc<mode>_scalar"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_VMSBC))]
"TARGET_VECTOR"
- "vmsbc.vxm\t%0,%1,%z2,%3"
+ "%^vmsbc.vxm\t%0,%1,%z2,%3"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "4")
@@ -3337,7 +3337,7 @@ (define_insn "*pred_madc<mode>_scalar"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_VMADC))]
"TARGET_VECTOR"
- "vmadc.vxm\t%0,%1,%z2,%3"
+ "%^vmadc.vxm\t%0,%1,%z2,%3"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "4")
@@ -3358,7 +3358,7 @@ (define_insn "*pred_madc<mode>_extended_scalar"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_VMADC))]
"TARGET_VECTOR"
- "vmadc.vxm\t%0,%1,%z2,%3"
+ "%^vmadc.vxm\t%0,%1,%z2,%3"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "4")
@@ -3407,7 +3407,7 @@ (define_insn "*pred_msbc<mode>_scalar"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_VMSBC))]
"TARGET_VECTOR"
- "vmsbc.vxm\t%0,%1,%z2,%3"
+ "%^vmsbc.vxm\t%0,%1,%z2,%3"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "4")
@@ -3428,7 +3428,7 @@ (define_insn "*pred_msbc<mode>_extended_scalar"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_VMSBC))]
"TARGET_VECTOR"
- "vmsbc.vxm\t%0,%1,%z2,%3"
+ "%^vmsbc.vxm\t%0,%1,%z2,%3"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "4")
@@ -3446,7 +3446,7 @@ (define_insn "@pred_madc<mode>_overflow"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_OVERFLOW))]
"TARGET_VECTOR"
- "vmadc.v%o2\t%0,%1,%v2"
+ "%^vmadc.v%o2\t%0,%1,%v2"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "3")
@@ -3464,7 +3464,7 @@ (define_insn "@pred_msbc<mode>_overflow"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_OVERFLOW))]
"TARGET_VECTOR"
- "vmsbc.vv\t%0,%1,%2"
+ "%^vmsbc.vv\t%0,%1,%2"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "3")
@@ -3483,7 +3483,7 @@ (define_insn "@pred_madc<mode>_overflow_scalar"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_OVERFLOW))]
"TARGET_VECTOR"
- "vmadc.vx\t%0,%1,%z2"
+ "%^vmadc.vx\t%0,%1,%z2"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "3")
@@ -3502,7 +3502,7 @@ (define_insn "@pred_msbc<mode>_overflow_scalar"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_OVERFLOW))]
"TARGET_VECTOR"
- "vmsbc.vx\t%0,%1,%z2"
+ "%^vmsbc.vx\t%0,%1,%z2"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "3")
@@ -3549,7 +3549,7 @@ (define_insn "*pred_madc<mode>_overflow_scalar"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_OVERFLOW))]
"TARGET_VECTOR"
- "vmadc.vx\t%0,%1,%z2"
+ "%^vmadc.vx\t%0,%1,%z2"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "3")
@@ -3569,7 +3569,7 @@ (define_insn "*pred_madc<mode>_overflow_extended_scalar"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_OVERFLOW))]
"TARGET_VECTOR"
- "vmadc.vx\t%0,%1,%z2"
+ "%^vmadc.vx\t%0,%1,%z2"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "3")
@@ -3616,7 +3616,7 @@ (define_insn "*pred_msbc<mode>_overflow_scalar"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_OVERFLOW))]
"TARGET_VECTOR"
- "vmsbc.vx\t%0,%1,%z2"
+ "%^vmsbc.vx\t%0,%1,%z2"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "3")
@@ -3636,7 +3636,7 @@ (define_insn "*pred_msbc<mode>_overflow_extended_scalar"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_OVERFLOW))]
"TARGET_VECTOR"
- "vmsbc.vx\t%0,%1,%z2"
+ "%^vmsbc.vx\t%0,%1,%z2"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "3")
@@ -3660,11 +3660,34 @@ (define_insn "@pred_<optab><mode>"
(match_operand 7 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
- (any_int_unop:V_VLSI
+ (not_unop:V_VLSI
(match_operand:V_VLSI 3 "register_operand" "vr,vr, vr, vr"))
(match_operand:V_VLSI 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.v\t%0,%3%p1"
+ "%^vnot.v\t%0,%3%p1"
+ [(set_attr "type" "vialu")
+ (set_attr "mode" "<MODE>")
+ (set_attr "vl_op_idx" "4")
+ (set (attr "ta") (symbol_ref "riscv_vector::get_ta (operands[5])"))
+ (set (attr "ma") (symbol_ref "riscv_vector::get_ma (operands[6])"))
+ (set (attr "avl_type_idx") (const_int 7))])
+
+(define_insn "@pred_<optab><mode>"
+ [(set (match_operand:V_VLSI 0 "register_operand" "=vd,vd, vr, vr")
+ (if_then_else:V_VLSI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vm,vm,Wc1,Wc1")
+ (match_operand 4 "vector_length_operand" "rK,rK, rK, rK")
+ (match_operand 5 "const_int_operand" " i, i, i, i")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (neg_unop:V_VLSI
+ (match_operand:V_VLSI 3 "register_operand" "vr,vr, vr, vr"))
+ (match_operand:V_VLSI 2 "vector_merge_operand" "vu, 0, vu, 0")))]
+ "TARGET_VECTOR"
+ { return TARGET_XTHEADVECTOR ? "th.vrsub.vx\t%0,%3,x0%p1" : "vneg.v\t%0,%3%p1"; }
[(set_attr "type" "vialu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "4")
@@ -3696,7 +3719,7 @@ (define_insn "@pred_<optab><mode>_vf2"
(any_extend:VWEXTI
(match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
(match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"v<sz>ext.vf2\t%0,%3%p1"
[(set_attr "type" "vext")
(set_attr "mode" "<MODE>")])
@@ -3716,7 +3739,7 @@ (define_insn "@pred_<optab><mode>_vf4"
(any_extend:VQEXTI
(match_operand:<V_QUAD_TRUNC> 3 "register_operand" " vr, vr"))
(match_operand:VQEXTI 2 "vector_merge_operand" " vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"v<sz>ext.vf4\t%0,%3%p1"
[(set_attr "type" "vext")
(set_attr "mode" "<MODE>")])
@@ -3736,7 +3759,7 @@ (define_insn "@pred_<optab><mode>_vf8"
(any_extend:VOEXTI
(match_operand:<V_OCT_TRUNC> 3 "register_operand" " vr, vr"))
(match_operand:VOEXTI 2 "vector_merge_operand" " vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"v<sz>ext.vf8\t%0,%3%p1"
[(set_attr "type" "vext")
(set_attr "mode" "<MODE>")])
@@ -3760,7 +3783,7 @@ (define_insn "@pred_dual_widen_<any_widen_binop:optab><any_extend:su><mode>"
(match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr, vr")))
(match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vw<any_widen_binop:insn><any_extend:u>.vv\t%0,%3,%4%p1"
+ "%^vw<any_widen_binop:insn><any_extend:u>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vi<widen_binop_insn_type>")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -3783,7 +3806,7 @@ (define_insn "@pred_dual_widen_<any_widen_binop:optab><any_extend:su><mode>_scal
(match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ"))))
(match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vw<any_widen_binop:insn><any_extend:u>.vx\t%0,%3,%z4%p1"
+ "%^vw<any_widen_binop:insn><any_extend:u>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vi<widen_binop_insn_type>")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -3804,7 +3827,7 @@ (define_insn "@pred_single_widen_sub<any_extend:su><mode>"
(match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr, vr")))
(match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vwsub<any_extend:u>.wv\t%0,%3,%4%p1"
+ "%^vwsub<any_extend:u>.wv\t%0,%3,%4%p1"
[(set_attr "type" "viwalu")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -3825,7 +3848,7 @@ (define_insn "@pred_single_widen_add<any_extend:su><mode>"
(match_operand:VWEXTI 3 "register_operand" " vr, vr"))
(match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vwadd<any_extend:u>.wv\t%0,%3,%4%p1"
+ "%^vwadd<any_extend:u>.wv\t%0,%3,%4%p1"
[(set_attr "type" "viwalu")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -3847,7 +3870,7 @@ (define_insn "@pred_single_widen_<plus_minus:optab><any_extend:su><mode>_scalar"
(match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ"))))
(match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vw<plus_minus:insn><any_extend:u>.wx\t%0,%3,%z4%p1"
+ "%^vw<plus_minus:insn><any_extend:u>.wx\t%0,%3,%z4%p1"
[(set_attr "type" "vi<widen_binop_insn_type>")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -3869,7 +3892,7 @@ (define_insn "@pred_widen_mulsu<mode>"
(match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr, vr")))
(match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vwmulsu.vv\t%0,%3,%4%p1"
+ "%^vwmulsu.vv\t%0,%3,%4%p1"
[(set_attr "type" "viwmul")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -3892,7 +3915,7 @@ (define_insn "@pred_widen_mulsu<mode>_scalar"
(match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ"))))
(match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vwmulsu.vx\t%0,%3,%z4%p1"
+ "%^vwmulsu.vx\t%0,%3,%z4%p1"
[(set_attr "type" "viwmul")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -3915,7 +3938,7 @@ (define_insn "@pred_<optab><mode>"
(reg:<VEL> X0_REGNUM)))
(match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vwcvt<u>.x.x.v\t%0,%3%p1"
+ "%^vwcvt<u>.x.x.v\t%0,%3%p1"
[(set_attr "type" "viwalu")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set_attr "vl_op_idx" "4")
@@ -3950,7 +3973,7 @@ (define_insn "@pred_narrow_<optab><mode>"
(match_operand:<V_DOUBLE_TRUNC> 4 "vector_shift_operand" " 0, 0, 0, 0,vr, vr, vr, vr, vk, vk, vk, vk")))
(match_operand:<V_DOUBLE_TRUNC> 2 "vector_merge_operand" " 0,vu, 0, vu,vu, vu, vu, 0, vu, vu, vu, 0")))]
"TARGET_VECTOR"
- "vn<insn>.w%o4\t%0,%3,%v4%p1"
+ "%^vn<insn>.w%o4\t%0,%3,%v4%p1"
[(set_attr "type" "vnshift")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -3971,7 +3994,7 @@ (define_insn "@pred_narrow_<optab><mode>_scalar"
(match_operand 4 "pmode_reg_or_uimm5_operand" " rK, rK, rK, rK, rK, rK")))
(match_operand:<V_DOUBLE_TRUNC> 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vn<insn>.w%o4\t%0,%3,%4%p1"
+ "%^vn<insn>.w%o4\t%0,%3,%4%p1"
[(set_attr "type" "vnshift")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -3991,7 +4014,7 @@ (define_insn "@pred_trunc<mode>"
(match_operand:VWEXTI 3 "register_operand" " 0, 0, 0, 0, vr, vr"))
(match_operand:<V_DOUBLE_TRUNC> 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vncvt.x.x.w\t%0,%3%p1"
+ "%^vncvt.x.x.w\t%0,%3%p1"
[(set_attr "type" "vnshift")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set_attr "vl_op_idx" "4")
@@ -4028,14 +4051,14 @@ (define_insn "@pred_<optab><mode>"
(match_operand:VI 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"@
- v<insn>.vv\t%0,%3,%4%p1
- v<insn>.vv\t%0,%3,%4%p1
- v<insn>.vv\t%0,%3,%4%p1
- v<insn>.vv\t%0,%3,%4%p1
- v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
- v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
- v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
- v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1"
+ %^v<insn>.vv\t%0,%3,%4%p1
+ %^v<insn>.vv\t%0,%3,%4%p1
+ %^v<insn>.vv\t%0,%3,%4%p1
+ %^v<insn>.vv\t%0,%3,%4%p1
+ %^v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
+ %^v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
+ %^v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1
+ %^v<binop_vi_variant_insn>\t%0,<binop_vi_variant_op>%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4057,7 +4080,7 @@ (define_insn "@pred_<optab><mode>_scalar"
(match_operand:VI_QHS 3 "register_operand" " vr, vr, vr, vr"))
(match_operand:VI_QHS 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.vx\t%0,%3,%4%p1"
+ "%^v<insn>.vx\t%0,%3,%4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4078,7 +4101,7 @@ (define_insn "@pred_<optab><mode>_scalar"
(match_operand:<VEL> 4 "register_operand" " r, r, r, r")))
(match_operand:VI_QHS 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.vx\t%0,%3,%4%p1"
+ "%^v<insn>.vx\t%0,%3,%4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4132,7 +4155,7 @@ (define_insn "*pred_<optab><mode>_scalar"
(match_operand:VI_D 3 "register_operand" " vr, vr, vr, vr"))
(match_operand:VI_D 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.vx\t%0,%3,%4%p1"
+ "%^v<insn>.vx\t%0,%3,%4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4154,7 +4177,7 @@ (define_insn "*pred_<optab><mode>_extended_scalar"
(match_operand:VI_D 3 "register_operand" " vr, vr, vr, vr"))
(match_operand:VI_D 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.vx\t%0,%3,%4%p1"
+ "%^v<insn>.vx\t%0,%3,%4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4208,7 +4231,7 @@ (define_insn "*pred_<optab><mode>_scalar"
(match_operand:<VEL> 4 "register_operand" " r, r, r, r")))
(match_operand:VI_D 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.vx\t%0,%3,%4%p1"
+ "%^v<insn>.vx\t%0,%3,%4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4230,7 +4253,7 @@ (define_insn "*pred_<optab><mode>_extended_scalar"
(match_operand:<VSUBEL> 4 "register_operand" " r, r, r, r"))))
(match_operand:VI_D 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<insn>.vx\t%0,%3,%4%p1"
+ "%^v<insn>.vx\t%0,%3,%4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4252,7 +4275,7 @@ (define_insn "@pred_<sat_op><mode>"
(match_operand:VI 4 "register_operand" " vr, vr, vr, vr")] VSAT_OP)
(match_operand:VI 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<sat_op>.vv\t%0,%3,%4%p1"
+ "%^v<sat_op>.vv\t%0,%3,%4%p1"
[(set_attr "type" "<sat_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4275,7 +4298,7 @@ (define_insn "@pred_<sat_op><mode>_scalar"
(match_operand:<VEL> 4 "reg_or_0_operand" " rJ, rJ, rJ, rJ")] VSAT_ARITH_OP)
(match_operand:VI_QHS 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<sat_op>.vx\t%0,%3,%z4%p1"
+ "%^v<sat_op>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "<sat_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4297,7 +4320,7 @@ (define_insn "@pred_<sat_op><mode>_scalar"
(match_operand 4 "pmode_reg_or_uimm5_operand" " rK, rK, rK, rK")] VSAT_SHIFT_OP)
(match_operand:VI 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<sat_op>.v%o4\t%0,%3,%4%p1"
+ "%^v<sat_op>.v%o4\t%0,%3,%4%p1"
[(set_attr "type" "<sat_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4355,7 +4378,7 @@ (define_insn "*pred_<sat_op><mode>_scalar"
(match_operand:<VEL> 4 "reg_or_0_operand" " rJ, rJ, rJ, rJ")] VSAT_ARITH_OP)
(match_operand:VI_D 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<sat_op>.vx\t%0,%3,%z4%p1"
+ "%^v<sat_op>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "<sat_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4378,7 +4401,7 @@ (define_insn "*pred_<sat_op><mode>_extended_scalar"
(match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ, rJ, rJ"))] VSAT_ARITH_OP)
(match_operand:VI_D 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "v<sat_op>.vx\t%0,%3,%z4%p1"
+ "%^v<sat_op>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "<sat_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4401,7 +4424,7 @@ (define_insn "@pred_narrow_clip<v_su><mode>"
(match_operand:<V_DOUBLE_TRUNC> 4 "vector_shift_operand" " 0, 0, 0, 0,vr, vr, vr, vr, vk, vk, vk, vk")] VNCLIP)
(match_operand:<V_DOUBLE_TRUNC> 2 "vector_merge_operand" " 0,vu, 0, vu,vu, vu, vu, 0, vu, vu, vu, 0")))]
"TARGET_VECTOR"
- "vnclip<v_su>.w%o4\t%0,%3,%v4%p1"
+ "%^vnclip<v_su>.w%o4\t%0,%3,%v4%p1"
[(set_attr "type" "vnclip")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -4423,7 +4446,7 @@ (define_insn "@pred_narrow_clip<v_su><mode>_scalar"
(match_operand 4 "pmode_reg_or_uimm5_operand" " rK, rK, rK, rK, rK, rK")] VNCLIP)
(match_operand:<V_DOUBLE_TRUNC> 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vnclip<v_su>.w%o4\t%0,%3,%4%p1"
+ "%^vnclip<v_su>.w%o4\t%0,%3,%4%p1"
[(set_attr "type" "vnclip")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -4466,7 +4489,7 @@ (define_insn "*pred_cmp<mode>_merge_tie_mask"
(match_operand:V_VLSI 4 "vector_arith_operand" "vrvi")])
(match_dup 1)))]
"TARGET_VECTOR"
- "vms%B2.v%o4\t%0,%3,%v4,v0.t"
+ "%^vms%B2.v%o4\t%0,%3,%v4,v0.t"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -4490,7 +4513,7 @@ (define_insn "*pred_cmp<mode>"
(match_operand:V_VLSI 5 "vector_arith_operand" " vr, vr, vi, vi")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
- "vms%B3.v%o5\t%0,%4,%v5%p1"
+ "%^vms%B3.v%o5\t%0,%4,%v5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -4510,7 +4533,7 @@ (define_insn "*pred_cmp<mode>_narrow"
(match_operand:V_VLSI 5 "vector_arith_operand" " vrvi, vrvi, 0, 0, vrvi, 0, 0, vrvi, vrvi")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, vu, vu, 0, 0, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
- "vms%B3.v%o5\t%0,%4,%v5%p1"
+ "%^vms%B3.v%o5\t%0,%4,%v5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -4546,7 +4569,7 @@ (define_insn "*pred_ltge<mode>_merge_tie_mask"
(match_operand:V_VLSI 4 "vector_neg_arith_operand" "vrvj")])
(match_dup 1)))]
"TARGET_VECTOR"
- "vms%B2.v%o4\t%0,%3,%v4,v0.t"
+ "%^vms%B2.v%o4\t%0,%3,%v4,v0.t"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -4570,7 +4593,7 @@ (define_insn "*pred_ltge<mode>"
(match_operand:V_VLSI 5 "vector_neg_arith_operand" " vr, vr, vj, vj")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
- "vms%B3.v%o5\t%0,%4,%v5%p1"
+ "%^vms%B3.v%o5\t%0,%4,%v5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -4590,7 +4613,7 @@ (define_insn "*pred_ltge<mode>_narrow"
(match_operand:V_VLSI 5 "vector_neg_arith_operand" " vrvj, vrvj, 0, 0, vrvj, 0, 0, vrvj, vrvj")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, vu, vu, 0, 0, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
- "vms%B3.v%o5\t%0,%4,%v5%p1"
+ "%^vms%B3.v%o5\t%0,%4,%v5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -4628,7 +4651,7 @@ (define_insn "*pred_cmp<mode>_scalar_merge_tie_mask"
(match_operand:<VEL> 4 "register_operand" " r"))])
(match_dup 1)))]
"TARGET_VECTOR"
- "vms%B2.vx\t%0,%3,%4,v0.t"
+ "%^vms%B2.vx\t%0,%3,%4,v0.t"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -4653,7 +4676,7 @@ (define_insn "*pred_cmp<mode>_scalar"
(match_operand:<VEL> 5 "register_operand" " r, r"))])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
- "vms%B3.vx\t%0,%4,%5%p1"
+ "%^vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -4674,7 +4697,7 @@ (define_insn "*pred_cmp<mode>_scalar_narrow"
(match_operand:<VEL> 5 "register_operand" " r, r, r, r, r"))])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
- "vms%B3.vx\t%0,%4,%5%p1"
+ "%^vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -4712,7 +4735,7 @@ (define_insn "*pred_eqne<mode>_scalar_merge_tie_mask"
(match_operand:V_VLSI_QHS 3 "register_operand" " vr")])
(match_dup 1)))]
"TARGET_VECTOR"
- "vms%B2.vx\t%0,%3,%4,v0.t"
+ "%^vms%B2.vx\t%0,%3,%4,v0.t"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -4737,7 +4760,7 @@ (define_insn "*pred_eqne<mode>_scalar"
(match_operand:V_VLSI_QHS 4 "register_operand" " vr, vr")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
- "vms%B3.vx\t%0,%4,%5%p1"
+ "%^vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -4758,7 +4781,7 @@ (define_insn "*pred_eqne<mode>_scalar_narrow"
(match_operand:V_VLSI_QHS 4 "register_operand" " vr, 0, 0, vr, vr")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
- "vms%B3.vx\t%0,%4,%5%p1"
+ "%^vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -4853,7 +4876,7 @@ (define_insn "*pred_cmp<mode>_scalar_merge_tie_mask"
(match_operand:<VEL> 4 "register_operand" " r"))])
(match_dup 1)))]
"TARGET_VECTOR"
- "vms%B2.vx\t%0,%3,%4,v0.t"
+ "%^vms%B2.vx\t%0,%3,%4,v0.t"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -4877,7 +4900,7 @@ (define_insn "*pred_eqne<mode>_scalar_merge_tie_mask"
(match_operand:V_VLSI_D 3 "register_operand" " vr")])
(match_dup 1)))]
"TARGET_VECTOR"
- "vms%B2.vx\t%0,%3,%4,v0.t"
+ "%^vms%B2.vx\t%0,%3,%4,v0.t"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -4902,7 +4925,7 @@ (define_insn "*pred_cmp<mode>_scalar"
(match_operand:<VEL> 5 "register_operand" " r, r"))])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
- "vms%B3.vx\t%0,%4,%5%p1"
+ "%^vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -4923,7 +4946,7 @@ (define_insn "*pred_cmp<mode>_scalar_narrow"
(match_operand:<VEL> 5 "register_operand" " r, r, r, r, r"))])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
- "vms%B3.vx\t%0,%4,%5%p1"
+ "%^vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -4944,7 +4967,7 @@ (define_insn "*pred_eqne<mode>_scalar"
(match_operand:V_VLSI_D 4 "register_operand" " vr, vr")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
- "vms%B3.vx\t%0,%4,%5%p1"
+ "%^vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -4965,7 +4988,7 @@ (define_insn "*pred_eqne<mode>_scalar_narrow"
(match_operand:V_VLSI_D 4 "register_operand" " vr, 0, 0, vr, vr")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
- "vms%B3.vx\t%0,%4,%5%p1"
+ "%^vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -4986,7 +5009,7 @@ (define_insn "*pred_cmp<mode>_extended_scalar_merge_tie_mask"
(match_operand:<VSUBEL> 4 "register_operand" " r")))])
(match_dup 1)))]
"TARGET_VECTOR"
- "vms%B2.vx\t%0,%3,%4,v0.t"
+ "%^vms%B2.vx\t%0,%3,%4,v0.t"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -5012,7 +5035,7 @@ (define_insn "*pred_cmp<mode>_extended_scalar"
(match_operand:<VSUBEL> 5 "register_operand" " r, r")))])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
- "vms%B3.vx\t%0,%4,%5%p1"
+ "%^vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -5033,7 +5056,7 @@ (define_insn "*pred_cmp<mode>_extended_scalar_narrow"
(match_operand:<VSUBEL> 5 "register_operand" " r, r, r, r, r")))])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
- "vms%B3.vx\t%0,%4,%5%p1"
+ "%^vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -5054,7 +5077,7 @@ (define_insn "*pred_eqne<mode>_extended_scalar_merge_tie_mask"
(match_operand:V_VLSI_D 3 "register_operand" " vr")])
(match_dup 1)))]
"TARGET_VECTOR"
- "vms%B2.vx\t%0,%3,%4,v0.t"
+ "%^vms%B2.vx\t%0,%3,%4,v0.t"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -5080,7 +5103,7 @@ (define_insn "*pred_eqne<mode>_extended_scalar"
(match_operand:V_VLSI_D 4 "register_operand" " vr, vr")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
- "vms%B3.vx\t%0,%4,%5%p1"
+ "%^vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -5101,7 +5124,7 @@ (define_insn "*pred_eqne<mode>_extended_scalar_narrow"
(match_operand:V_VLSI_D 4 "register_operand" " vr, 0, 0, vr, vr")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
- "vms%B3.vx\t%0,%4,%5%p1"
+ "%^vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -5270,12 +5293,12 @@ (define_insn "*pred_mul_plus<mode>_undef"
(match_operand:V_VLSI 2 "vector_undef_operand")))]
"TARGET_VECTOR"
"@
- vmadd.vv\t%0,%4,%5%p1
- vmacc.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%4\;vmacc.vv\t%0,%3,%4%p1
- vmadd.vv\t%0,%4,%5%p1
- vmacc.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%5\;vmacc.vv\t%0,%3,%4%p1"
+ %^vmadd.vv\t%0,%4,%5%p1
+ %^vmacc.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%4\;%^vmacc.vv\t%0,%3,%4%p1
+ %^vmadd.vv\t%0,%4,%5%p1
+ %^vmacc.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%5\;%^vmacc.vv\t%0,%3,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")])
@@ -5298,10 +5321,10 @@ (define_insn "*pred_madd<mode>"
(match_dup 2)))]
"TARGET_VECTOR"
"@
- vmadd.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vmadd.vv\t%0,%3,%4%p1
- vmadd.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vmadd.vv\t%0,%3,%4%p1"
+ %^vmadd.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%2\;%^vmadd.vv\t%0,%3,%4%p1
+ %^vmadd.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%2\;%^vmadd.vv\t%0,%3,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5329,10 +5352,10 @@ (define_insn "*pred_macc<mode>"
(match_dup 4)))]
"TARGET_VECTOR"
"@
- vmacc.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vv\t%0,%2,%3%p1
- vmacc.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vv\t%0,%2,%3%p1"
+ %^vmacc.vv\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vmacc.vv\t%0,%2,%3%p1
+ %^vmacc.vv\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vmacc.vv\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -5382,10 +5405,10 @@ (define_insn "*pred_madd<mode>_scalar"
(match_dup 3)))]
"TARGET_VECTOR"
"@
- vmadd.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
- vmadd.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1"
+ %^vmadd.vx\t%0,%2,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vmadd.vx\t%0,%2,%4%p1
+ %^vmadd.vx\t%0,%2,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vmadd.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5414,10 +5437,10 @@ (define_insn "*pred_macc<mode>_scalar"
(match_dup 4)))]
"TARGET_VECTOR"
"@
- vmacc.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
- vmacc.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
+ %^vmacc.vx\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vmacc.vx\t%0,%2,%3%p1
+ %^vmacc.vx\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vmacc.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -5482,10 +5505,10 @@ (define_insn "*pred_madd<mode>_extended_scalar"
(match_dup 3)))]
"TARGET_VECTOR"
"@
- vmadd.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1
- vmadd.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1"
+ %^vmadd.vx\t%0,%2,%4%p1
+ %^vmv.v.v\t%0,%2\;%^vmadd.vx\t%0,%2,%4%p1
+ %^vmadd.vx\t%0,%2,%4%p1
+ %^vmv.v.v\t%0,%2\;%^vmadd.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5515,10 +5538,10 @@ (define_insn "*pred_macc<mode>_extended_scalar"
(match_dup 4)))]
"TARGET_VECTOR"
"@
- vmacc.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
- vmacc.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
+ %^vmacc.vx\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vmacc.vx\t%0,%2,%3%p1
+ %^vmacc.vx\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vmacc.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -5568,12 +5591,12 @@ (define_insn "*pred_minus_mul<mode>_undef"
(match_operand:V_VLSI 2 "vector_undef_operand")))]
"TARGET_VECTOR"
"@
- vnmsub.vv\t%0,%4,%5%p1
- vnmsac.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vv\t%0,%4,%5%p1
- vnmsub.vv\t%0,%4,%5%p1
- vnmsac.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vv\t%0,%4,%5%p1"
+ %^vnmsub.vv\t%0,%4,%5%p1
+ %^vnmsac.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vnmsub.vv\t%0,%4,%5%p1
+ %^vnmsub.vv\t%0,%4,%5%p1
+ %^vnmsac.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vnmsub.vv\t%0,%4,%5%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")])
@@ -5596,10 +5619,10 @@ (define_insn "*pred_nmsub<mode>"
(match_dup 2)))]
"TARGET_VECTOR"
"@
- vnmsub.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vnmsub.vv\t%0,%3,%4%p1
- vnmsub.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vnmsub.vv\t%0,%3,%4%p1"
+ %^vnmsub.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%2\;%^vnmsub.vv\t%0,%3,%4%p1
+ %^vnmsub.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%2\;%^vnmsub.vv\t%0,%3,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5627,10 +5650,10 @@ (define_insn "*pred_nmsac<mode>"
(match_dup 4)))]
"TARGET_VECTOR"
"@
- vnmsac.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vv\t%0,%2,%3%p1
- vnmsac.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vv\t%0,%2,%3%p1"
+ %^vnmsac.vv\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vnmsac.vv\t%0,%2,%3%p1
+ %^vnmsac.vv\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vnmsac.vv\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -5680,10 +5703,10 @@ (define_insn "*pred_nmsub<mode>_scalar"
(match_dup 3)))]
"TARGET_VECTOR"
"@
- vnmsub.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
- vnmsub.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
+ %^vnmsub.vx\t%0,%2,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vnmsub.vx\t%0,%2,%4%p1
+ %^vnmsub.vx\t%0,%2,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vnmsub.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5712,10 +5735,10 @@ (define_insn "*pred_nmsac<mode>_scalar"
(match_dup 4)))]
"TARGET_VECTOR"
"@
- vnmsac.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
- vnmsac.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
+ %^vnmsac.vx\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vnmsac.vx\t%0,%2,%3%p1
+ %^vnmsac.vx\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vnmsac.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -5780,10 +5803,10 @@ (define_insn "*pred_nmsub<mode>_extended_scalar"
(match_dup 3)))]
"TARGET_VECTOR"
"@
- vnmsub.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
- vnmsub.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
+ %^vnmsub.vx\t%0,%2,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vnmsub.vx\t%0,%2,%4%p1
+ %^vnmsub.vx\t%0,%2,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vnmsub.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5813,10 +5836,10 @@ (define_insn "*pred_nmsac<mode>_extended_scalar"
(match_dup 4)))]
"TARGET_VECTOR"
"@
- vnmsac.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
- vnmsac.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
+ %^vnmsac.vx\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vnmsac.vx\t%0,%2,%3%p1
+ %^vnmsac.vx\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vnmsac.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -5852,7 +5875,7 @@ (define_insn "@pred_widen_mul_plus<su><mode>"
(match_operand:VWEXTI 2 "register_operand" " 0"))
(match_dup 2)))]
"TARGET_VECTOR"
- "vwmacc<u>.vv\t%0,%3,%4%p1"
+ "%^vwmacc<u>.vv\t%0,%3,%4%p1"
[(set_attr "type" "viwmuladd")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -5877,7 +5900,7 @@ (define_insn "@pred_widen_mul_plus<su><mode>_scalar"
(match_operand:VWEXTI 2 "register_operand" " 0"))
(match_dup 2)))]
"TARGET_VECTOR"
- "vwmacc<u>.vx\t%0,%3,%4%p1"
+ "%^vwmacc<u>.vx\t%0,%3,%4%p1"
[(set_attr "type" "viwmuladd")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -5901,7 +5924,7 @@ (define_insn "@pred_widen_mul_plussu<mode>"
(match_operand:VWEXTI 2 "register_operand" " 0"))
(match_dup 2)))]
"TARGET_VECTOR"
- "vwmaccsu.vv\t%0,%3,%4%p1"
+ "%^vwmaccsu.vv\t%0,%3,%4%p1"
[(set_attr "type" "viwmuladd")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -5926,7 +5949,7 @@ (define_insn "@pred_widen_mul_plussu<mode>_scalar"
(match_operand:VWEXTI 2 "register_operand" " 0"))
(match_dup 2)))]
"TARGET_VECTOR"
- "vwmaccsu.vx\t%0,%3,%4%p1"
+ "%^vwmaccsu.vx\t%0,%3,%4%p1"
[(set_attr "type" "viwmuladd")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -5951,7 +5974,7 @@ (define_insn "@pred_widen_mul_plusus<mode>_scalar"
(match_operand:VWEXTI 2 "register_operand" " 0"))
(match_dup 2)))]
"TARGET_VECTOR"
- "vwmaccus.vx\t%0,%3,%4%p1"
+ "%^vwmaccus.vx\t%0,%3,%4%p1"
[(set_attr "type" "viwmuladd")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -5986,7 +6009,7 @@ (define_insn "@pred_<optab><mode>"
(match_operand:VB_VLS 4 "register_operand" " vr"))
(match_operand:VB_VLS 2 "vector_undef_operand" " vu")))]
"TARGET_VECTOR"
- "vm<insn>.mm\t%0,%3,%4"
+ "%^vm<insn>.mm\t%0,%3,%4"
[(set_attr "type" "vmalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "5")
@@ -6007,7 +6030,7 @@ (define_insn "@pred_n<optab><mode>"
(match_operand:VB_VLS 4 "register_operand" " vr")))
(match_operand:VB_VLS 2 "vector_undef_operand" " vu")))]
"TARGET_VECTOR"
- "vm<ninsn>.mm\t%0,%3,%4"
+ "%^vm<ninsn>.mm\t%0,%3,%4"
[(set_attr "type" "vmalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "5")
@@ -6028,7 +6051,7 @@ (define_insn "@pred_<optab>not<mode>"
(match_operand:VB_VLS 4 "register_operand" " vr")))
(match_operand:VB_VLS 2 "vector_undef_operand" " vu")))]
"TARGET_VECTOR"
- "vm<insn>n.mm\t%0,%3,%4"
+ "%^vm<insn>n.mm\t%0,%3,%4"
[(set_attr "type" "vmalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "5")
@@ -6047,7 +6070,7 @@ (define_insn "@pred_not<mode>"
(match_operand:VB_VLS 3 "register_operand" " vr"))
(match_operand:VB_VLS 2 "vector_undef_operand" " vu")))]
"TARGET_VECTOR"
- "vmnot.m\t%0,%3"
+ "%^vmnot.m\t%0,%3"
[(set_attr "type" "vmalu")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "4")
@@ -6065,7 +6088,7 @@ (define_insn "@pred_popcount<VB:mode><P:mode>"
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)))]
"TARGET_VECTOR"
- "vcpop.m\t%0,%2%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vmpopc.m\t%0,%2%p1" : "vcpop.m\t%0,%2%p1"; }
[(set_attr "type" "vmpop")
(set_attr "mode" "<VB:MODE>")])
@@ -6083,7 +6106,7 @@ (define_insn "@pred_ffs<VB:mode><P:mode>"
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE))
(const_int -1)))]
"TARGET_VECTOR"
- "vfirst.m\t%0,%2%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vmfirst.m\t%0,%2%p1" : "vfirst.m\t%0,%2%p1"; }
[(set_attr "type" "vmffs")
(set_attr "mode" "<VB:MODE>")])
@@ -6101,7 +6124,7 @@ (define_insn "@pred_<misc_op><mode>"
[(match_operand:VB 3 "register_operand" " vr, vr")] VMISC)
(match_operand:VB 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vm<misc_op>.m\t%0,%3%p1"
+ "%^vm<misc_op>.m\t%0,%3%p1"
[(set_attr "type" "vmsfs")
(set_attr "mode" "<MODE>")])
@@ -6120,7 +6143,7 @@ (define_insn "@pred_iota<mode>"
[(match_operand:<VM> 3 "register_operand" " vr, vr")] UNSPEC_VIOTA)
(match_operand:VI 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "viota.m\t%0,%3%p1"
+ "%^viota.m\t%0,%3%p1"
[(set_attr "type" "vmiota")
(set_attr "mode" "<MODE>")])
@@ -6138,7 +6161,7 @@ (define_insn "@pred_series<mode>"
(vec_series:V_VLSI (const_int 0) (const_int 1))
(match_operand:V_VLSI 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vid.v\t%0%p1"
+ "%^vid.v\t%0%p1"
[(set_attr "type" "vmidx")
(set_attr "mode" "<MODE>")])
@@ -6170,7 +6193,7 @@ (define_insn "@pred_<optab><mode>"
(match_operand:V_VLSF 4 "register_operand" " vr, vr, vr, vr"))
(match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vf<insn>.vv\t%0,%3,%4%p1"
+ "%^vf<insn>.vv\t%0,%3,%4%p1"
[(set_attr "type" "<float_insn_type>")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -6192,7 +6215,7 @@ (define_insn "@pred_<optab><mode>"
(match_operand:V_VLSF 4 "register_operand" " vr, vr, vr, vr"))
(match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vf<insn>.vv\t%0,%3,%4%p1"
+ "%^vf<insn>.vv\t%0,%3,%4%p1"
[(set_attr "type" "<float_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -6236,7 +6259,7 @@ (define_insn "@pred_<optab><mode>_scalar"
(match_operand:VF 3 "register_operand" " vr, vr, vr, vr"))
(match_operand:VF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vf<insn>.vf\t%0,%3,%4%p1"
+ "%^vf<insn>.vf\t%0,%3,%4%p1"
[(set_attr "type" "<float_insn_type>")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -6259,7 +6282,7 @@ (define_insn "@pred_<optab><mode>_scalar"
(match_operand:VF 3 "register_operand" " vr, vr, vr, vr"))
(match_operand:VF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vf<insn>.vf\t%0,%3,%4%p1"
+ "%^vf<insn>.vf\t%0,%3,%4%p1"
[(set_attr "type" "<float_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -6304,7 +6327,7 @@ (define_insn "@pred_<optab><mode>_scalar"
(match_operand:<VEL> 4 "register_operand" " f, f, f, f")))
(match_operand:VF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vf<insn>.vf\t%0,%3,%4%p1"
+ "%^vf<insn>.vf\t%0,%3,%4%p1"
[(set_attr "type" "<float_insn_type>")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -6329,7 +6352,7 @@ (define_insn "@pred_<optab><mode>_reverse_scalar"
(match_operand:VF 3 "register_operand" " vr, vr, vr, vr"))
(match_operand:VF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vfr<insn>.vf\t%0,%3,%4%p1"
+ "%^vfr<insn>.vf\t%0,%3,%4%p1"
[(set_attr "type" "<float_insn_type>")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -6351,7 +6374,7 @@ (define_insn "@pred_<copysign><mode>"
(match_operand:V_VLSF 4 "register_operand" " vr, vr, vr, vr")] VCOPYSIGNS)
(match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vfsgnj<nx>.vv\t%0,%3,%4%p1"
+ "%^vfsgnj<nx>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vfsgnj")
(set_attr "mode" "<MODE>")])
@@ -6372,7 +6395,7 @@ (define_insn "@pred_ncopysign<mode>"
(match_operand:VF 4 "register_operand" " vr, vr, vr, vr")] UNSPEC_VCOPYSIGN))
(match_operand:VF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vfsgnjn.vv\t%0,%3,%4%p1"
+ "%^vfsgnjn.vv\t%0,%3,%4%p1"
[(set_attr "type" "vfsgnj")
(set_attr "mode" "<MODE>")])
@@ -6393,7 +6416,7 @@ (define_insn "@pred_<copysign><mode>_scalar"
(match_operand:<VEL> 4 "register_operand" " f, f, f, f"))] VCOPYSIGNS)
(match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vfsgnj<nx>.vf\t%0,%3,%4%p1"
+ "%^vfsgnj<nx>.vf\t%0,%3,%4%p1"
[(set_attr "type" "vfsgnj")
(set_attr "mode" "<MODE>")])
@@ -6415,7 +6438,7 @@ (define_insn "@pred_ncopysign<mode>_scalar"
(match_operand:<VEL> 4 "register_operand" " f, f, f, f"))] UNSPEC_VCOPYSIGN))
(match_operand:VF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vfsgnjn.vf\t%0,%3,%4%p1"
+ "%^vfsgnjn.vf\t%0,%3,%4%p1"
[(set_attr "type" "vfsgnj")
(set_attr "mode" "<MODE>")])
@@ -6471,12 +6494,12 @@ (define_insn "*pred_mul_<optab><mode>_undef"
(match_operand:V_VLSF 2 "vector_undef_operand")))]
"TARGET_VECTOR"
"@
- vf<madd_msub>.vv\t%0,%4,%5%p1
- vf<macc_msac>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vf<madd_msub>.vv\t%0,%4,%5%p1
- vf<madd_msub>.vv\t%0,%4,%5%p1
- vf<macc_msac>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vf<madd_msub>.vv\t%0,%4,%5%p1"
+ %^vf<madd_msub>.vv\t%0,%4,%5%p1
+ %^vf<macc_msac>.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vf<madd_msub>.vv\t%0,%4,%5%p1
+ %^vf<madd_msub>.vv\t%0,%4,%5%p1
+ %^vf<macc_msac>.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vf<madd_msub>.vv\t%0,%4,%5%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -6503,10 +6526,10 @@ (define_insn "*pred_<madd_msub><mode>"
(match_dup 2)))]
"TARGET_VECTOR"
"@
- vf<madd_msub>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vf<madd_msub>.vv\t%0,%3,%4%p1
- vf<madd_msub>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vf<madd_msub>.vv\t%0,%3,%4%p1"
+ %^vf<madd_msub>.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%2\;%^vf<madd_msub>.vv\t%0,%3,%4%p1
+ %^vf<madd_msub>.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%2\;%^vf<madd_msub>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -6538,10 +6561,10 @@ (define_insn "*pred_<macc_msac><mode>"
(match_dup 4)))]
"TARGET_VECTOR"
"@
- vf<macc_msac>.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<macc_msac>.vv\t%0,%2,%3%p1
- vf<macc_msac>.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<macc_msac>.vv\t%0,%2,%3%p1"
+ %^vf<macc_msac>.vv\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vf<macc_msac>.vv\t%0,%2,%3%p1
+ %^vf<macc_msac>.vv\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vf<macc_msac>.vv\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -6597,10 +6620,10 @@ (define_insn "*pred_<madd_msub><mode>_scalar"
(match_dup 3)))]
"TARGET_VECTOR"
"@
- vf<madd_msub>.vf\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vf<madd_msub>.vf\t%0,%2,%4%p1
- vf<madd_msub>.vf\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vf<madd_msub>.vf\t%0,%2,%4%p1"
+ %^vf<madd_msub>.vf\t%0,%2,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vf<madd_msub>.vf\t%0,%2,%4%p1
+ %^vf<madd_msub>.vf\t%0,%2,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vf<madd_msub>.vf\t%0,%2,%4%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -6633,10 +6656,10 @@ (define_insn "*pred_<macc_msac><mode>_scalar"
(match_dup 4)))]
"TARGET_VECTOR"
"@
- vf<macc_msac>.vf\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<macc_msac>.vf\t%0,%2,%3%p1
- vf<macc_msac>.vf\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<macc_msac>.vf\t%0,%2,%3%p1"
+ %^vf<macc_msac>.vf\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vf<macc_msac>.vf\t%0,%2,%3%p1
+ %^vf<macc_msac>.vf\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vf<macc_msac>.vf\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -6694,12 +6717,12 @@ (define_insn "*pred_mul_neg_<optab><mode>_undef"
(match_operand:V_VLSF 2 "vector_undef_operand")))]
"TARGET_VECTOR"
"@
- vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
- vf<nmsac_nmacc>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
- vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
- vf<nmsac_nmacc>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vf<nmsub_nmadd>.vv\t%0,%4,%5%p1"
+ %^vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
+ %^vf<nmsac_nmacc>.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
+ %^vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
+ %^vf<nmsac_nmacc>.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vf<nmsub_nmadd>.vv\t%0,%4,%5%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -6727,10 +6750,10 @@ (define_insn "*pred_<nmsub_nmadd><mode>"
(match_dup 2)))]
"TARGET_VECTOR"
"@
- vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
- vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vf<nmsub_nmadd>.vv\t%0,%3,%4%p1"
+ %^vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%2\;%^vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
+ %^vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
+ %^vmv.v.v\t%0,%2\;%^vf<nmsub_nmadd>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -6763,10 +6786,10 @@ (define_insn "*pred_<nmsac_nmacc><mode>"
(match_dup 4)))]
"TARGET_VECTOR"
"@
- vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
- vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vv\t%0,%2,%3%p1"
+ %^vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
+ %^vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vf<nmsac_nmacc>.vv\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -6824,10 +6847,10 @@ (define_insn "*pred_<nmsub_nmadd><mode>_scalar"
(match_dup 3)))]
"TARGET_VECTOR"
"@
- vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
- vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vf<nmsub_nmadd>.vf\t%0,%2,%4%p1"
+ %^vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
+ %^vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
+ %^vmv.v.v\t%0,%3\;%^vf<nmsub_nmadd>.vf\t%0,%2,%4%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -6861,10 +6884,10 @@ (define_insn "*pred_<nmsac_nmacc><mode>_scalar"
(match_dup 4)))]
"TARGET_VECTOR"
"@
- vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
- vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vf\t%0,%2,%3%p1"
+ %^vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
+ %^vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
+ %^vmv.v.v\t%0,%4\;%^vf<nmsac_nmacc>.vf\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -6903,7 +6926,7 @@ (define_insn "@pred_<optab><mode>"
(match_operand:V_VLSF 3 "register_operand" " vr, vr, vr, vr"))
(match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vf<insn>.v\t%0,%3%p1"
+ "%^vf<insn>.v\t%0,%3%p1"
[(set_attr "type" "<float_insn_type>")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "4")
@@ -6928,7 +6951,7 @@ (define_insn "@pred_<optab><mode>"
(match_operand:V_VLSF 3 "register_operand" " vr, vr, vr, vr"))
(match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vf<insn>.v\t%0,%3%p1"
+ "%^vf<insn>.v\t%0,%3%p1"
[(set_attr "type" "<float_insn_type>")
(set_attr "mode" "<MODE>")
(set_attr "vl_op_idx" "4")
@@ -6951,7 +6974,7 @@ (define_insn "@pred_<misc_op><mode>"
[(match_operand:VF 3 "register_operand" " vr, vr, vr, vr")] VFMISC)
(match_operand:VF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vf<misc_op>.v\t%0,%3%p1"
+ "%^vf<misc_op>.v\t%0,%3%p1"
[(set_attr "type" "<float_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -6972,7 +6995,7 @@ (define_insn "@pred_<misc_frm_op><mode>"
[(match_operand:VF 3 "register_operand" " vr, vr, vr, vr")] VFMISC_FRM)
(match_operand:VF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vf<misc_frm_op>.v\t%0,%3%p1"
+ "%^vf<misc_frm_op>.v\t%0,%3%p1"
[(set_attr "type" "<float_frm_insn_type>")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -6993,7 +7016,7 @@ (define_insn "@pred_class<mode>"
[(match_operand:VF 3 "register_operand" " vr, vr, vr, vr")] UNSPEC_VFCLASS)
(match_operand:<VCONVERT> 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vfclass.v\t%0,%3%p1"
+ "%^vfclass.v\t%0,%3%p1"
[(set_attr "type" "vfclass")
(set_attr "mode" "<MODE>")])
@@ -7026,7 +7049,7 @@ (define_insn "@pred_dual_widen_<optab><mode>"
(match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr, vr")))
(match_operand:VWEXTF 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vfw<insn>.vv\t%0,%3,%4%p1"
+ "%^vfw<insn>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vf<widen_binop_insn_type>")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
@@ -7053,7 +7076,7 @@ (define_insn "@pred_dual_widen_<optab><mode>_scalar"
(match_operand:<VSUBEL> 4 "register_operand" " f, f"))))
(match_operand:VWEXTF 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vfw<insn>.vf\t%0,%3,%4%p1"
+ "%^vfw<insn>.vf\t%0,%3,%4%p1"
[(set_attr "type" "vf<widen_binop_insn_type>")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
@@ -7078,7 +7101,7 @@ (define_insn "@pred_single_widen_add<mode>"
(match_operand:VWEXTF 3 "register_operand" " vr, vr"))
(match_operand:VWEXTF 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vfwadd.wv\t%0,%3,%4%p1"
+ "%^vfwadd.wv\t%0,%3,%4%p1"
[(set_attr "type" "vfwalu")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
@@ -7103,7 +7126,7 @@ (define_insn "@pred_single_widen_sub<mode>"
(match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr, vr")))
(match_operand:VWEXTF 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vfwsub.wv\t%0,%3,%4%p1"
+ "%^vfwsub.wv\t%0,%3,%4%p1"
[(set_attr "type" "vfwalu")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
@@ -7129,7 +7152,7 @@ (define_insn "@pred_single_widen_<plus_minus:optab><mode>_scalar"
(match_operand:<VSUBEL> 4 "register_operand" " f, f"))))
(match_operand:VWEXTF 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vfw<insn>.wf\t%0,%3,%4%p1"
+ "%^vfw<insn>.wf\t%0,%3,%4%p1"
[(set_attr "type" "vf<widen_binop_insn_type>")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
@@ -7164,7 +7187,7 @@ (define_insn "@pred_widen_mul_<optab><mode>"
(match_operand:VWEXTF 2 "register_operand" " 0"))
(match_dup 2)))]
"TARGET_VECTOR"
- "vfw<macc_msac>.vv\t%0,%3,%4%p1"
+ "%^vfw<macc_msac>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vfwmuladd")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
@@ -7193,7 +7216,7 @@ (define_insn "@pred_widen_mul_<optab><mode>_scalar"
(match_operand:VWEXTF 2 "register_operand" " 0"))
(match_dup 2)))]
"TARGET_VECTOR"
- "vfw<macc_msac>.vf\t%0,%3,%4%p1"
+ "%^vfw<macc_msac>.vf\t%0,%3,%4%p1"
[(set_attr "type" "vfwmuladd")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
@@ -7222,7 +7245,7 @@ (define_insn "@pred_widen_mul_neg_<optab><mode>"
(match_operand:VWEXTF 2 "register_operand" " 0"))
(match_dup 2)))]
"TARGET_VECTOR"
- "vfw<nmsac_nmacc>.vv\t%0,%3,%4%p1"
+ "%^vfw<nmsac_nmacc>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vfwmuladd")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
@@ -7252,7 +7275,7 @@ (define_insn "@pred_widen_mul_neg_<optab><mode>_scalar"
(match_operand:VWEXTF 2 "register_operand" " 0"))
(match_dup 2)))]
"TARGET_VECTOR"
- "vfw<nmsac_nmacc>.vf\t%0,%3,%4%p1"
+ "%^vfw<nmsac_nmacc>.vf\t%0,%3,%4%p1"
[(set_attr "type" "vfwmuladd")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
@@ -7298,7 +7321,7 @@ (define_insn "*pred_cmp<mode>"
(match_operand:V_VLSF 5 "register_operand" " vr, vr")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
- "vmf%B3.vv\t%0,%4,%5%p1"
+ "%^vmf%B3.vv\t%0,%4,%5%p1"
[(set_attr "type" "vfcmp")
(set_attr "mode" "<MODE>")])
@@ -7317,7 +7340,7 @@ (define_insn "*pred_cmp<mode>_narrow_merge_tie_mask"
(match_operand:V_VLSF 4 "register_operand" " vr")])
(match_dup 1)))]
"TARGET_VECTOR"
- "vmf%B2.vv\t%0,%3,%4,v0.t"
+ "%^vmf%B2.vv\t%0,%3,%4,v0.t"
[(set_attr "type" "vfcmp")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -7341,7 +7364,7 @@ (define_insn "*pred_cmp<mode>_narrow"
(match_operand:V_VLSF 5 "register_operand" " vr, vr, 0, 0, vr, 0, 0, vr, vr")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, vu, vu, 0, 0, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
- "vmf%B3.vv\t%0,%4,%5%p1"
+ "%^vmf%B3.vv\t%0,%4,%5%p1"
[(set_attr "type" "vfcmp")
(set_attr "mode" "<MODE>")])
@@ -7379,7 +7402,7 @@ (define_insn "*pred_cmp<mode>_scalar_merge_tie_mask"
(match_operand:<VEL> 4 "register_operand" " f"))])
(match_dup 1)))]
"TARGET_VECTOR"
- "vmf%B2.vf\t%0,%3,%4,v0.t"
+ "%^vmf%B2.vf\t%0,%3,%4,v0.t"
[(set_attr "type" "vfcmp")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -7404,7 +7427,7 @@ (define_insn "*pred_cmp<mode>_scalar"
(match_operand:<VEL> 5 "register_operand" " f, f"))])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
- "vmf%B3.vf\t%0,%4,%5%p1"
+ "%^vmf%B3.vf\t%0,%4,%5%p1"
[(set_attr "type" "vfcmp")
(set_attr "mode" "<MODE>")])
@@ -7425,7 +7448,7 @@ (define_insn "*pred_cmp<mode>_scalar_narrow"
(match_operand:<VEL> 5 "register_operand" " f, f, f, f, f"))])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
- "vmf%B3.vf\t%0,%4,%5%p1"
+ "%^vmf%B3.vf\t%0,%4,%5%p1"
[(set_attr "type" "vfcmp")
(set_attr "mode" "<MODE>")])
@@ -7463,7 +7486,7 @@ (define_insn "*pred_eqne<mode>_scalar_merge_tie_mask"
(match_operand:V_VLSF 3 "register_operand" " vr")])
(match_dup 1)))]
"TARGET_VECTOR"
- "vmf%B2.vf\t%0,%3,%4,v0.t"
+ "%^vmf%B2.vf\t%0,%3,%4,v0.t"
[(set_attr "type" "vfcmp")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "1")
@@ -7488,7 +7511,7 @@ (define_insn "*pred_eqne<mode>_scalar"
(match_operand:V_VLSF 4 "register_operand" " vr, vr")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
- "vmf%B3.vf\t%0,%4,%5%p1"
+ "%^vmf%B3.vf\t%0,%4,%5%p1"
[(set_attr "type" "vfcmp")
(set_attr "mode" "<MODE>")])
@@ -7509,7 +7532,7 @@ (define_insn "*pred_eqne<mode>_scalar_narrow"
(match_operand:V_VLSF 4 "register_operand" " vr, 0, 0, vr, vr")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, 0, vu, 0")))]
"TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
- "vmf%B3.vf\t%0,%4,%5%p1"
+ "%^vmf%B3.vf\t%0,%4,%5%p1"
[(set_attr "type" "vfcmp")
(set_attr "mode" "<MODE>")])
@@ -7536,7 +7559,7 @@ (define_insn "@pred_merge<mode>_scalar"
(match_operand:<VM> 4 "register_operand" " vm,vm"))
(match_operand:V_VLSF 1 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vfmerge.vfm\t%0,%2,%3,%4"
+ "%^vfmerge.vfm\t%0,%2,%3,%4"
[(set_attr "type" "vfmerge")
(set_attr "mode" "<MODE>")])
@@ -7564,7 +7587,7 @@ (define_insn "@pred_fcvt_x<v_su>_f<mode>"
[(match_operand:V_VLSF 3 "register_operand" " vr, vr, vr, vr")] VFCVTS)
(match_operand:<VCONVERT> 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vfcvt.x<v_su>.f.v\t%0,%3%p1"
+ "%^vfcvt.x<v_su>.f.v\t%0,%3%p1"
[(set_attr "type" "vfcvtftoi")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -7584,7 +7607,7 @@ (define_insn "@pred_<fix_cvt><mode>"
(any_fix:<VCONVERT>
(match_operand:V_VLSF 3 "register_operand" " vr, vr, vr, vr"))
(match_operand:<VCONVERT> 2 "vector_merge_operand" " vu, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"vfcvt.rtz.x<u>.f.v\t%0,%3%p1"
[(set_attr "type" "vfcvtftoi")
(set_attr "mode" "<MODE>")])
@@ -7606,7 +7629,7 @@ (define_insn "@pred_<float_cvt><mode>"
(match_operand:<VCONVERT> 3 "register_operand" " vr, vr, vr, vr"))
(match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vfcvt.f.x<u>.v\t%0,%3%p1"
+ "%^vfcvt.f.x<u>.v\t%0,%3%p1"
[(set_attr "type" "vfcvtitof")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -7636,7 +7659,7 @@ (define_insn "@pred_widen_fcvt_x<v_su>_f<mode>"
[(match_operand:<VNCONVERT> 3 "register_operand" " vr, vr")] VFCVTS)
(match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vfwcvt.x<v_su>.f.v\t%0,%3%p1"
+ "%^vfwcvt.x<v_su>.f.v\t%0,%3%p1"
[(set_attr "type" "vfwcvtftoi")
(set_attr "mode" "<VNCONVERT>")
(set (attr "frm_mode")
@@ -7656,7 +7679,7 @@ (define_insn "@pred_widen_<fix_cvt><mode>"
(any_fix:VWCONVERTI
(match_operand:<VNCONVERT> 3 "register_operand" " vr, vr"))
(match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"vfwcvt.rtz.x<u>.f.v\t%0,%3%p1"
[(set_attr "type" "vfwcvtftoi")
(set_attr "mode" "<VNCONVERT>")])
@@ -7676,7 +7699,7 @@ (define_insn "@pred_widen_<float_cvt><mode>"
(match_operand:<VNCONVERT> 3 "register_operand" " vr, vr"))
(match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vfwcvt.f.x<u>.v\t%0,%3%p1"
+ "%^vfwcvt.f.x<u>.v\t%0,%3%p1"
[(set_attr "type" "vfwcvtitof")
(set_attr "mode" "<VNCONVERT>")])
@@ -7695,7 +7718,7 @@ (define_insn "@pred_extend<mode>"
(match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
(match_operand:VWEXTF_ZVFHMIN 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vfwcvt.f.f.v\t%0,%3%p1"
+ "%^vfwcvt.f.f.v\t%0,%3%p1"
[(set_attr "type" "vfwcvtftof")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -7723,7 +7746,7 @@ (define_insn "@pred_narrow_fcvt_x<v_su>_f<mode>"
[(match_operand:V_VLSF 3 "register_operand" " 0, 0, 0, 0, vr, vr")] VFCVTS)
(match_operand:<VNCONVERT> 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vfncvt.x<v_su>.f.w\t%0,%3%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vfncvt.x<v_su>.f.v\t%0,%3%p1" : "vfncvt.x<v_su>.f.w\t%0,%3%p1"; }
[(set_attr "type" "vfncvtftoi")
(set_attr "mode" "<VNCONVERT>")
(set (attr "frm_mode")
@@ -7743,7 +7766,7 @@ (define_insn "@pred_narrow_<fix_cvt><mode>"
(any_fix:<VNCONVERT>
(match_operand:V_VLSF 3 "register_operand" " 0, 0, 0, 0, vr, vr"))
(match_operand:<VNCONVERT> 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"vfncvt.rtz.x<u>.f.w\t%0,%3%p1"
[(set_attr "type" "vfncvtftoi")
(set_attr "mode" "<VNCONVERT>")])
@@ -7765,7 +7788,7 @@ (define_insn "@pred_narrow_<float_cvt><mode>"
(match_operand:VWCONVERTI 3 "register_operand" " 0, 0, 0, 0, vr, vr"))
(match_operand:<VNCONVERT> 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vfncvt.f.x<u>.w\t%0,%3%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vfncvt.f.x<u>.v\t%0,%3%p1" : "vfncvt.f.x<u>.w\t%0,%3%p1"; }
[(set_attr "type" "vfncvtitof")
(set_attr "mode" "<VNCONVERT>")
(set (attr "frm_mode")
@@ -7788,7 +7811,7 @@ (define_insn "@pred_trunc<mode>"
(match_operand:VWEXTF_ZVFHMIN 3 "register_operand" " 0, 0, 0, 0, vr, vr"))
(match_operand:<V_DOUBLE_TRUNC> 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
- "vfncvt.f.f.w\t%0,%3%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vfncvt.f.f.v\t%0,%3%p1" : "vfncvt.f.f.w\t%0,%3%p1"; }
[(set_attr "type" "vfncvtftof")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
@@ -7809,7 +7832,7 @@ (define_insn "@pred_rod_trunc<mode>"
[(float_truncate:<V_DOUBLE_TRUNC>
(match_operand:VWEXTF_ZVFHMIN 3 "register_operand" " 0, 0, 0, 0, vr, vr"))] UNSPEC_ROD)
(match_operand:<V_DOUBLE_TRUNC> 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"vfncvt.rod.f.f.w\t%0,%3%p1"
[(set_attr "type" "vfncvtftof")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
@@ -7841,7 +7864,7 @@ (define_insn "@pred_<reduc_op><mode>"
] ANY_REDUC)
(match_operand:<V_LMUL1> 2 "vector_merge_operand" " vu, 0")] UNSPEC_REDUC))]
"TARGET_VECTOR"
- "v<reduc_op>.vs\t%0,%3,%4%p1"
+ "%^v<reduc_op>.vs\t%0,%3,%4%p1"
[(set_attr "type" "vired")
(set_attr "mode" "<MODE>")])
@@ -7862,7 +7885,7 @@ (define_insn "@pred_<reduc_op><mode>"
] ANY_WREDUC)
(match_operand:<V_EXT_LMUL1> 2 "vector_merge_operand" " vu, 0")] UNSPEC_REDUC))]
"TARGET_VECTOR"
- "v<reduc_op>.vs\t%0,%3,%4%p1"
+ "%^v<reduc_op>.vs\t%0,%3,%4%p1"
[(set_attr "type" "viwred")
(set_attr "mode" "<MODE>")])
@@ -7883,7 +7906,7 @@ (define_insn "@pred_<reduc_op><mode>"
] ANY_FREDUC)
(match_operand:<V_LMUL1> 2 "vector_merge_operand" " vu, 0")] UNSPEC_REDUC))]
"TARGET_VECTOR"
- "vf<reduc_op>.vs\t%0,%3,%4%p1"
+ "%^vf<reduc_op>.vs\t%0,%3,%4%p1"
[(set_attr "type" "vfredu")
(set_attr "mode" "<MODE>")])
@@ -7906,7 +7929,7 @@ (define_insn "@pred_<reduc_op><mode>"
] ANY_FREDUC_SUM)
(match_operand:<V_LMUL1> 2 "vector_merge_operand" " vu, 0")] UNSPEC_REDUC))]
"TARGET_VECTOR"
- "vf<reduc_op>.vs\t%0,%3,%4%p1"
+ "%^vf<reduc_op>.vs\t%0,%3,%4%p1"
[(set_attr "type" "vfred<order>")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -7931,7 +7954,7 @@ (define_insn "@pred_<reduc_op><mode>"
] ANY_FWREDUC_SUM)
(match_operand:<V_EXT_LMUL1> 2 "vector_merge_operand" " vu, 0")] UNSPEC_REDUC))]
"TARGET_VECTOR"
- "vf<reduc_op>.vs\t%0,%3,%4%p1"
+ "%^vf<reduc_op>.vs\t%0,%3,%4%p1"
[(set_attr "type" "vfwred<order>")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -7973,7 +7996,7 @@ (define_insn_and_split "*pred_extract_first<mode>"
(parallel [(const_int 0)]))
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE))]
"TARGET_VECTOR"
- "vmv.x.s\t%0,%1"
+ "%^vmv.x.s\t%0,%1"
"known_gt (GET_MODE_BITSIZE (<VEL>mode), GET_MODE_BITSIZE (Pmode))"
[(const_int 0)]
{
@@ -8007,7 +8030,7 @@ (define_insn "@pred_extract_first_trunc<mode>"
(parallel [(const_int 0)]))
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)))]
"TARGET_VECTOR"
- "vmv.x.s\t%0,%1"
+ "%^vmv.x.s\t%0,%1"
[(set_attr "type" "vimovvx")
(set_attr "mode" "<MODE>")])
@@ -8036,7 +8059,7 @@ (define_insn "*pred_extract_first<mode>"
(parallel [(const_int 0)]))
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE))]
"TARGET_VECTOR"
- "vfmv.f.s\t%0,%1"
+ "%^vfmv.f.s\t%0,%1"
[(set_attr "type" "vfmovvf")
(set_attr "mode" "<MODE>")])
@@ -8056,7 +8079,7 @@ (define_insn "@pred_slide<ud><mode>"
(match_operand:V_VLS 3 "register_operand" " vr, vr, vr, vr")
(match_operand 4 "pmode_reg_or_uimm5_operand" " rK, rK, rK, rK")] VSLIDES))]
"TARGET_VECTOR"
- "vslide<ud>.v%o4\t%0,%3,%4%p1"
+ "%^vslide<ud>.v%o4\t%0,%3,%4%p1"
[(set_attr "type" "vslide<ud>")
(set_attr "mode" "<MODE>")])
@@ -8076,7 +8099,7 @@ (define_insn "@pred_slide<ud><mode>"
(match_operand:V_VLSI_QHS 3 "register_operand" " vr, vr, vr, vr")
(match_operand:<VEL> 4 "reg_or_0_operand" " rJ, rJ, rJ, rJ")] VSLIDES1))]
"TARGET_VECTOR"
- "vslide<ud>.vx\t%0,%3,%z4%p1"
+ "%^vslide<ud>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vislide<ud>")
(set_attr "mode" "<MODE>")])
@@ -8117,7 +8140,7 @@ (define_insn "*pred_slide<ud><mode>"
(match_operand:V_VLSI_D 3 "register_operand" " vr, vr, vr, vr")
(match_operand:<VEL> 4 "reg_or_0_operand" " rJ, rJ, rJ, rJ")] VSLIDES1))]
"TARGET_VECTOR"
- "vslide<ud>.vx\t%0,%3,%z4%p1"
+ "%^vslide<ud>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vislide<ud>")
(set_attr "mode" "<MODE>")])
@@ -8137,7 +8160,7 @@ (define_insn "*pred_slide<ud><mode>_extended"
(sign_extend:<VEL>
(match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ, rJ, rJ"))] VSLIDES1))]
"TARGET_VECTOR"
- "vslide<ud>.vx\t%0,%3,%z4%p1"
+ "%^vslide<ud>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vislide<ud>")
(set_attr "mode" "<MODE>")])
@@ -8157,7 +8180,7 @@ (define_insn "@pred_slide<ud><mode>"
(match_operand:V_VLSF 3 "register_operand" " vr, vr, vr, vr")
(match_operand:<VEL> 4 "register_operand" " f, f, f, f")] VFSLIDES1))]
"TARGET_VECTOR"
- "vfslide<ud>.vf\t%0,%3,%4%p1"
+ "%^vfslide<ud>.vf\t%0,%3,%4%p1"
[(set_attr "type" "vfslide<ud>")
(set_attr "mode" "<MODE>")])
@@ -8178,7 +8201,7 @@ (define_insn "@pred_gather<mode>"
(match_operand:<VINDEX> 4 "register_operand" " vr, vr")] UNSPEC_VRGATHER)
(match_operand:V_VLS 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vrgather.vv\t%0,%3,%4%p1"
+ "%^vrgather.vv\t%0,%3,%4%p1"
[(set_attr "type" "vgather")
(set_attr "mode" "<MODE>")])
@@ -8198,7 +8221,7 @@ (define_insn "@pred_gather<mode>_scalar"
(match_operand 4 "pmode_reg_or_uimm5_operand" " rK, rK")] UNSPEC_VRGATHER)
(match_operand:V_VLS 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vrgather.v%o4\t%0,%3,%4%p1"
+ "%^vrgather.v%o4\t%0,%3,%4%p1"
[(set_attr "type" "vgather")
(set_attr "mode" "<MODE>")])
@@ -8219,7 +8242,7 @@ (define_insn "@pred_gatherei16<mode>"
(match_operand:<VINDEXEI16> 4 "register_operand" " vr, vr")] UNSPEC_VRGATHEREI16)
(match_operand:VEI16 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vrgatherei16.vv\t%0,%3,%4%p1"
+ "%^vrgatherei16.vv\t%0,%3,%4%p1"
[(set_attr "type" "vgather")
(set_attr "mode" "<MODE>")])
@@ -8237,7 +8260,7 @@ (define_insn "@pred_compress<mode>"
(match_operand:V_VLS 2 "register_operand" " vr, vr")
(match_operand:V_VLS 1 "vector_merge_operand" " vu, 0")] UNSPEC_VCOMPRESS))]
"TARGET_VECTOR"
- "vcompress.vm\t%0,%2,%3"
+ "%^vcompress.vm\t%0,%2,%3"
[(set_attr "type" "vcompress")
(set_attr "mode" "<MODE>")])
@@ -8288,7 +8311,7 @@ (define_insn "@pred_fault_load<mode>"
(unspec:V [(match_dup 3)] UNSPEC_VLEFF)
(match_dup 2))] UNSPEC_MODIFY_VL))]
"TARGET_VECTOR"
- "vle<sew>ff.v\t%0,%3%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vleff.v\t%0,%3%p1" : "vle<sew>ff.v\t%0,%3%p1"; }
[(set_attr "type" "vldff")
(set_attr "mode" "<MODE>")])
@@ -8318,7 +8341,7 @@ (define_insn "@pred_unit_strided_load<mode>"
(mem:BLK (scratch))] UNSPEC_UNIT_STRIDED)
(match_operand:VT 2 "vector_merge_operand" " 0, vu, vu")))]
"TARGET_VECTOR"
- "vlseg<nf>e<sew>.v\t%0,(%z3)%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vlseg<nf>e.v\t%0,(%z3)%p1" : "vlseg<nf>e<sew>.v\t%0,(%z3)%p1"; }
[(set_attr "type" "vlsegde")
(set_attr "mode" "<MODE>")])
@@ -8335,7 +8358,7 @@ (define_insn "@pred_unit_strided_store<mode>"
(match_operand:VT 2 "register_operand" " vr")
(mem:BLK (scratch))] UNSPEC_UNIT_STRIDED))]
"TARGET_VECTOR"
- "vsseg<nf>e<sew>.v\t%2,(%z1)%p0"
+ { return TARGET_XTHEADVECTOR ? "th.vsseg<nf>e.v\t%2,(%z1)%p0" : "vsseg<nf>e<sew>.v\t%2,(%z1)%p0"; }
[(set_attr "type" "vssegte")
(set_attr "mode" "<MODE>")])
@@ -8356,7 +8379,7 @@ (define_insn "@pred_strided_load<mode>"
(mem:BLK (scratch))] UNSPEC_STRIDED)
(match_operand:VT 2 "vector_merge_operand" " 0, vu, vu")))]
"TARGET_VECTOR"
- "vlsseg<nf>e<sew>.v\t%0,(%z3),%z4%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vlsseg<nf>e.v\t%0,(%z3),%z4%p1" : "vlsseg<nf>e<sew>.v\t%0,(%z3),%z4%p1"; }
[(set_attr "type" "vlsegds")
(set_attr "mode" "<MODE>")])
@@ -8374,7 +8397,7 @@ (define_insn "@pred_strided_store<mode>"
(match_operand:VT 3 "register_operand" " vr")
(mem:BLK (scratch))] UNSPEC_STRIDED))]
"TARGET_VECTOR"
- "vssseg<nf>e<sew>.v\t%3,(%z1),%z2%p0"
+ { return TARGET_XTHEADVECTOR ? "th.vssseg<nf>e.v\t%3,(%z1),%z2%p0" : "vssseg<nf>e<sew>.v\t%3,(%z1),%z2%p0"; }
[(set_attr "type" "vssegts")
(set_attr "mode" "<MODE>")])
@@ -8405,7 +8428,7 @@ (define_insn "@pred_fault_load<mode>"
[(match_dup 3) (mem:BLK (scratch))] UNSPEC_VLEFF)
(match_dup 2))] UNSPEC_MODIFY_VL))]
"TARGET_VECTOR"
- "vlseg<nf>e<sew>ff.v\t%0,(%z3)%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vlseg<nf>eff.v\t%0,(%z3)%p1" : "vlseg<nf>e<sew>ff.v\t%0,(%z3)%p1"; }
[(set_attr "type" "vlsegdff")
(set_attr "mode" "<MODE>")])
@@ -8426,7 +8449,7 @@ (define_insn "@pred_indexed_<order>load<V1T:mode><RATIO64I:mode>"
(match_operand:RATIO64I 4 "register_operand" " vr, vr")] ORDER)
(match_operand:V1T 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vl<order>xseg<nf>ei<RATIO64I:sew>.v\t%0,(%z3),%4%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vlxseg<nf>e.v\t%0,(%z3),%4%p1" : "vl<order>xseg<nf>ei<RATIO64I:sew>.v\t%0,(%z3),%4%p1"; }
[(set_attr "type" "vlsegd<order>x")
(set_attr "mode" "<V1T:MODE>")])
@@ -8447,7 +8470,7 @@ (define_insn "@pred_indexed_<order>load<V2T:mode><RATIO32I:mode>"
(match_operand:RATIO32I 4 "register_operand" " vr, vr")] ORDER)
(match_operand:V2T 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vl<order>xseg<nf>ei<RATIO32I:sew>.v\t%0,(%z3),%4%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vlxseg<nf>e.v\t%0,(%z3),%4%p1" : "vl<order>xseg<nf>ei<RATIO32I:sew>.v\t%0,(%z3),%4%p1"; }
[(set_attr "type" "vlsegd<order>x")
(set_attr "mode" "<V2T:MODE>")])
@@ -8468,7 +8491,7 @@ (define_insn "@pred_indexed_<order>load<V4T:mode><RATIO16I:mode>"
(match_operand:RATIO16I 4 "register_operand" " vr, vr")] ORDER)
(match_operand:V4T 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vl<order>xseg<nf>ei<RATIO16I:sew>.v\t%0,(%z3),%4%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vlxseg<nf>e.v\t%0,(%z3),%4%p1" : "vl<order>xseg<nf>ei<RATIO16I:sew>.v\t%0,(%z3),%4%p1"; }
[(set_attr "type" "vlsegd<order>x")
(set_attr "mode" "<V4T:MODE>")])
@@ -8489,7 +8512,7 @@ (define_insn "@pred_indexed_<order>load<V8T:mode><RATIO8I:mode>"
(match_operand:RATIO8I 4 "register_operand" " vr, vr")] ORDER)
(match_operand:V8T 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vl<order>xseg<nf>ei<RATIO8I:sew>.v\t%0,(%z3),%4%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vlxseg<nf>e.v\t%0,(%z3),%4%p1" : "vl<order>xseg<nf>ei<RATIO8I:sew>.v\t%0,(%z3),%4%p1"; }
[(set_attr "type" "vlsegd<order>x")
(set_attr "mode" "<V8T:MODE>")])
@@ -8510,7 +8533,7 @@ (define_insn "@pred_indexed_<order>load<V16T:mode><RATIO4I:mode>"
(match_operand:RATIO4I 4 "register_operand" " vr, vr")] ORDER)
(match_operand:V16T 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vl<order>xseg<nf>ei<RATIO4I:sew>.v\t%0,(%z3),%4%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vlxseg<nf>e.v\t%0,(%z3),%4%p1" : "vl<order>xseg<nf>ei<RATIO4I:sew>.v\t%0,(%z3),%4%p1"; }
[(set_attr "type" "vlsegd<order>x")
(set_attr "mode" "<V16T:MODE>")])
@@ -8531,7 +8554,7 @@ (define_insn "@pred_indexed_<order>load<V32T:mode><RATIO2I:mode>"
(match_operand:RATIO2I 4 "register_operand" " vr, vr")] ORDER)
(match_operand:V32T 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
- "vl<order>xseg<nf>ei<RATIO2I:sew>.v\t%0,(%z3),%4%p1"
+ { return TARGET_XTHEADVECTOR ? "th.vlxseg<nf>e.v\t%0,(%z3),%4%p1" : "vl<order>xseg<nf>ei<RATIO2I:sew>.v\t%0,(%z3),%4%p1"; }
[(set_attr "type" "vlsegd<order>x")
(set_attr "mode" "<V32T:MODE>")])
@@ -8548,7 +8571,7 @@ (define_insn "@pred_indexed_<order>store<V1T:mode><RATIO64I:mode>"
(match_operand:RATIO64I 2 "register_operand" " vr")
(match_operand:V1T 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
- "vs<order>xseg<nf>ei<RATIO64I:sew>.v\t%3,(%z1),%2%p0"
+ { return TARGET_XTHEADVECTOR ? "th.vsxseg<nf>e.v\t%3,(%z1),%2%p0" : "vs<order>xseg<nf>ei<RATIO64I:sew>.v\t%3,(%z1),%2%p0"; }
[(set_attr "type" "vssegt<order>x")
(set_attr "mode" "<V1T:MODE>")])
@@ -8565,7 +8588,7 @@ (define_insn "@pred_indexed_<order>store<V2T:mode><RATIO32I:mode>"
(match_operand:RATIO32I 2 "register_operand" " vr")
(match_operand:V2T 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
- "vs<order>xseg<nf>ei<RATIO32I:sew>.v\t%3,(%z1),%2%p0"
+ { return TARGET_XTHEADVECTOR ? "th.vsxseg<nf>e.v\t%3,(%z1),%2%p0" : "vs<order>xseg<nf>ei<RATIO32I:sew>.v\t%3,(%z1),%2%p0"; }
[(set_attr "type" "vssegt<order>x")
(set_attr "mode" "<V2T:MODE>")])
@@ -8582,7 +8605,7 @@ (define_insn "@pred_indexed_<order>store<V4T:mode><RATIO16I:mode>"
(match_operand:RATIO16I 2 "register_operand" " vr")
(match_operand:V4T 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
- "vs<order>xseg<nf>ei<RATIO16I:sew>.v\t%3,(%z1),%2%p0"
+ { return TARGET_XTHEADVECTOR ? "th.vsxseg<nf>e.v\t%3,(%z1),%2%p0" : "vs<order>xseg<nf>ei<RATIO16I:sew>.v\t%3,(%z1),%2%p0"; }
[(set_attr "type" "vssegt<order>x")
(set_attr "mode" "<V4T:MODE>")])
@@ -8599,7 +8622,7 @@ (define_insn "@pred_indexed_<order>store<V8T:mode><RATIO8I:mode>"
(match_operand:RATIO8I 2 "register_operand" " vr")
(match_operand:V8T 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
- "vs<order>xseg<nf>ei<RATIO8I:sew>.v\t%3,(%z1),%2%p0"
+ { return TARGET_XTHEADVECTOR ? "th.vsxseg<nf>e.v\t%3,(%z1),%2%p0" : "vs<order>xseg<nf>ei<RATIO8I:sew>.v\t%3,(%z1),%2%p0"; }
[(set_attr "type" "vssegt<order>x")
(set_attr "mode" "<V8T:MODE>")])
@@ -8616,7 +8639,7 @@ (define_insn "@pred_indexed_<order>store<V16T:mode><RATIO4I:mode>"
(match_operand:RATIO4I 2 "register_operand" " vr")
(match_operand:V16T 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
- "vs<order>xseg<nf>ei<RATIO4I:sew>.v\t%3,(%z1),%2%p0"
+ { return TARGET_XTHEADVECTOR ? "th.vsxseg<nf>e.v\t%3,(%z1),%2%p0" : "vs<order>xseg<nf>ei<RATIO4I:sew>.v\t%3,(%z1),%2%p0"; }
[(set_attr "type" "vssegt<order>x")
(set_attr "mode" "<V16T:MODE>")])
@@ -8633,7 +8656,7 @@ (define_insn "@pred_indexed_<order>store<V32T:mode><RATIO2I:mode>"
(match_operand:RATIO2I 2 "register_operand" " vr")
(match_operand:V32T 3 "register_operand" " vr")] ORDER))]
"TARGET_VECTOR"
- "vs<order>xseg<nf>ei<RATIO2I:sew>.v\t%3,(%z1),%2%p0"
+ { return TARGET_XTHEADVECTOR ? "th.vsxseg<nf>e.v\t%3,(%z1),%2%p0" : "vs<order>xseg<nf>ei<RATIO2I:sew>.v\t%3,(%z1),%2%p0"; }
[(set_attr "type" "vssegt<order>x")
(set_attr "mode" "<V32T:MODE>")])
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pragma-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pragma-1.c
index 3d81b179235..ef329e30785 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/pragma-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pragma-1.c
@@ -1,4 +1,4 @@
/* { dg-do compile } */
/* { dg-options "-O3 -march=rv32gc -mabi=ilp32d" } */
-#pragma riscv intrinsic "vector" /* { dg-error {#pragma riscv intrinsic' option 'vector' needs 'V' extension enabled} } */
+#pragma riscv intrinsic "vector" /* { dg-error {#pragma riscv intrinsic' option 'vector' needs 'V' or 'XTHEADVECTOR' extension enabled} } */
--
2.17.1
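
A minimal sketch of the three output-template mechanisms the diff relies on (illustrative only: the register numbers are made up, the mask operand printed by `%p1' is ignored, and it is assumed that `%^' prints the "th." prefix only when XTheadVector is enabled and nothing under plain 'V'):

    ;; One template, two encodings:
    "%^vmacc.vv\t%0,%2,%3%p1"
        ->    vmacc.vv  v1,v2,v3     (V)
        -> th.vmacc.vv  v1,v2,v3     (XTheadVector)

Where the mnemonic differs by more than the prefix (vcpop.m vs. th.vmpopc.m, vfirst.m vs. th.vmfirst.m, vfncvt.x.f.w vs. th.vfncvt.x.f.v, the segment loads/stores without the <sew> suffix), the output template is instead a C block that selects the full string on TARGET_XTHEADVECTOR; patterns with no XTheadVector encoding at all (vfcvt.rtz.*, vfwcvt.rtz.*, vfncvt.rtz.*, vfncvt.rod.f.f.w) are simply gated with "&& !TARGET_XTHEADVECTOR".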