UNSPEC_VUNDEF
UNSPEC_VPREDICATE
UNSPEC_VLMAX
+ UNSPEC_UNIT_STRIDED
UNSPEC_STRIDED
;; It's used to specify an ordered/unordered operation.
(VNx8x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
])
+(define_mode_iterator V1I [
+ (VNx1QI "TARGET_MIN_VLEN < 128")
+ (VNx1HI "TARGET_MIN_VLEN < 128")
+ (VNx1SI "TARGET_MIN_VLEN < 128")
+ (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+])
+
+(define_mode_iterator V2I [
+ VNx2QI
+ VNx2HI
+ VNx2SI
+ (VNx2DI "TARGET_VECTOR_ELEN_64")
+])
+
+(define_mode_iterator V4I [
+ VNx4QI
+ VNx4HI
+ VNx4SI
+ (VNx4DI "TARGET_VECTOR_ELEN_64")
+])
+
+(define_mode_iterator V8I [
+ VNx8QI
+ VNx8HI
+ VNx8SI
+ (VNx8DI "TARGET_VECTOR_ELEN_64")
+])
+
+(define_mode_iterator V16I [
+ VNx16QI
+ VNx16HI
+ (VNx16SI "TARGET_MIN_VLEN > 32")
+ (VNx16DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V32I [
+ VNx32QI
+ (VNx32HI "TARGET_MIN_VLEN > 32")
+ (VNx32SI "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V64I [
+ (VNx64QI "TARGET_MIN_VLEN > 32")
+ (VNx64HI "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V1T [
+ (VNx2x1QI "TARGET_MIN_VLEN < 128")
+ (VNx3x1QI "TARGET_MIN_VLEN < 128")
+ (VNx4x1QI "TARGET_MIN_VLEN < 128")
+ (VNx5x1QI "TARGET_MIN_VLEN < 128")
+ (VNx6x1QI "TARGET_MIN_VLEN < 128")
+ (VNx7x1QI "TARGET_MIN_VLEN < 128")
+ (VNx8x1QI "TARGET_MIN_VLEN < 128")
+ (VNx2x1HI "TARGET_MIN_VLEN < 128")
+ (VNx3x1HI "TARGET_MIN_VLEN < 128")
+ (VNx4x1HI "TARGET_MIN_VLEN < 128")
+ (VNx5x1HI "TARGET_MIN_VLEN < 128")
+ (VNx6x1HI "TARGET_MIN_VLEN < 128")
+ (VNx7x1HI "TARGET_MIN_VLEN < 128")
+ (VNx8x1HI "TARGET_MIN_VLEN < 128")
+ (VNx2x1SI "TARGET_MIN_VLEN < 128")
+ (VNx3x1SI "TARGET_MIN_VLEN < 128")
+ (VNx4x1SI "TARGET_MIN_VLEN < 128")
+ (VNx5x1SI "TARGET_MIN_VLEN < 128")
+ (VNx6x1SI "TARGET_MIN_VLEN < 128")
+ (VNx7x1SI "TARGET_MIN_VLEN < 128")
+ (VNx8x1SI "TARGET_MIN_VLEN < 128")
+ (VNx2x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+ (VNx3x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+ (VNx4x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+ (VNx5x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+ (VNx6x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+ (VNx7x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+ (VNx8x1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+ (VNx2x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+ (VNx3x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+ (VNx4x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+ (VNx5x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+ (VNx6x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+ (VNx7x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+ (VNx8x1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+ (VNx2x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+ (VNx3x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+ (VNx4x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+ (VNx5x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+ (VNx6x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+ (VNx7x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+ (VNx8x1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
+])
+
+(define_mode_iterator V2T [
+ VNx2x2QI
+ VNx3x2QI
+ VNx4x2QI
+ VNx5x2QI
+ VNx6x2QI
+ VNx7x2QI
+ VNx8x2QI
+ VNx2x2HI
+ VNx3x2HI
+ VNx4x2HI
+ VNx5x2HI
+ VNx6x2HI
+ VNx7x2HI
+ VNx8x2HI
+ VNx2x2SI
+ VNx3x2SI
+ VNx4x2SI
+ (VNx5x2SI "TARGET_MIN_VLEN >= 64")
+ (VNx6x2SI "TARGET_MIN_VLEN >= 64")
+ (VNx7x2SI "TARGET_MIN_VLEN >= 64")
+ (VNx8x2SI "TARGET_MIN_VLEN >= 64")
+ (VNx2x2DI "TARGET_VECTOR_ELEN_64")
+ (VNx3x2DI "TARGET_VECTOR_ELEN_64")
+ (VNx4x2DI "TARGET_VECTOR_ELEN_64")
+ (VNx5x2DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+ (VNx6x2DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+ (VNx7x2DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+ (VNx8x2DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+ (VNx2x2SF "TARGET_VECTOR_ELEN_FP_32")
+ (VNx3x2SF "TARGET_VECTOR_ELEN_FP_32")
+ (VNx4x2SF "TARGET_VECTOR_ELEN_FP_32")
+ (VNx5x2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+ (VNx6x2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+ (VNx7x2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+ (VNx8x2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+ (VNx2x2DF "TARGET_VECTOR_ELEN_FP_64")
+ (VNx3x2DF "TARGET_VECTOR_ELEN_FP_64")
+ (VNx4x2DF "TARGET_VECTOR_ELEN_FP_64")
+ (VNx5x2DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+ (VNx6x2DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+ (VNx7x2DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+ (VNx8x2DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V4T [
+ VNx2x4QI
+ VNx3x4QI
+ VNx4x4QI
+ VNx5x4QI
+ VNx6x4QI
+ VNx7x4QI
+ VNx8x4QI
+ VNx2x4HI
+ VNx3x4HI
+ VNx4x4HI
+ (VNx5x4HI "TARGET_MIN_VLEN >= 64")
+ (VNx6x4HI "TARGET_MIN_VLEN >= 64")
+ (VNx7x4HI "TARGET_MIN_VLEN >= 64")
+ (VNx8x4HI "TARGET_MIN_VLEN >= 64")
+ VNx2x4SI
+ (VNx3x4SI "TARGET_MIN_VLEN >= 64")
+ (VNx4x4SI "TARGET_MIN_VLEN >= 64")
+ (VNx5x4SI "TARGET_MIN_VLEN >= 128")
+ (VNx6x4SI "TARGET_MIN_VLEN >= 128")
+ (VNx7x4SI "TARGET_MIN_VLEN >= 128")
+ (VNx8x4SI "TARGET_MIN_VLEN >= 128")
+ (VNx2x4DI "TARGET_VECTOR_ELEN_64")
+ (VNx3x4DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+ (VNx4x4DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+ (VNx2x4SF "TARGET_VECTOR_ELEN_FP_32")
+ (VNx3x4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+ (VNx4x4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+ (VNx5x4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+ (VNx6x4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+ (VNx7x4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+ (VNx8x4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+ (VNx2x4DF "TARGET_VECTOR_ELEN_FP_64")
+ (VNx3x4DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+ (VNx4x4DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V8T [
+ VNx2x8QI
+ VNx3x8QI
+ VNx4x8QI
+ (VNx5x8QI "TARGET_MIN_VLEN >= 64")
+ (VNx6x8QI "TARGET_MIN_VLEN >= 64")
+ (VNx7x8QI "TARGET_MIN_VLEN >= 64")
+ (VNx8x8QI "TARGET_MIN_VLEN >= 64")
+ VNx2x8HI
+ (VNx3x8HI "TARGET_MIN_VLEN >= 64")
+ (VNx4x8HI "TARGET_MIN_VLEN >= 64")
+ (VNx5x8HI "TARGET_MIN_VLEN >= 128")
+ (VNx6x8HI "TARGET_MIN_VLEN >= 128")
+ (VNx7x8HI "TARGET_MIN_VLEN >= 128")
+ (VNx8x8HI "TARGET_MIN_VLEN >= 128")
+ (VNx2x8SI "TARGET_MIN_VLEN >= 64")
+ (VNx3x8SI "TARGET_MIN_VLEN >= 128")
+ (VNx4x8SI "TARGET_MIN_VLEN >= 128")
+ (VNx2x8DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+ (VNx2x8SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64")
+ (VNx3x8SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+ (VNx4x8SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+ (VNx2x8DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V16T [
+ VNx2x16QI
+ (VNx3x16QI "TARGET_MIN_VLEN >= 64")
+ (VNx4x16QI "TARGET_MIN_VLEN >= 64")
+ (VNx5x16QI "TARGET_MIN_VLEN >= 128")
+ (VNx6x16QI "TARGET_MIN_VLEN >= 128")
+ (VNx7x16QI "TARGET_MIN_VLEN >= 128")
+ (VNx8x16QI "TARGET_MIN_VLEN >= 128")
+ (VNx2x16HI "TARGET_MIN_VLEN >= 64")
+ (VNx3x16HI "TARGET_MIN_VLEN >= 128")
+ (VNx4x16HI "TARGET_MIN_VLEN >= 128")
+ (VNx2x16SI "TARGET_MIN_VLEN >= 128")
+ (VNx2x16SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_iterator V32T [
+ (VNx2x32QI "TARGET_MIN_VLEN >= 64")
+ (VNx3x32QI "TARGET_MIN_VLEN >= 128")
+ (VNx4x32QI "TARGET_MIN_VLEN >= 128")
+ (VNx2x32HI "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator V64T [
+ (VNx2x64QI "TARGET_MIN_VLEN >= 128")
+])
+
(define_mode_attr VLMULX2 [
(VNx1QI "VNx2QI") (VNx2QI "VNx4QI") (VNx4QI "VNx8QI") (VNx8QI "VNx16QI") (VNx16QI "VNx32QI") (VNx32QI "VNx64QI") (VNx64QI "VNx128QI")
(VNx1HI "VNx2HI") (VNx2HI "VNx4HI") (VNx4HI "VNx8HI") (VNx8HI "VNx16HI") (VNx16HI "VNx32HI") (VNx32HI "VNx64HI")
(VNx1DF "SF") (VNx2DF "SF") (VNx4DF "SF") (VNx8DF "SF") (VNx16DF "SF")
])
+(define_mode_attr nf [
+ (VNx2x64QI "2") (VNx2x32QI "2") (VNx3x32QI "3") (VNx4x32QI "4")
+ (VNx2x16QI "2") (VNx3x16QI "3") (VNx4x16QI "4") (VNx5x16QI "5") (VNx6x16QI "6") (VNx7x16QI "7") (VNx8x16QI "8")
+ (VNx2x8QI "2") (VNx3x8QI "3") (VNx4x8QI "4") (VNx5x8QI "5") (VNx6x8QI "6") (VNx7x8QI "7") (VNx8x8QI "8")
+ (VNx2x4QI "2") (VNx3x4QI "3") (VNx4x4QI "4") (VNx5x4QI "5") (VNx6x4QI "6") (VNx7x4QI "7") (VNx8x4QI "8")
+ (VNx2x2QI "2") (VNx3x2QI "3") (VNx4x2QI "4") (VNx5x2QI "5") (VNx6x2QI "6") (VNx7x2QI "7") (VNx8x2QI "8")
+ (VNx2x1QI "2") (VNx3x1QI "3") (VNx4x1QI "4") (VNx5x1QI "5") (VNx6x1QI "6") (VNx7x1QI "7") (VNx8x1QI "8")
+ (VNx2x32HI "2") (VNx2x16HI "2") (VNx3x16HI "3") (VNx4x16HI "4")
+ (VNx2x8HI "2") (VNx3x8HI "3") (VNx4x8HI "4") (VNx5x8HI "5") (VNx6x8HI "6") (VNx7x8HI "7") (VNx8x8HI "8")
+ (VNx2x4HI "2") (VNx3x4HI "3") (VNx4x4HI "4") (VNx5x4HI "5") (VNx6x4HI "6") (VNx7x4HI "7") (VNx8x4HI "8")
+ (VNx2x2HI "2") (VNx3x2HI "3") (VNx4x2HI "4") (VNx5x2HI "5") (VNx6x2HI "6") (VNx7x2HI "7") (VNx8x2HI "8")
+ (VNx2x1HI "2") (VNx3x1HI "3") (VNx4x1HI "4") (VNx5x1HI "5") (VNx6x1HI "6") (VNx7x1HI "7") (VNx8x1HI "8")
+ (VNx2x16SI "2") (VNx2x8SI "2") (VNx3x8SI "3") (VNx4x8SI "4")
+ (VNx2x4SI "2") (VNx3x4SI "3") (VNx4x4SI "4") (VNx5x4SI "5") (VNx6x4SI "6") (VNx7x4SI "7") (VNx8x4SI "8")
+ (VNx2x2SI "2") (VNx3x2SI "3") (VNx4x2SI "4") (VNx5x2SI "5") (VNx6x2SI "6") (VNx7x2SI "7") (VNx8x2SI "8")
+ (VNx2x1SI "2") (VNx3x1SI "3") (VNx4x1SI "4") (VNx5x1SI "5") (VNx6x1SI "6") (VNx7x1SI "7") (VNx8x1SI "8")
+ (VNx2x8DI "2") (VNx2x4DI "2") (VNx3x4DI "3") (VNx4x4DI "4")
+ (VNx2x2DI "2") (VNx3x2DI "3") (VNx4x2DI "4") (VNx5x2DI "5") (VNx6x2DI "6") (VNx7x2DI "7") (VNx8x2DI "8")
+ (VNx2x1DI "2") (VNx3x1DI "3") (VNx4x1DI "4") (VNx5x1DI "5") (VNx6x1DI "6") (VNx7x1DI "7") (VNx8x1DI "8")
+ (VNx2x16SF "2") (VNx2x8SF "2") (VNx3x8SF "3") (VNx4x8SF "4")
+ (VNx2x4SF "2") (VNx3x4SF "3") (VNx4x4SF "4") (VNx5x4SF "5") (VNx6x4SF "6") (VNx7x4SF "7") (VNx8x4SF "8")
+ (VNx2x2SF "2") (VNx3x2SF "3") (VNx4x2SF "4") (VNx5x2SF "5") (VNx6x2SF "6") (VNx7x2SF "7") (VNx8x2SF "8")
+ (VNx2x1SF "2") (VNx3x1SF "3") (VNx4x1SF "4") (VNx5x1SF "5") (VNx6x1SF "6") (VNx7x1SF "7") (VNx8x1SF "8")
+ (VNx2x8DF "2")
+ (VNx2x4DF "2") (VNx3x4DF "3") (VNx4x4DF "4")
+ (VNx2x2DF "2") (VNx3x2DF "3") (VNx4x2DF "4") (VNx5x2DF "5") (VNx6x2DF "6") (VNx7x2DF "7") (VNx8x2DF "8")
+ (VNx2x1DF "2") (VNx3x1DF "3") (VNx4x1DF "4") (VNx5x1DF "5") (VNx6x1DF "6") (VNx7x1DF "7") (VNx8x1DF "8")
+])
+
(define_mode_attr sew [
(VNx1QI "8") (VNx2QI "8") (VNx4QI "8") (VNx8QI "8") (VNx16QI "8") (VNx32QI "8") (VNx64QI "8") (VNx128QI "8")
(VNx1HI "16") (VNx2HI "16") (VNx4HI "16") (VNx8HI "16") (VNx16HI "16") (VNx32HI "16") (VNx64HI "16")
(VNx1DI "64") (VNx2DI "64") (VNx4DI "64") (VNx8DI "64") (VNx16DI "64")
(VNx1SF "32") (VNx2SF "32") (VNx4SF "32") (VNx8SF "32") (VNx16SF "32") (VNx32SF "32")
(VNx1DF "64") (VNx2DF "64") (VNx4DF "64") (VNx8DF "64") (VNx16DF "64")
+ (VNx2x64QI "8") (VNx2x32QI "8") (VNx3x32QI "8") (VNx4x32QI "8")
+ (VNx2x16QI "8") (VNx3x16QI "8") (VNx4x16QI "8") (VNx5x16QI "8") (VNx6x16QI "8") (VNx7x16QI "8") (VNx8x16QI "8")
+ (VNx2x8QI "8") (VNx3x8QI "8") (VNx4x8QI "8") (VNx5x8QI "8") (VNx6x8QI "8") (VNx7x8QI "8") (VNx8x8QI "8")
+ (VNx2x4QI "8") (VNx3x4QI "8") (VNx4x4QI "8") (VNx5x4QI "8") (VNx6x4QI "8") (VNx7x4QI "8") (VNx8x4QI "8")
+ (VNx2x2QI "8") (VNx3x2QI "8") (VNx4x2QI "8") (VNx5x2QI "8") (VNx6x2QI "8") (VNx7x2QI "8") (VNx8x2QI "8")
+ (VNx2x1QI "8") (VNx3x1QI "8") (VNx4x1QI "8") (VNx5x1QI "8") (VNx6x1QI "8") (VNx7x1QI "8") (VNx8x1QI "8")
+ (VNx2x32HI "16") (VNx2x16HI "16") (VNx3x16HI "16") (VNx4x16HI "16")
+ (VNx2x8HI "16") (VNx3x8HI "16") (VNx4x8HI "16") (VNx5x8HI "16") (VNx6x8HI "16") (VNx7x8HI "16") (VNx8x8HI "16")
+ (VNx2x4HI "16") (VNx3x4HI "16") (VNx4x4HI "16") (VNx5x4HI "16") (VNx6x4HI "16") (VNx7x4HI "16") (VNx8x4HI "16")
+ (VNx2x2HI "16") (VNx3x2HI "16") (VNx4x2HI "16") (VNx5x2HI "16") (VNx6x2HI "16") (VNx7x2HI "16") (VNx8x2HI "16")
+ (VNx2x1HI "16") (VNx3x1HI "16") (VNx4x1HI "16") (VNx5x1HI "16") (VNx6x1HI "16") (VNx7x1HI "16") (VNx8x1HI "16")
+ (VNx2x16SI "32") (VNx2x8SI "32") (VNx3x8SI "32") (VNx4x8SI "32")
+ (VNx2x4SI "32") (VNx3x4SI "32") (VNx4x4SI "32") (VNx5x4SI "32") (VNx6x4SI "32") (VNx7x4SI "32") (VNx8x4SI "32")
+ (VNx2x2SI "32") (VNx3x2SI "32") (VNx4x2SI "32") (VNx5x2SI "32") (VNx6x2SI "32") (VNx7x2SI "32") (VNx8x2SI "32")
+ (VNx2x1SI "32") (VNx3x1SI "32") (VNx4x1SI "32") (VNx5x1SI "32") (VNx6x1SI "32") (VNx7x1SI "32") (VNx8x1SI "32")
+ (VNx2x8DI "64") (VNx2x4DI "64") (VNx3x4DI "64") (VNx4x4DI "64")
+ (VNx2x2DI "64") (VNx3x2DI "64") (VNx4x2DI "64") (VNx5x2DI "64") (VNx6x2DI "64") (VNx7x2DI "64") (VNx8x2DI "64")
+ (VNx2x1DI "64") (VNx3x1DI "64") (VNx4x1DI "64") (VNx5x1DI "64") (VNx6x1DI "64") (VNx7x1DI "64") (VNx8x1DI "64")
+ (VNx2x16SF "32") (VNx2x8SF "32") (VNx3x8SF "32") (VNx4x8SF "32")
+ (VNx2x4SF "32") (VNx3x4SF "32") (VNx4x4SF "32") (VNx5x4SF "32") (VNx6x4SF "32") (VNx7x4SF "32") (VNx8x4SF "32")
+ (VNx2x2SF "32") (VNx3x2SF "32") (VNx4x2SF "32") (VNx5x2SF "32") (VNx6x2SF "32") (VNx7x2SF "32") (VNx8x2SF "32")
+ (VNx2x1SF "32") (VNx3x1SF "32") (VNx4x1SF "32") (VNx5x1SF "32") (VNx6x1SF "32") (VNx7x1SF "32") (VNx8x1SF "32")
+ (VNx2x8DF "64")
+ (VNx2x4DF "64") (VNx3x4DF "64") (VNx4x4DF "64")
+ (VNx2x2DF "64") (VNx3x2DF "64") (VNx4x2DF "64") (VNx5x2DF "64") (VNx6x2DF "64") (VNx7x2DF "64") (VNx8x2DF "64")
+ (VNx2x1DF "64") (VNx3x1DF "64") (VNx4x1DF "64") (VNx5x1DF "64") (VNx6x1DF "64") (VNx7x1DF "64") (VNx8x1DF "64")
])
(define_mode_attr double_trunc_sew [
vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,\
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
- vgather,vcompress")
+ vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
+ vssegtux,vssegtox,vlsegdff")
(const_string "true")]
(const_string "false")))
vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovxv,vfmovfv,\
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
- vgather,vcompress")
+ vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
+ vssegtux,vssegtox,vlsegdff")
(const_string "true")]
(const_string "false")))
(define_attr "sew" ""
(cond [(eq_attr "mode" "VNx1QI,VNx2QI,VNx4QI,VNx8QI,VNx16QI,VNx32QI,VNx64QI,\
VNx1BI,VNx2BI,VNx4BI,VNx8BI,VNx16BI,VNx32BI,VNx64BI,\
- VNx128QI,VNx128BI")
+ VNx128QI,VNx128BI,VNx2x64QI,VNx2x32QI,VNx3x32QI,VNx4x32QI,\
+ VNx2x16QI,VNx3x16QI,VNx4x16QI,VNx5x16QI,VNx6x16QI,VNx7x16QI,VNx8x16QI,\
+ VNx2x8QI,VNx3x8QI,VNx4x8QI,VNx5x8QI,VNx6x8QI,VNx7x8QI,VNx8x8QI,\
+ VNx2x4QI,VNx3x4QI,VNx4x4QI,VNx5x4QI,VNx6x4QI,VNx7x4QI,VNx8x4QI,\
+ VNx2x2QI,VNx3x2QI,VNx4x2QI,VNx5x2QI,VNx6x2QI,VNx7x2QI,VNx8x2QI,\
+ VNx2x1QI,VNx3x1QI,VNx4x1QI,VNx5x1QI,VNx6x1QI,VNx7x1QI,VNx8x1QI")
(const_int 8)
- (eq_attr "mode" "VNx1HI,VNx2HI,VNx4HI,VNx8HI,VNx16HI,VNx32HI,VNx64HI")
+ (eq_attr "mode" "VNx1HI,VNx2HI,VNx4HI,VNx8HI,VNx16HI,VNx32HI,VNx64HI,\
+ VNx2x32HI,VNx2x16HI,VNx3x16HI,VNx4x16HI,\
+ VNx2x8HI,VNx3x8HI,VNx4x8HI,VNx5x8HI,VNx6x8HI,VNx7x8HI,VNx8x8HI,\
+ VNx2x4HI,VNx3x4HI,VNx4x4HI,VNx5x4HI,VNx6x4HI,VNx7x4HI,VNx8x4HI,\
+ VNx2x2HI,VNx3x2HI,VNx4x2HI,VNx5x2HI,VNx6x2HI,VNx7x2HI,VNx8x2HI,\
+ VNx2x1HI,VNx3x1HI,VNx4x1HI,VNx5x1HI,VNx6x1HI,VNx7x1HI,VNx8x1HI")
(const_int 16)
(eq_attr "mode" "VNx1SI,VNx2SI,VNx4SI,VNx8SI,VNx16SI,VNx32SI,\
- VNx1SF,VNx2SF,VNx4SF,VNx8SF,VNx16SF,VNx32SF")
+ VNx1SF,VNx2SF,VNx4SF,VNx8SF,VNx16SF,VNx32SF,\
+ VNx2x16SI,VNx2x8SI,VNx3x8SI,VNx4x8SI,\
+ VNx2x4SI,VNx3x4SI,VNx4x4SI,VNx5x4SI,VNx6x4SI,VNx7x4SI,VNx8x4SI,\
+ VNx2x2SI,VNx3x2SI,VNx4x2SI,VNx5x2SI,VNx6x2SI,VNx7x2SI,VNx8x2SI,\
+ VNx2x1SI,VNx3x1SI,VNx4x1SI,VNx5x1SI,VNx6x1SI,VNx7x1SI,VNx8x1SI,\
+ VNx2x16SF,VNx2x8SF,VNx3x8SF,VNx4x8SF,\
+ VNx2x4SF,VNx3x4SF,VNx4x4SF,VNx5x4SF,VNx6x4SF,VNx7x4SF,VNx8x4SF,\
+ VNx2x2SF,VNx3x2SF,VNx4x2SF,VNx5x2SF,VNx6x2SF,VNx7x2SF,VNx8x2SF,\
+ VNx2x1SF,VNx3x1SF,VNx4x1SF,VNx5x1SF,VNx6x1SF,VNx7x1SF,VNx8x1SF")
(const_int 32)
(eq_attr "mode" "VNx1DI,VNx2DI,VNx4DI,VNx8DI,VNx16DI,\
- VNx1DF,VNx2DF,VNx4DF,VNx8DF,VNx16DF")
+ VNx1DF,VNx2DF,VNx4DF,VNx8DF,VNx16DF,\
+ VNx2x8DI,VNx2x4DI,VNx3x4DI,VNx4x4DI,\
+ VNx2x2DI,VNx3x2DI,VNx4x2DI,VNx5x2DI,VNx6x2DI,VNx7x2DI,VNx8x2DI,\
+ VNx2x1DI,VNx3x1DI,VNx4x1DI,VNx5x1DI,VNx6x1DI,VNx7x1DI,VNx8x1DI,\
+ VNx2x8DF,VNx2x4DF,VNx3x4DF,VNx4x4DF,\
+ VNx2x2DF,VNx3x2DF,VNx4x2DF,VNx5x2DF,VNx6x2DF,VNx7x2DF,VNx8x2DF,\
+ VNx2x1DF,VNx3x1DF,VNx4x1DF,VNx5x1DF,VNx6x1DF,VNx7x1DF,VNx8x1DF")
(const_int 64)]
(const_int INVALID_ATTRIBUTE)))
;; Ditto for LMUL.
(define_attr "vlmul" ""
- (cond [(eq_attr "mode" "VNx1QI,VNx1BI")
+ (cond [(eq_attr "mode" "VNx1QI,VNx1BI,VNx2x1QI,VNx3x1QI,VNx4x1QI,VNx5x1QI,VNx6x1QI,VNx7x1QI,VNx8x1QI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx1QImode)")
- (eq_attr "mode" "VNx2QI,VNx2BI")
+ (eq_attr "mode" "VNx2QI,VNx2BI,VNx2x2QI,VNx3x2QI,VNx4x2QI,VNx5x2QI,VNx6x2QI,VNx7x2QI,VNx8x2QI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx2QImode)")
- (eq_attr "mode" "VNx4QI,VNx4BI")
+ (eq_attr "mode" "VNx4QI,VNx4BI,VNx2x4QI,VNx3x4QI,VNx4x4QI,VNx5x4QI,VNx6x4QI,VNx7x4QI,VNx8x4QI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx4QImode)")
- (eq_attr "mode" "VNx8QI,VNx8BI")
+ (eq_attr "mode" "VNx8QI,VNx8BI,VNx2x8QI,VNx3x8QI,VNx4x8QI,VNx5x8QI,VNx6x8QI,VNx7x8QI,VNx8x8QI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx8QImode)")
- (eq_attr "mode" "VNx16QI,VNx16BI")
+ (eq_attr "mode" "VNx16QI,VNx16BI,VNx2x16QI,VNx3x16QI,VNx4x16QI,VNx5x16QI,VNx6x16QI,VNx7x16QI,VNx8x16QI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx16QImode)")
- (eq_attr "mode" "VNx32QI,VNx32BI")
+ (eq_attr "mode" "VNx32QI,VNx32BI,VNx2x32QI,VNx3x32QI,VNx4x32QI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx32QImode)")
- (eq_attr "mode" "VNx64QI,VNx64BI")
+ (eq_attr "mode" "VNx64QI,VNx64BI,VNx2x64QI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx64QImode)")
(eq_attr "mode" "VNx128QI,VNx128BI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx128QImode)")
- (eq_attr "mode" "VNx1HI")
+ (eq_attr "mode" "VNx1HI,VNx2x1HI,VNx3x1HI,VNx4x1HI,VNx5x1HI,VNx6x1HI,VNx7x1HI,VNx8x1HI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx1HImode)")
- (eq_attr "mode" "VNx2HI")
+ (eq_attr "mode" "VNx2HI,VNx2x2HI,VNx3x2HI,VNx4x2HI,VNx5x2HI,VNx6x2HI,VNx7x2HI,VNx8x2HI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx2HImode)")
- (eq_attr "mode" "VNx4HI")
+ (eq_attr "mode" "VNx4HI,VNx2x4HI,VNx3x4HI,VNx4x4HI,VNx5x4HI,VNx6x4HI,VNx7x4HI,VNx8x4HI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx4HImode)")
- (eq_attr "mode" "VNx8HI")
+ (eq_attr "mode" "VNx8HI,VNx2x8HI,VNx3x8HI,VNx4x8HI,VNx5x8HI,VNx6x8HI,VNx7x8HI,VNx8x8HI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx8HImode)")
- (eq_attr "mode" "VNx16HI")
+ (eq_attr "mode" "VNx16HI,VNx2x16HI,VNx3x16HI,VNx4x16HI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx16HImode)")
- (eq_attr "mode" "VNx32HI")
+ (eq_attr "mode" "VNx32HI,VNx2x32HI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx32HImode)")
(eq_attr "mode" "VNx64HI")
(symbol_ref "riscv_vector::get_vlmul(E_VNx64HImode)")
- (eq_attr "mode" "VNx1SI,VNx1SF")
+ (eq_attr "mode" "VNx1SI,VNx1SF,VNx2x1SI,VNx3x1SI,VNx4x1SI,VNx5x1SI,VNx6x1SI,VNx7x1SI,VNx8x1SI,\
+ VNx2x1SF,VNx3x1SF,VNx4x1SF,VNx5x1SF,VNx6x1SF,VNx7x1SF,VNx8x1SF")
(symbol_ref "riscv_vector::get_vlmul(E_VNx1SImode)")
- (eq_attr "mode" "VNx2SI,VNx2SF")
+ (eq_attr "mode" "VNx2SI,VNx2SF,VNx2x2SI,VNx3x2SI,VNx4x2SI,VNx5x2SI,VNx6x2SI,VNx7x2SI,VNx8x2SI,\
+ VNx2x2SF,VNx3x2SF,VNx4x2SF,VNx5x2SF,VNx6x2SF,VNx7x2SF,VNx8x2SF")
(symbol_ref "riscv_vector::get_vlmul(E_VNx2SImode)")
- (eq_attr "mode" "VNx4SI,VNx4SF")
+ (eq_attr "mode" "VNx4SI,VNx4SF,VNx2x4SI,VNx3x4SI,VNx4x4SI,VNx5x4SI,VNx6x4SI,VNx7x4SI,VNx8x4SI,\
+ VNx2x4SF,VNx3x4SF,VNx4x4SF,VNx5x4SF,VNx6x4SF,VNx7x4SF,VNx8x4SF")
(symbol_ref "riscv_vector::get_vlmul(E_VNx4SImode)")
- (eq_attr "mode" "VNx8SI,VNx8SF")
+ (eq_attr "mode" "VNx8SI,VNx8SF,VNx2x8SI,VNx3x8SI,VNx4x8SI,VNx2x8SF,VNx3x8SF,VNx4x8SF")
(symbol_ref "riscv_vector::get_vlmul(E_VNx8SImode)")
- (eq_attr "mode" "VNx16SI,VNx16SF")
+ (eq_attr "mode" "VNx16SI,VNx16SF,VNx2x16SI,VNx2x16SF")
(symbol_ref "riscv_vector::get_vlmul(E_VNx16SImode)")
(eq_attr "mode" "VNx32SI,VNx32SF")
(symbol_ref "riscv_vector::get_vlmul(E_VNx32SImode)")
- (eq_attr "mode" "VNx1DI,VNx1DF")
+ (eq_attr "mode" "VNx1DI,VNx1DF,VNx2x1DI,VNx3x1DI,VNx4x1DI,VNx5x1DI,VNx6x1DI,VNx7x1DI,VNx8x1DI,\
+ VNx2x1DF,VNx3x1DF,VNx4x1DF,VNx5x1DF,VNx6x1DF,VNx7x1DF,VNx8x1DF")
(symbol_ref "riscv_vector::get_vlmul(E_VNx1DImode)")
- (eq_attr "mode" "VNx2DI,VNx2DF")
+ (eq_attr "mode" "VNx2DI,VNx2DF,VNx2x2DI,VNx3x2DI,VNx4x2DI,VNx5x2DI,VNx6x2DI,VNx7x2DI,VNx8x2DI,\
+ VNx2x2DF,VNx3x2DF,VNx4x2DF,VNx5x2DF,VNx6x2DF,VNx7x2DF,VNx8x2DF")
(symbol_ref "riscv_vector::get_vlmul(E_VNx2DImode)")
- (eq_attr "mode" "VNx4DI,VNx4DF")
+ (eq_attr "mode" "VNx4DI,VNx4DF,VNx2x4DI,VNx3x4DI,VNx4x4DI,VNx2x4DF,VNx3x4DF,VNx4x4DF")
(symbol_ref "riscv_vector::get_vlmul(E_VNx4DImode)")
- (eq_attr "mode" "VNx8DI,VNx8DF")
+ (eq_attr "mode" "VNx8DI,VNx8DF,VNx2x8DI,VNx2x8DF")
(symbol_ref "riscv_vector::get_vlmul(E_VNx8DImode)")
(eq_attr "mode" "VNx16DI,VNx16DF")
(symbol_ref "riscv_vector::get_vlmul(E_VNx16DImode)")]
viwred,vfredu,vfredo,vfwredu,vfwredo,vimovvx,\
vimovxv,vfmovvf,vfmovfv,vslideup,vslidedown,\
vislide1up,vislide1down,vfslide1up,vfslide1down,\
- vgather,vcompress")
+ vgather,vcompress,vlsegdux,vlsegdox,vssegtux,vssegtox")
(const_int INVALID_ATTRIBUTE)
- (eq_attr "mode" "VNx1QI,VNx1BI")
+ (eq_attr "mode" "VNx1QI,VNx1BI,VNx2x1QI,VNx3x1QI,VNx4x1QI,VNx5x1QI,VNx6x1QI,VNx7x1QI,VNx8x1QI")
(symbol_ref "riscv_vector::get_ratio(E_VNx1QImode)")
- (eq_attr "mode" "VNx2QI,VNx2BI")
+ (eq_attr "mode" "VNx2QI,VNx2BI,VNx2x2QI,VNx3x2QI,VNx4x2QI,VNx5x2QI,VNx6x2QI,VNx7x2QI,VNx8x2QI")
(symbol_ref "riscv_vector::get_ratio(E_VNx2QImode)")
- (eq_attr "mode" "VNx4QI,VNx4BI")
+ (eq_attr "mode" "VNx4QI,VNx4BI,VNx2x4QI,VNx3x4QI,VNx4x4QI,VNx5x4QI,VNx6x4QI,VNx7x4QI,VNx8x4QI")
(symbol_ref "riscv_vector::get_ratio(E_VNx4QImode)")
- (eq_attr "mode" "VNx8QI,VNx8BI")
+ (eq_attr "mode" "VNx8QI,VNx8BI,VNx2x8QI,VNx3x8QI,VNx4x8QI,VNx5x8QI,VNx6x8QI,VNx7x8QI,VNx8x8QI")
(symbol_ref "riscv_vector::get_ratio(E_VNx8QImode)")
- (eq_attr "mode" "VNx16QI,VNx16BI")
+ (eq_attr "mode" "VNx16QI,VNx16BI,VNx2x16QI,VNx3x16QI,VNx4x16QI,VNx5x16QI,VNx6x16QI,VNx7x16QI,VNx8x16QI")
(symbol_ref "riscv_vector::get_ratio(E_VNx16QImode)")
- (eq_attr "mode" "VNx32QI,VNx32BI")
+ (eq_attr "mode" "VNx32QI,VNx32BI,VNx2x32QI,VNx3x32QI,VNx4x32QI")
(symbol_ref "riscv_vector::get_ratio(E_VNx32QImode)")
- (eq_attr "mode" "VNx64QI,VNx64BI")
+ (eq_attr "mode" "VNx64QI,VNx64BI,VNx2x64QI")
(symbol_ref "riscv_vector::get_ratio(E_VNx64QImode)")
(eq_attr "mode" "VNx128QI,VNx128BI")
(symbol_ref "riscv_vector::get_ratio(E_VNx128QImode)")
- (eq_attr "mode" "VNx1HI")
+ (eq_attr "mode" "VNx1HI,VNx2x1HI,VNx3x1HI,VNx4x1HI,VNx5x1HI,VNx6x1HI,VNx7x1HI,VNx8x1HI")
(symbol_ref "riscv_vector::get_ratio(E_VNx1HImode)")
- (eq_attr "mode" "VNx2HI")
+ (eq_attr "mode" "VNx2HI,VNx2x2HI,VNx3x2HI,VNx4x2HI,VNx5x2HI,VNx6x2HI,VNx7x2HI,VNx8x2HI")
(symbol_ref "riscv_vector::get_ratio(E_VNx2HImode)")
- (eq_attr "mode" "VNx4HI")
+ (eq_attr "mode" "VNx4HI,VNx2x4HI,VNx3x4HI,VNx4x4HI,VNx5x4HI,VNx6x4HI,VNx7x4HI,VNx8x4HI")
(symbol_ref "riscv_vector::get_ratio(E_VNx4HImode)")
- (eq_attr "mode" "VNx8HI")
+ (eq_attr "mode" "VNx8HI,VNx2x8HI,VNx3x8HI,VNx4x8HI,VNx5x8HI,VNx6x8HI,VNx7x8HI,VNx8x8HI")
(symbol_ref "riscv_vector::get_ratio(E_VNx8HImode)")
- (eq_attr "mode" "VNx16HI")
+ (eq_attr "mode" "VNx16HI,VNx2x16HI,VNx3x16HI,VNx4x16HI")
(symbol_ref "riscv_vector::get_ratio(E_VNx16HImode)")
- (eq_attr "mode" "VNx32HI")
+ (eq_attr "mode" "VNx32HI,VNx2x32HI")
(symbol_ref "riscv_vector::get_ratio(E_VNx32HImode)")
(eq_attr "mode" "VNx64HI")
(symbol_ref "riscv_vector::get_ratio(E_VNx64HImode)")
- (eq_attr "mode" "VNx1SI,VNx1SF")
+ (eq_attr "mode" "VNx1SI,VNx1SF,VNx2x1SI,VNx3x1SI,VNx4x1SI,VNx5x1SI,VNx6x1SI,VNx7x1SI,VNx8x1SI,\
+ VNx2x1SF,VNx3x1SF,VNx4x1SF,VNx5x1SF,VNx6x1SF,VNx7x1SF,VNx8x1SF")
(symbol_ref "riscv_vector::get_ratio(E_VNx1SImode)")
- (eq_attr "mode" "VNx2SI,VNx2SF")
+ (eq_attr "mode" "VNx2SI,VNx2SF,VNx2x2SI,VNx3x2SI,VNx4x2SI,VNx5x2SI,VNx6x2SI,VNx7x2SI,VNx8x2SI,\
+ VNx2x2SF,VNx3x2SF,VNx4x2SF,VNx5x2SF,VNx6x2SF,VNx7x2SF,VNx8x2SF")
(symbol_ref "riscv_vector::get_ratio(E_VNx2SImode)")
- (eq_attr "mode" "VNx4SI,VNx4SF")
+ (eq_attr "mode" "VNx4SI,VNx4SF,VNx2x4SI,VNx3x4SI,VNx4x4SI,VNx5x4SI,VNx6x4SI,VNx7x4SI,VNx8x4SI,\
+ VNx2x4SF,VNx3x4SF,VNx4x4SF,VNx5x4SF,VNx6x4SF,VNx7x4SF,VNx8x4SF")
(symbol_ref "riscv_vector::get_ratio(E_VNx4SImode)")
- (eq_attr "mode" "VNx8SI,VNx8SF")
+ (eq_attr "mode" "VNx8SI,VNx8SF,VNx2x8SI,VNx3x8SI,VNx4x8SI,VNx2x8SF,VNx3x8SF,VNx4x8SF")
(symbol_ref "riscv_vector::get_ratio(E_VNx8SImode)")
- (eq_attr "mode" "VNx16SI,VNx16SF")
+ (eq_attr "mode" "VNx16SI,VNx16SF,VNx2x16SI,VNx2x16SF")
(symbol_ref "riscv_vector::get_ratio(E_VNx16SImode)")
(eq_attr "mode" "VNx32SI,VNx32SF")
(symbol_ref "riscv_vector::get_ratio(E_VNx32SImode)")
- (eq_attr "mode" "VNx1DI,VNx1DF")
+ (eq_attr "mode" "VNx1DI,VNx1DF,VNx2x1DI,VNx3x1DI,VNx4x1DI,VNx5x1DI,VNx6x1DI,VNx7x1DI,VNx8x1DI,\
+ VNx2x1DF,VNx3x1DF,VNx4x1DF,VNx5x1DF,VNx6x1DF,VNx7x1DF,VNx8x1DF")
(symbol_ref "riscv_vector::get_ratio(E_VNx1DImode)")
- (eq_attr "mode" "VNx2DI,VNx2DF")
+ (eq_attr "mode" "VNx2DI,VNx2DF,VNx2x2DI,VNx3x2DI,VNx4x2DI,VNx5x2DI,VNx6x2DI,VNx7x2DI,VNx8x2DI,\
+ VNx2x2DF,VNx3x2DF,VNx4x2DF,VNx5x2DF,VNx6x2DF,VNx7x2DF,VNx8x2DF")
(symbol_ref "riscv_vector::get_ratio(E_VNx2DImode)")
- (eq_attr "mode" "VNx4DI,VNx4DF")
+ (eq_attr "mode" "VNx4DI,VNx4DF,VNx2x4DI,VNx3x4DI,VNx4x4DI,VNx2x4DF,VNx3x4DF,VNx4x4DF")
(symbol_ref "riscv_vector::get_ratio(E_VNx4DImode)")
- (eq_attr "mode" "VNx8DI,VNx8DF")
+ (eq_attr "mode" "VNx8DI,VNx8DF,VNx2x8DI,VNx2x8DF")
(symbol_ref "riscv_vector::get_ratio(E_VNx8DImode)")
(eq_attr "mode" "VNx16DI,VNx16DF")
(symbol_ref "riscv_vector::get_ratio(E_VNx16DImode)")]
vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,vfclass,\
vired,viwred,vfredu,vfredo,vfwredu,vfwredo,vimovxv,vfmovfv,\
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
- vgather,vldff,viwmuladd,vfwmuladd")
+ vgather,vldff,viwmuladd,vfwmuladd,vlsegde,vlsegds,vlsegdux,vlsegdox,vlsegdff")
(const_int 2)
(eq_attr "type" "vimerge,vfmerge,vcompress")
(cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vldm,vstm,vmalu,vsts,vstux,\
vstox,vext,vmsfs,vmiota,vfsqrt,vfrecp,vfcvtitof,vldff,\
vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,vfncvtitof,\
- vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,vcompress")
+ vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,vcompress,\
+ vlsegde,vssegts,vssegtux,vssegtox,vlsegdff")
(const_int 4)
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
vsshift,vnclip,vfalu,vfmul,vfminmax,vfdiv,vfwalu,vfwmul,\
vfsgnj,vfmerge,vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
- vgather,viwmuladd,vfwmuladd")
+ vgather,viwmuladd,vfwmuladd,vlsegds,vlsegdux,vlsegdox")
(const_int 5)
(eq_attr "type" "vicmp,vimuladd,vfcmp,vfmuladd")
(const_int 6)
- (eq_attr "type" "vmpop,vmffs,vmidx")
+ (eq_attr "type" "vmpop,vmffs,vmidx,vssegte")
(const_int 3)]
(const_int INVALID_ATTRIBUTE)))
(cond [(eq_attr "type" "vlde,vimov,vfmov,vext,vmiota,vfsqrt,vfrecp,\
vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,\
vfncvtitof,vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,\
- vcompress,vldff")
+ vcompress,vldff,vlsegde,vlsegdff")
(symbol_ref "riscv_vector::get_ta(operands[5])")
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
vsshift,vnclip,vfalu,vfmul,vfminmax,vfdiv,\
vfwalu,vfwmul,vfsgnj,vfmerge,vired,viwred,vfredu,\
vfredo,vfwredu,vfwredo,vslideup,vslidedown,vislide1up,\
- vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd")
+ vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd,\
+ vlsegds,vlsegdux,vlsegdox")
(symbol_ref "riscv_vector::get_ta(operands[6])")
(eq_attr "type" "vimuladd,vfmuladd")
(define_attr "ma" ""
(cond [(eq_attr "type" "vlde,vext,vmiota,vfsqrt,vfrecp,vfcvtitof,vfcvtftoi,\
vfwcvtitof,vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,\
- vfncvtftof,vfclass,vldff")
+ vfncvtftof,vfclass,vldff,vlsegde,vlsegdff")
(symbol_ref "riscv_vector::get_ma(operands[6])")
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
vnclip,vicmp,vfalu,vfmul,vfminmax,vfdiv,\
vfwalu,vfwmul,vfsgnj,vfcmp,vslideup,vslidedown,\
vislide1up,vislide1down,vfslide1up,vfslide1down,vgather,\
- viwmuladd,vfwmuladd")
+ viwmuladd,vfwmuladd,vlsegds,vlsegdux,vlsegdox")
(symbol_ref "riscv_vector::get_ma(operands[7])")
(eq_attr "type" "vimuladd,vfmuladd")
vfsqrt,vfrecp,vfmerge,vfcvtitof,vfcvtftoi,vfwcvtitof,\
vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,\
vfclass,vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
- vimovxv,vfmovfv")
+ vimovxv,vfmovfv,vlsegde,vlsegdff")
(symbol_ref "INTVAL (operands[7])")
(eq_attr "type" "vldm,vstm,vimov,vmalu,vmalu")
(symbol_ref "INTVAL (operands[5])")
viwalu,viwmul,vnshift,vaalu,vsmul,vsshift,\
vnclip,vicmp,vfalu,vfmul,vfminmax,vfdiv,vfwalu,vfwmul,\
vfsgnj,vfcmp,vfmuladd,vslideup,vslidedown,vislide1up,\
- vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd")
+ vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd,\
+ vlsegds,vlsegdux,vlsegdox")
(symbol_ref "INTVAL (operands[8])")
- (eq_attr "type" "vstux,vstox")
+ (eq_attr "type" "vstux,vstox,vssegts,vssegtux,vssegtox")
(symbol_ref "INTVAL (operands[5])")
(eq_attr "type" "vimuladd")
(eq_attr "type" "vmsfs,vmidx,vcompress")
(symbol_ref "INTVAL (operands[6])")
- (eq_attr "type" "vmpop,vmffs")
+ (eq_attr "type" "vmpop,vmffs,vssegte")
(symbol_ref "INTVAL (operands[4])")]
(const_int INVALID_ATTRIBUTE)))
[(set_attr "type" "vldff")
(set_attr "mode" "<MODE>")])
+
+;; -------------------------------------------------------------------------------
+;; ---- Predicated Segment loads/stores
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 7.8.1. Vector Unit-Stride Segment Loads and Stores
+;; - 7.8.2. Vector Strided Segment Loads and Stores
+;; - 7.8.3. Vector Indexed Segment Loads and Stores
+;; -------------------------------------------------------------------------------
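+;;
+;; As a rough illustration of the semantics behind these patterns (see the
+;; spec sections listed above): a unit-stride segment load with NF fields
+;; reads NF consecutive SEW-wide fields per element and de-interleaves them
+;; into NF destination registers; the matching store interleaves them back.
+;; A minimal C sketch for NF = 3, SEW = 32 follows; the names
+;; (vlseg3e32_ref, v0/v1/v2, mask, vl) are illustrative only, and tail/mask
+;; policy handling is omitted.
+;;
+;;   #include <stdint.h>
+;;   #include <stddef.h>
+;;
+;;   void
+;;   vlseg3e32_ref (uint32_t *v0, uint32_t *v1, uint32_t *v2,
+;;                  const uint32_t *base, const uint8_t *mask, size_t vl)
+;;   {
+;;     for (size_t i = 0; i < vl; i++)
+;;       if (mask[i])
+;;         {
+;;           /* Element i reads the NF consecutive fields starting at
+;;              base + i * NF; field f goes to destination register f.  */
+;;           v0[i] = base[i * 3 + 0];
+;;           v1[i] = base[i * 3 + 1];
+;;           v2[i] = base[i * 3 + 2];
+;;         }
+;;   }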
+
+(define_insn "@pred_unit_strided_load<mode>"
+ [(set (match_operand:VT 0 "register_operand" "=vr, vr, vd")
+ (if_then_else:VT
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1, Wc1, vm")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK")
+ (match_operand 5 "const_int_operand" " i, i, i")
+ (match_operand 6 "const_int_operand" " i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VT
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ, rJ")
+ (mem:BLK (scratch))] UNSPEC_UNIT_STRIDED)
+ (match_operand:VT 2 "vector_merge_operand" " 0, vu, vu")))]
+ "TARGET_VECTOR"
+ "vlseg<nf>e<sew>.v\t%0,(%z3)%p1"
+ [(set_attr "type" "vlsegde")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_unit_strided_store<mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 3 "vector_length_operand" " rK")
+ (match_operand 4 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
+ (match_operand:VT 2 "register_operand" " vr")
+ (mem:BLK (scratch))] UNSPEC_UNIT_STRIDED))]
+ "TARGET_VECTOR"
+ "vsseg<nf>e<sew>.v\t%2,(%z1)%p0"
+ [(set_attr "type" "vssegte")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_strided_load<mode>"
+ [(set (match_operand:VT 0 "register_operand" "=vr, vr, vd")
+ (if_then_else:VT
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1, Wc1, vm")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VT
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ, rJ")
+ (match_operand 4 "pmode_reg_or_0_operand" " rJ, rJ, rJ")
+ (mem:BLK (scratch))] UNSPEC_STRIDED)
+ (match_operand:VT 2 "vector_merge_operand" " 0, vu, vu")))]
+ "TARGET_VECTOR"
+ "vlsseg<nf>e<sew>.v\t%0,(%z3),%z4%p1"
+ [(set_attr "type" "vlsegds")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_strided_store<mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
+ (match_operand 2 "pmode_reg_or_0_operand" " rJ")
+ (match_operand:VT 3 "register_operand" " vr")
+ (mem:BLK (scratch))] UNSPEC_STRIDED))]
+ "TARGET_VECTOR"
+ "vssseg<nf>e<sew>.v\t%3,(%z1),%z2%p0"
+ [(set_attr "type" "vssegts")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_fault_load<mode>"
+ [(set (match_operand:VT 0 "register_operand" "=vr, vr, vd")
+ (if_then_else:VT
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1, Wc1, vm")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK")
+ (match_operand 5 "const_int_operand" " i, i, i")
+ (match_operand 6 "const_int_operand" " i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VT
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ, rJ")
+ (mem:BLK (scratch))] UNSPEC_VLEFF)
+ (match_operand:VT 2 "vector_merge_operand" " 0, vu, vu")))
+ (set (reg:SI VL_REGNUM)
+ (unspec:SI
+ [(if_then_else:VT
+ (unspec:<VM>
+ [(match_dup 1) (match_dup 4) (match_dup 5)
+ (match_dup 6) (match_dup 7)
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VT
+ [(match_dup 3) (mem:BLK (scratch))] UNSPEC_VLEFF)
+ (match_dup 2))] UNSPEC_MODIFY_VL))]
+ "TARGET_VECTOR"
+ "vlseg<nf>e<sew>ff.v\t%0,(%z3)%p1"
+ [(set_attr "type" "vlsegdff")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V1T:mode><V1I:mode>"
+ [(set (match_operand:V1T 0 "register_operand" "=&vr, &vr")
+ (if_then_else:V1T
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:V1T
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ (mem:BLK (scratch))
+ (match_operand:V1I 4 "register_operand" " vr, vr")] ORDER)
+ (match_operand:V1T 2 "vector_merge_operand" " vu, 0")))]
+ "TARGET_VECTOR"
+ "vl<order>xseg<nf>ei<V1I:sew>.v\t%0,(%z3),%4%p1"
+ [(set_attr "type" "vlsegd<order>x")
+ (set_attr "mode" "<V1T:MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V2T:mode><V2I:mode>"
+ [(set (match_operand:V2T 0 "register_operand" "=&vr, &vr")
+ (if_then_else:V2T
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:V2T
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ (mem:BLK (scratch))
+ (match_operand:V2I 4 "register_operand" " vr, vr")] ORDER)
+ (match_operand:V2T 2 "vector_merge_operand" " vu, 0")))]
+ "TARGET_VECTOR"
+ "vl<order>xseg<nf>ei<V2I:sew>.v\t%0,(%z3),%4%p1"
+ [(set_attr "type" "vlsegd<order>x")
+ (set_attr "mode" "<V2T:MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V4T:mode><V4I:mode>"
+ [(set (match_operand:V4T 0 "register_operand" "=&vr, &vr")
+ (if_then_else:V4T
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:V4T
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ (mem:BLK (scratch))
+ (match_operand:V4I 4 "register_operand" " vr, vr")] ORDER)
+ (match_operand:V4T 2 "vector_merge_operand" " vu, 0")))]
+ "TARGET_VECTOR"
+ "vl<order>xseg<nf>ei<V4I:sew>.v\t%0,(%z3),%4%p1"
+ [(set_attr "type" "vlsegd<order>x")
+ (set_attr "mode" "<V4T:MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V8T:mode><V8I:mode>"
+ [(set (match_operand:V8T 0 "register_operand" "=&vr, &vr")
+ (if_then_else:V8T
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:V8T
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ (mem:BLK (scratch))
+ (match_operand:V8I 4 "register_operand" " vr, vr")] ORDER)
+ (match_operand:V8T 2 "vector_merge_operand" " vu, 0")))]
+ "TARGET_VECTOR"
+ "vl<order>xseg<nf>ei<V8I:sew>.v\t%0,(%z3),%4%p1"
+ [(set_attr "type" "vlsegd<order>x")
+ (set_attr "mode" "<V8T:MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V16T:mode><V16I:mode>"
+ [(set (match_operand:V16T 0 "register_operand" "=&vr, &vr")
+ (if_then_else:V16T
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:V16T
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ (mem:BLK (scratch))
+ (match_operand:V16I 4 "register_operand" " vr, vr")] ORDER)
+ (match_operand:V16T 2 "vector_merge_operand" " vu, 0")))]
+ "TARGET_VECTOR"
+ "vl<order>xseg<nf>ei<V16I:sew>.v\t%0,(%z3),%4%p1"
+ [(set_attr "type" "vlsegd<order>x")
+ (set_attr "mode" "<V16T:MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V32T:mode><V32I:mode>"
+ [(set (match_operand:V32T 0 "register_operand" "=&vr, &vr")
+ (if_then_else:V32T
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:V32T
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ (mem:BLK (scratch))
+ (match_operand:V32I 4 "register_operand" " vr, vr")] ORDER)
+ (match_operand:V32T 2 "vector_merge_operand" " vu, 0")))]
+ "TARGET_VECTOR"
+ "vl<order>xseg<nf>ei<V32I:sew>.v\t%0,(%z3),%4%p1"
+ [(set_attr "type" "vlsegd<order>x")
+ (set_attr "mode" "<V32T:MODE>")])
+
+(define_insn "@pred_indexed_<order>load<V64T:mode><V64I:mode>"
+ [(set (match_operand:V64T 0 "register_operand" "=&vr, &vr")
+ (if_then_else:V64T
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:V64T
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ (mem:BLK (scratch))
+ (match_operand:V64I 4 "register_operand" " vr, vr")] ORDER)
+ (match_operand:V64T 2 "vector_merge_operand" " vu, 0")))]
+ "TARGET_VECTOR"
+ "vl<order>xseg<nf>ei<V64I:sew>.v\t%0,(%z3),%4%p1"
+ [(set_attr "type" "vlsegd<order>x")
+ (set_attr "mode" "<V64T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V1T:mode><V1I:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
+ (match_operand:V1I 2 "register_operand" " vr")
+ (match_operand:V1T 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xseg<nf>ei<V1I:sew>.v\t%3,(%z1),%2%p0"
+ [(set_attr "type" "vssegt<order>x")
+ (set_attr "mode" "<V1T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V2T:mode><V2I:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
+ (match_operand:V2I 2 "register_operand" " vr")
+ (match_operand:V2T 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xseg<nf>ei<V2I:sew>.v\t%3,(%z1),%2%p0"
+ [(set_attr "type" "vssegt<order>x")
+ (set_attr "mode" "<V2T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V4T:mode><V4I:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
+ (match_operand:V4I 2 "register_operand" " vr")
+ (match_operand:V4T 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xseg<nf>ei<V4I:sew>.v\t%3,(%z1),%2%p0"
+ [(set_attr "type" "vssegt<order>x")
+ (set_attr "mode" "<V4T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V8T:mode><V8I:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
+ (match_operand:V8I 2 "register_operand" " vr")
+ (match_operand:V8T 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xseg<nf>ei<V8I:sew>.v\t%3,(%z1),%2%p0"
+ [(set_attr "type" "vssegt<order>x")
+ (set_attr "mode" "<V8T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V16T:mode><V16I:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
+ (match_operand:V16I 2 "register_operand" " vr")
+ (match_operand:V16T 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xseg<nf>ei<V16I:sew>.v\t%3,(%z1),%2%p0"
+ [(set_attr "type" "vssegt<order>x")
+ (set_attr "mode" "<V16T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V32T:mode><V32I:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
+ (match_operand:V32I 2 "register_operand" " vr")
+ (match_operand:V32T 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xseg<nf>ei<V32I:sew>.v\t%3,(%z1),%2%p0"
+ [(set_attr "type" "vssegt<order>x")
+ (set_attr "mode" "<V32T:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<V64T:mode><V64I:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_reg_or_0_operand" " rJ")
+ (match_operand:V64I 2 "register_operand" " vr")
+ (match_operand:V64T 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xseg<nf>ei<V64I:sew>.v\t%3,(%z1),%2%p0"
+ [(set_attr "type" "vssegt<order>x")
+ (set_attr "mode" "<V64T:MODE>")])
+
;; -----------------------------------------------------------------------------
;; ---- Integer Compare Instructions Simplification
;; -----------------------------------------------------------------------------