[PATCH 17/17][ARM] Add tests for NEON FP16 ACLE intrinsics.


Support for the half-precision floating-point operations added by the
ARMv8.2-A FP16 extension is based on the macros and intrinsics that the
ACLE defines for the extension.
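
For illustration only (not part of the patch): user code picking up the
new intrinsics would typically be guarded by the ACLE feature-test macro
for vector FP16 arithmetic, roughly as below; the function name is made
up for the example.

  #include <arm_neon.h>

  #if defined (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
  /* Add two vectors of four half-precision floats using the Adv.SIMD
     (NEON) intrinsic exercised by these tests.  */
  float16x4_t
  add_f16x4 (float16x4_t a, float16x4_t b)
  {
    return vadd_f16 (a, b);
  }
  #endif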

This patch adds executable tests for the ACLE Adv.SIMD (NEON) intrinsics
to the advsimd-intrinsics testsuite. The tests were written by Jiong
Wang.
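
Each test follows the existing advsimd-intrinsics pattern; as a rough
outline (using the helper macros from arm-neon-ref.h and
compute-ref-data.h, with vabs_f16 as a representative intrinsic, taken
from the files added below):

  /* Load FP16 inputs, apply the intrinsic under test, store the
     result and compare it against the precomputed expected vector.  */
  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
  VLOAD (vsrc, buf_src, , float, f, 16, 4);
  DECL_VARIABLE (vector_res, float, 16, 4) =
    vabs_f16 (VECT_VAR (vsrc, float, 16, 4));
  vst1_f16 (VECT_VAR (result, float, 16, 4),
	    VECT_VAR (vector_res, float, 16, 4));
  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");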

Tested the series for arm-none-linux-gnueabihf with a native bootstrap
and make check, and for arm-none-eabi and armeb-none-eabi with make
check on an ARMv8.2-A emulator. Also tested for aarch64-none-elf with
the advsimd-intrinsics testsuite using an ARMv8.2-A emulator.

Ok for trunk?
Matthew

testsuite/
2016-05-17  Jiong Wang  <jiong.wang@arm.com>
	    Matthew Wahab  <matthew.wahab@arm.com>

	* gcc.target/aarch64/advsimd-intrinsics/vabd_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vabs_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vadd_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcage_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcagt_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcale_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcalt_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vceq_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vceqz_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcge_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcgez_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcgt_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcgtz_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcle_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vclez_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vclt_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcltz_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_f16_s16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_f16_u16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_n_f16_s16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_n_f16_u16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_n_s16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_n_u16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_s16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_u16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvta_s16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvta_u16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvtm_s16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvtm_u16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvtp_s16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvtp_u16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vfma_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vfms_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vmax_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vmaxnm_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vmin_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vminnm_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vmul_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vmul_lane_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vmul_n_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vneg_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vpadd_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vpmax_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vpmin_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrecpe_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrecps_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrnd_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrnda_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrndm_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrndn_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrndp_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrndx_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrsqrts_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vsub_f16_1.c: New.

From ed12d5911f5cb5634ca6c014a366f4ae7559ad22 Mon Sep 17 00:00:00 2001
From: Matthew Wahab <matthew.wahab@arm.com>
Date: Thu, 7 Apr 2016 15:41:45 +0100
Subject: [PATCH 17/17][ARM] Add tests for NEON FP16 ACLE intrinsics.

testsuite/
2016-05-17  Jiong Wang  <jiong.wang@arm.com>
	    Matthew Wahab  <matthew.wahab@arm.com>

	* gcc.target/aarch64/advsimd-intrinsics/vabd_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vabs_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vadd_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcage_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcagt_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcale_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcalt_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vceq_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vceqz_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcge_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcgez_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcgt_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcgtz_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcle_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vclez_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vclt_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcltz_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_f16_s16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_f16_u16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_n_f16_s16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_n_f16_u16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_n_s16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_n_u16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_s16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvt_u16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvta_s16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvta_u16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvtm_s16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvtm_u16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvtp_s16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vcvtp_u16_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vfma_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vfms_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vmax_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vmaxnm_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vmin_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vminnm_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vmul_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vmul_lane_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vmul_n_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vneg_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vpadd_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vpmax_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vpmin_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrecpe_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrecps_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrnd_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrnda_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrndm_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrndn_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrndp_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrndx_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vrsqrts_f16_1.c: New.
	* gcc.target/aarch64/advsimd-intrinsics/vsub_f16_1.c: New.
---
 .../aarch64/advsimd-intrinsics/vabd_f16_1.c        |  82 +++++++++
 .../aarch64/advsimd-intrinsics/vabs_f16_1.c        |  65 +++++++
 .../aarch64/advsimd-intrinsics/vadd_f16_1.c        |  82 +++++++++
 .../aarch64/advsimd-intrinsics/vcage_f16_1.c       |  77 +++++++++
 .../aarch64/advsimd-intrinsics/vcagt_f16_1.c       |  77 +++++++++
 .../aarch64/advsimd-intrinsics/vcale_f16_1.c       |  78 +++++++++
 .../aarch64/advsimd-intrinsics/vcalt_f16_1.c       |  78 +++++++++
 .../aarch64/advsimd-intrinsics/vceq_f16_1.c        |  76 ++++++++
 .../aarch64/advsimd-intrinsics/vceqz_f16_1.c       |  61 +++++++
 .../aarch64/advsimd-intrinsics/vcge_f16_1.c        |  76 ++++++++
 .../aarch64/advsimd-intrinsics/vcgez_f16_1.c       |  61 +++++++
 .../aarch64/advsimd-intrinsics/vcgt_f16_1.c        |  76 ++++++++
 .../aarch64/advsimd-intrinsics/vcgtz_f16_1.c       |  61 +++++++
 .../aarch64/advsimd-intrinsics/vcle_f16_1.c        |  76 ++++++++
 .../aarch64/advsimd-intrinsics/vclez_f16_1.c       |  61 +++++++
 .../aarch64/advsimd-intrinsics/vclt_f16_1.c        |  77 +++++++++
 .../aarch64/advsimd-intrinsics/vcltz_f16_1.c       |  61 +++++++
 .../aarch64/advsimd-intrinsics/vcvt_f16_s16_1.c    |  70 ++++++++
 .../aarch64/advsimd-intrinsics/vcvt_f16_u16_1.c    |  70 ++++++++
 .../aarch64/advsimd-intrinsics/vcvt_n_f16_s16_1.c  |  73 ++++++++
 .../aarch64/advsimd-intrinsics/vcvt_n_f16_u16_1.c  |  73 ++++++++
 .../aarch64/advsimd-intrinsics/vcvt_n_s16_f16_1.c  |  66 +++++++
 .../aarch64/advsimd-intrinsics/vcvt_n_u16_f16_1.c  |  67 +++++++
 .../aarch64/advsimd-intrinsics/vcvt_s16_f16_1.c    |  65 +++++++
 .../aarch64/advsimd-intrinsics/vcvt_u16_f16_1.c    |  65 +++++++
 .../aarch64/advsimd-intrinsics/vcvta_s16_f16_1.c   |  70 ++++++++
 .../aarch64/advsimd-intrinsics/vcvta_u16_f16_1.c   |  70 ++++++++
 .../aarch64/advsimd-intrinsics/vcvtm_s16_f16_1.c   |  70 ++++++++
 .../aarch64/advsimd-intrinsics/vcvtm_u16_f16_1.c   |  70 ++++++++
 .../aarch64/advsimd-intrinsics/vcvtp_s16_f16_1.c   |  70 ++++++++
 .../aarch64/advsimd-intrinsics/vcvtp_u16_f16_1.c   |  70 ++++++++
 .../aarch64/advsimd-intrinsics/vfma_f16_1.c        | 106 ++++++++++++
 .../aarch64/advsimd-intrinsics/vfms_f16_1.c        | 104 +++++++++++
 .../aarch64/advsimd-intrinsics/vmax_f16_1.c        |  81 +++++++++
 .../aarch64/advsimd-intrinsics/vmaxnm_f16_1.c      |  82 +++++++++
 .../aarch64/advsimd-intrinsics/vmin_f16_1.c        |  81 +++++++++
 .../aarch64/advsimd-intrinsics/vminnm_f16_1.c      |  83 +++++++++
 .../aarch64/advsimd-intrinsics/vmul_f16_1.c        |  82 +++++++++
 .../aarch64/advsimd-intrinsics/vmul_lane_f16_1.c   | 155 +++++++++++++++++
 .../aarch64/advsimd-intrinsics/vmul_n_f16_1.c      | 192 +++++++++++++++++++++
 .../aarch64/advsimd-intrinsics/vneg_f16_1.c        |  65 +++++++
 .../aarch64/advsimd-intrinsics/vpadd_f16_1.c       |  87 ++++++++++
 .../aarch64/advsimd-intrinsics/vpmax_f16_1.c       |  87 ++++++++++
 .../aarch64/advsimd-intrinsics/vpmin_f16_1.c       |  86 +++++++++
 .../aarch64/advsimd-intrinsics/vrecpe_f16_1.c      |  75 ++++++++
 .../aarch64/advsimd-intrinsics/vrecps_f16_1.c      |  86 +++++++++
 .../aarch64/advsimd-intrinsics/vrnd_f16_1.c        |  74 ++++++++
 .../aarch64/advsimd-intrinsics/vrnda_f16_1.c       |  74 ++++++++
 .../aarch64/advsimd-intrinsics/vrndm_f16_1.c       |  74 ++++++++
 .../aarch64/advsimd-intrinsics/vrndn_f16_1.c       |  74 ++++++++
 .../aarch64/advsimd-intrinsics/vrndp_f16_1.c       |  74 ++++++++
 .../aarch64/advsimd-intrinsics/vrndx_f16_1.c       |  74 ++++++++
 .../aarch64/advsimd-intrinsics/vrsqrts_f16_1.c     |  92 ++++++++++
 .../aarch64/advsimd-intrinsics/vsub_f16_1.c        |  82 +++++++++
 54 files changed, 4264 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vabd_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vabs_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vadd_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcage_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcagt_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcale_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcalt_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vceq_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vceqz_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcge_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcgez_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcgt_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcgtz_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcle_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vclez_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vclt_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcltz_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_f16_s16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_f16_u16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_f16_s16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_f16_u16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_s16_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_u16_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_s16_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_u16_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvta_s16_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvta_u16_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtm_s16_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtm_u16_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtp_s16_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtp_u16_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vfma_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vfms_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmax_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmaxnm_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmin_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vminnm_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmul_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmul_lane_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmul_n_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vneg_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vpadd_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vpmax_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vpmin_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecpe_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecps_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrnd_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrnda_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndm_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndn_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndp_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndx_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsqrts_f16_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vsub_f16_1.c

diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vabd_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vabd_f16_1.c
new file mode 100644
index 0000000..34dc784
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vabd_f16_1.c
@@ -0,0 +1,82 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-567.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-8781)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vabd.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {E - A, F - B, -C + G, D - H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {E - A, F - B, -C + G, D - H,
+					     M - I, -N + J, K - O, P - L};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vabd_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VABD (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vabd_f16 (VECT_VAR (vsrc_1, float, 16, 4), VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VABDQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vabdq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+	       VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vabd_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vabs_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vabs_f16_1.c
new file mode 100644
index 0000000..8748726
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vabs_f16_1.c
@@ -0,0 +1,65 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-567.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+/* Expected results for vabs.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {A, -B, -C, D};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {A, -B, -C, D, E, F, -G, H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vabs_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VABS (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vabs_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VABSQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vabsq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vabs_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vadd_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vadd_f16_1.c
new file mode 100644
index 0000000..c741bd4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vadd_f16_1.c
@@ -0,0 +1,82 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-5.8)
+#define C FP16_C (-3.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (-77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-8781)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vadd.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {A + E, B + F, C + G, D + H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {A + E, B + F, C + G, D + H,
+					     I + M, J + N, K + O, L + P};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vadd_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VADD (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vadd_f16 (VECT_VAR (vsrc_1, float, 16, 4), VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VADDQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vaddq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+	       VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vadd_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcage_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcage_f16_1.c
new file mode 100644
index 0000000..1438fe3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcage_f16_1.c
@@ -0,0 +1,77 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-5.8)
+#define C FP16_C (-3.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (-77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-8781)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vcage.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0x0, 0x0, 0x0, 0xFFFF};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0x0, 0x0, 0x0, 0xFFFF,
+					    0x0, 0x0, 0xFFFF, 0x0};
+void
+exec_vcage_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCAGE (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcage_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+	       VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCAGEQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcageq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcage_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcagt_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcagt_f16_1.c
new file mode 100644
index 0000000..a4e1134
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcagt_f16_1.c
@@ -0,0 +1,77 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-5.8)
+#define C FP16_C (-3.8)
+#define D FP16_C (1024)
+#define E FP16_C (123.1)
+#define F FP16_C (169.1)
+#define G FP16_C (3.8)
+#define H FP16_C (-77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-78.3)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vcagt.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0xFFFF, 0x0, 0x0, 0xFFFF};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0xFFFF, 0x0, 0x0, 0xFFFF,
+					    0x0, 0x0, 0xFFFF, 0x0};
+void
+exec_vcagt_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCAGT (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcagt_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+	       VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCAGTQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcagtq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcagt_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcale_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcale_f16_1.c
new file mode 100644
index 0000000..08707d5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcale_f16_1.c
@@ -0,0 +1,78 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-5.8)
+#define C FP16_C (-3.8)
+#define D FP16_C (1024)
+#define E FP16_C (-123.4)
+#define F FP16_C (169.1)
+#define G FP16_C (3.8)
+#define H FP16_C (-77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (78.3)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vcale.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0,
+					    0xFFFF, 0xFFFF, 0x0, 0xFFFF};
+
+void
+exec_vcale_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCALE (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcale_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+	       VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCALEQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcaleq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcale_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcalt_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcalt_f16_1.c
new file mode 100644
index 0000000..ade4db5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcalt_f16_1.c
@@ -0,0 +1,78 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-5.8)
+#define C FP16_C (-3.8)
+#define D FP16_C (1024)
+#define E FP16_C (-123.4)
+#define F FP16_C (169.1)
+#define G FP16_C (3.8)
+#define H FP16_C (-77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (78.3)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vcalt.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0x0, 0xFFFF, 0x0, 0x0};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0x0, 0xFFFF, 0x0, 0x0,
+					    0xFFFF, 0xFFFF, 0x0, 0xFFFF};
+
+void
+exec_vcalt_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCALT (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcalt_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+	       VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCALTQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcaltq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcalt_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vceq_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vceq_f16_1.c
new file mode 100644
index 0000000..1507299
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vceq_f16_1.c
@@ -0,0 +1,76 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-5.8)
+#define C FP16_C (3.8)
+#define D FP16_C (1024)
+#define E FP16_C (-123.4)
+#define F FP16_C (169.1)
+#define G FP16_C (3.8)
+#define H FP16_C (-77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-78)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vceq.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0x0, 0x0, 0xFFFF, 0x0};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0x0, 0x0, 0xFFFF, 0x0,
+					    0x0, 0xFFFF, 0x0, 0x0};
+void
+exec_vceq_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCEQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vceq_f16 (VECT_VAR (vsrc_1, float, 16, 4), VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCEQQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vceqq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vceq_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vceqz_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vceqz_f16_1.c
new file mode 100644
index 0000000..9c43dd1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vceqz_f16_1.c
@@ -0,0 +1,61 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (0)
+#define C FP16_C (-34.8)
+#define D FP16_C (0)
+#define E FP16_C (0)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+/* Expected results for vceqz_f16.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0x0, 0xFFFF, 0x0, 0xFFFF};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0x0, 0xFFFF, 0x0, 0xFFFF,
+					    0xFFFF, 0x0, 0x0, 0x0};
+
+void
+exec_vceqz_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCEQZ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vceqz_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCEQZQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vceqzq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vceqz_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcge_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcge_f16_1.c
new file mode 100644
index 0000000..f59146a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcge_f16_1.c
@@ -0,0 +1,76 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-5.8)
+#define C FP16_C (3.8)
+#define D FP16_C (1024)
+#define E FP16_C (-123.4)
+#define F FP16_C (169.1)
+#define G FP16_C (3.8)
+#define H FP16_C (-77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-78)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vcge.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0xFFFF, 0x0, 0xFFFF, 0xFFFF};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0xFFFF, 0x0, 0xFFFF, 0xFFFF,
+					    0x0, 0xFFFF, 0xFFFF, 0x0};
+void
+exec_vcge_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCGE (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcge_f16 (VECT_VAR (vsrc_1, float, 16, 4), VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCGEQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcgeq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+	       VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcge_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcgez_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcgez_f16_1.c
new file mode 100644
index 0000000..725cb62
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcgez_f16_1.c
@@ -0,0 +1,61 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (0)
+#define C FP16_C (-34.8)
+#define D FP16_C (0)
+#define E FP16_C (0)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+/* Expected results for vcgez_f16.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0xFFFF, 0xFFFF, 0x0, 0xFFFF};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0xFFFF, 0xFFFF, 0x0, 0xFFFF,
+					    0xFFFF, 0xFFFF, 0x0, 0xFFFF};
+
+void
+exec_vcgez_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCGEZ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcgez_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCGEZQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcgezq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcgez_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcgt_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcgt_f16_1.c
new file mode 100644
index 0000000..7ec0fda
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcgt_f16_1.c
@@ -0,0 +1,76 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-5.8)
+#define C FP16_C (3.8)
+#define D FP16_C (1024)
+#define E FP16_C (-123.4)
+#define F FP16_C (169.1)
+#define G FP16_C (3.8)
+#define H FP16_C (-77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-78)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vcgt.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0xFFFF, 0x0, 0x0, 0xFFFF};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0xFFFF, 0x0, 0x0, 0xFFFF,
+					    0x0, 0x0, 0xFFFF, 0x0};
+void
+exec_vcgt_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCGT (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcgt_f16 (VECT_VAR (vsrc_1, float, 16, 4), VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCGTQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcgtq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+	       VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcgt_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcgtz_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcgtz_f16_1.c
new file mode 100644
index 0000000..1864b70
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcgtz_f16_1.c
@@ -0,0 +1,61 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (0)
+#define C FP16_C (-34.8)
+#define D FP16_C (0)
+#define E FP16_C (0)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+/* Expected results for vcgtz_f16.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0xFFFF, 0x0, 0x0, 0x0};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0xFFFF, 0x0, 0x0, 0x0,
+					    0x0, 0xFFFF, 0x0, 0xFFFF};
+
+void
+exec_vcgtz_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCGTZ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcgtz_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCGTZQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcgtzq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcgtz_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcle_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcle_f16_1.c
new file mode 100644
index 0000000..92b1be4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcle_f16_1.c
@@ -0,0 +1,76 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-5.8)
+#define C FP16_C (3.8)
+#define D FP16_C (1024)
+#define E FP16_C (-123.4)
+#define F FP16_C (169.1)
+#define G FP16_C (3.8)
+#define H FP16_C (-77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-78)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vcle.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0x0, 0xFFFF, 0xFFFF, 0x0};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0x0, 0xFFFF, 0xFFFF, 0x0,
+					    0xFFFF, 0xFFFF, 0x0, 0xFFFF};
+void
+exec_vcle_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCLE (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcle_f16 (VECT_VAR (vsrc_1, float, 16, 4), VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCLEQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcleq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+	       VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcle_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vclez_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vclez_f16_1.c
new file mode 100644
index 0000000..c84f0a6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vclez_f16_1.c
@@ -0,0 +1,61 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (0)
+#define C FP16_C (-34.8)
+#define D FP16_C (0)
+#define E FP16_C (0)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+/* Expected results for vclez_f16.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0x0, 0xFFFF, 0xFFFF, 0xFFFF};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0x0, 0xFFFF, 0xFFFF, 0xFFFF,
+					    0xFFFF, 0x0, 0xFFFF, 0x0};
+
+void
+exec_vclez_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCLEZ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vclez_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCLEZQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vclezq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vclez_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vclt_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vclt_f16_1.c
new file mode 100644
index 0000000..6a17c3d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vclt_f16_1.c
@@ -0,0 +1,77 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-5.8)
+#define C FP16_C (3.8)
+#define D FP16_C (1024)
+#define E FP16_C (-123.4)
+#define F FP16_C (169.1)
+#define G FP16_C (3.8)
+#define H FP16_C (-77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-78)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vclt.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0x0, 0xFFFF, 0x0, 0x0};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0x0, 0xFFFF, 0x0, 0x0,
+					    0xFFFF, 0x0, 0x0, 0xFFFF};
+
+void
+exec_vclt_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCLT (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vclt_f16 (VECT_VAR (vsrc_1, float, 16, 4), VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCLTQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcltq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+	       VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vclt_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcltz_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcltz_f16_1.c
new file mode 100644
index 0000000..d9e414b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcltz_f16_1.c
@@ -0,0 +1,61 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (0)
+#define C FP16_C (-34.8)
+#define D FP16_C (0)
+#define E FP16_C (0)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+/* Expected results for vcltz_f16.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {0x0, 0x0, 0xFFFF, 0x0};
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {0x0, 0x0, 0xFFFF, 0x0,
+					    0x0, 0x0, 0xFFFF, 0x0};
+
+void
+exec_vcltz_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCLTZ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcltz_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCLTZQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcltzq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcltz_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_f16_s16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_f16_s16_1.c
new file mode 100644
index 0000000..ccdaa4e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_f16_s16_1.c
@@ -0,0 +1,70 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16(a) ((__fp16) (a))
+#define SHORT(a) ((short) (a))
+#define A SHORT(123)
+#define B SHORT(-567)
+#define C SHORT(-34)
+#define D SHORT(1024)
+#define E SHORT(663)
+#define F SHORT(169)
+#define G SHORT(-4)
+#define H SHORT(77)
+
+/* Expected results for vcvt.f16.s16.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {FP16 (A), FP16 (B),
+					     FP16 (C), FP16 (D)};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {FP16 (A), FP16 (B),
+					     FP16 (C), FP16 (D),
+					     FP16 (E), FP16 (F),
+					     FP16 (G), FP16 (H)};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vcvtf16s16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVT (F16 <- S16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, int, 16, 4);
+  VECT_VAR_DECL (buf_src, int, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , int, s, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vcvt_f16_s16 (VECT_VAR (vsrc, int, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTQ (F16 <- S16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, int, 16, 8);
+  VECT_VAR_DECL (buf_src, int, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, int, s, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vcvtq_f16_s16 (VECT_VAR (vsrc, int, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vcvtf16s16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_f16_u16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_f16_u16_1.c
new file mode 100644
index 0000000..5de5554
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_f16_u16_1.c
@@ -0,0 +1,70 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16(a) ((__fp16) (a))
+#define USHORT(a) ((unsigned short) (a))
+#define A USHORT(123)
+#define B USHORT(-567)
+#define C USHORT(-34)
+#define D USHORT(1024)
+#define E USHORT(663)
+#define F USHORT(169)
+#define G USHORT(-4)
+#define H USHORT(77)
+
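+/* The negative constants wrap modulo 2^16 in the conversion to unsigned
+   short, so B, C and G are large unsigned inputs.  */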
+/* Expected results for vcvt.f16.u16.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {FP16 (A), FP16 (B),
+					     FP16 (C), FP16 (D)};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {FP16 (A), FP16 (B),
+					     FP16 (C), FP16 (D),
+					     FP16 (E), FP16 (F),
+					     FP16 (G), FP16 (H)};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vcvtf16u16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVT (F16 <- U16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, uint, 16, 4);
+  VECT_VAR_DECL (buf_src, uint, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , uint, u, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vcvt_f16_u16 (VECT_VAR (vsrc, uint, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTQ (F16 <- U16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, uint, 16, 8);
+  VECT_VAR_DECL (buf_src, uint, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, uint, u, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vcvtq_f16_u16 (VECT_VAR (vsrc, uint, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vcvtf16u16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_f16_s16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_f16_s16_1.c
new file mode 100644
index 0000000..373d267
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_f16_s16_1.c
@@ -0,0 +1,73 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FRAC_1 1
+#define FRAC_2 2
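+/* The last intrinsic argument is the number of fraction bits; the result
+   is the integer input scaled by 2^-FRAC.  */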
+
+#define FP16(a) ((__fp16) (a))
+#define SHORT(a) ((short) (a))
+#define A SHORT(1)
+#define B SHORT(10)
+#define C SHORT(48)
+#define D SHORT(100)
+#define E SHORT(-1)
+#define F SHORT(-10)
+#define G SHORT(7)
+#define H SHORT(-7)
+
+/* Expected results for vcvt (fixed).f16.s16.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {FP16 (0.5), FP16 (5),
+					     FP16 (24), FP16 (50)};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {FP16 (0.25), FP16 (2.5),
+					     FP16 (12), FP16 (25),
+					     FP16 (-0.25), FP16 (-2.5),
+					     FP16 (1.75), FP16 (-1.75)};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vcvt_n_f16s16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVT Fixed (F16 <- S16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, int, 16, 4);
+  VECT_VAR_DECL (buf_src, int, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , int, s, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vcvt_n_f16_s16 (VECT_VAR (vsrc, int, 16, 4), FRAC_1);
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTQ Fixed (F16 <- S16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, int, 16, 8);
+  VECT_VAR_DECL (buf_src, int, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, int, s, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vcvtq_n_f16_s16 (VECT_VAR (vsrc, int, 16, 8), FRAC_2);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vcvt_n_f16s16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_f16_u16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_f16_u16_1.c
new file mode 100644
index 0000000..fa15f4e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_f16_u16_1.c
@@ -0,0 +1,73 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FRAC_1 1
+#define FRAC_2 2
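+/* The last intrinsic argument is the number of fraction bits; the result
+   is the unsigned input scaled by 2^-FRAC.  */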
+
+#define FP16(a) ((__fp16) (a))
+#define USHORT(a) ((unsigned short) (a))
+#define A USHORT(1)
+#define B USHORT(3)
+#define C USHORT(48)
+#define D USHORT(100)
+#define E USHORT(1000)
+#define F USHORT(4)
+#define G USHORT(0)
+#define H USHORT(9)
+
+/* Expected results for vcvt (fixed).f16.u16.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {FP16 (0.5), FP16 (1.5),
+					     FP16 (24), FP16 (50)};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {FP16 (0.25), FP16 (0.75),
+					     FP16 (12), FP16 (25),
+					     FP16 (250), FP16 (1),
+					     FP16 (0), FP16 (2.25)};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vcvt_n_f16u16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVT Fixed (F16 <- U16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, uint, 16, 4);
+  VECT_VAR_DECL (buf_src, uint, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , uint, u, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vcvt_n_f16_u16 (VECT_VAR (vsrc, uint, 16, 4), FRAC_1);
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTQ Fixed (F16 <- U16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, uint, 16, 8);
+  VECT_VAR_DECL (buf_src, uint, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, uint, u, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vcvtq_n_f16_u16 (VECT_VAR (vsrc, uint, 16, 8), FRAC_2);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vcvt_n_f16u16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_s16_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_s16_f16_1.c
new file mode 100644
index 0000000..cf45635
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_s16_f16_1.c
@@ -0,0 +1,66 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FRAC_1 1
+#define FRAC_2 2
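+/* The input is scaled by 2^FRAC and the result is truncated towards
+   zero.  */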
+
+#define SHORT(a) ((short) (a))
+#define FP16_C(a) ((__fp16) (a))
+#define A FP16_C (2.5)
+#define B FP16_C (100)
+#define C FP16_C (7.1)
+#define D FP16_C (-9.9)
+#define E FP16_C (-5.0)
+#define F FP16_C (9.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+/* Expected results for vcvt (fixed).s16.f16.  */
+VECT_VAR_DECL (expected, int, 16, 4) [] = {5, 200, 14, -19};
+
+VECT_VAR_DECL (expected, int, 16, 8) [] = {10, 400, 28, -39,
+					   -20, 36, -19, 308};
+
+void
+exec_vcvt_n_s16f16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVT Fixed (S16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, int, 16, 4) =
+    vcvt_n_s16_f16 (VECT_VAR (vsrc, float, 16, 4), FRAC_1);
+  vst1_s16 (VECT_VAR (result, int, 16, 4),
+	    VECT_VAR (vector_res, int, 16, 4));
+
+  CHECK (TEST_MSG, int, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTQ Fixed (S16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, int, 16, 8) =
+    vcvtq_n_s16_f16 (VECT_VAR (vsrc, float, 16, 8), FRAC_2);
+  vst1q_s16 (VECT_VAR (result, int, 16, 8),
+	     VECT_VAR (vector_res, int, 16, 8));
+
+  CHECK (TEST_MSG, int, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcvt_n_s16f16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_u16_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_u16_f16_1.c
new file mode 100644
index 0000000..e540f2a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_n_u16_f16_1.c
@@ -0,0 +1,67 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FRAC_1 1
+#define FRAC_2 2
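+/* The input is scaled by 2^FRAC and the result is truncated towards
+   zero.  */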
+
+#define USHORT(a) ((unsigned short) (a))
+#define FP16_C(a) ((__fp16) (a))
+#define A FP16_C (2.5)
+#define B FP16_C (100)
+#define C FP16_C (7.1)
+#define D FP16_C (9.9)
+#define E FP16_C (5.0)
+#define F FP16_C (9.1)
+#define G FP16_C (4.8)
+#define H FP16_C (77)
+
+/* Expected results for vcvt (fixed).u16.f16.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {5, 200, 14, 19};
+
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {10, 400, 28, 39,
+					    20, 36, 19, 308};
+
+void
+exec_vcvt_n_u16f16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVT Fixed (U16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcvt_n_u16_f16 (VECT_VAR (vsrc, float, 16, 4), FRAC_1);
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTQ Fixed (U16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcvtq_n_u16_f16 (VECT_VAR (vsrc, float, 16, 8), FRAC_2);
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcvt_n_u16f16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_s16_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_s16_f16_1.c
new file mode 100644
index 0000000..73ae138
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_s16_f16_1.c
@@ -0,0 +1,65 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define SHORT(a) ((short) (a))
+#define FP16_C(a) ((__fp16) (a))
+#define A FP16_C (123.4)
+#define B FP16_C (-567.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
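+/* VCVT to a signed integer rounds towards zero, matching the C cast used
+   for the expected values below.  */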
+/* Expected results for vcvt.s16.f16.  */
+VECT_VAR_DECL (expected, int, 16, 4) [] = {SHORT (A), SHORT (B),
+					   SHORT (C), SHORT (D)};
+
+VECT_VAR_DECL (expected, int, 16, 8) [] = {SHORT (A), SHORT (B),
+					   SHORT (C), SHORT (D),
+					   SHORT (E), SHORT (F),
+					   SHORT (G), SHORT (H)};
+void
+exec_vcvts16f16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVT (S16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, int, 16, 4) =
+    vcvt_s16_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_s16 (VECT_VAR (result, int, 16, 4),
+	    VECT_VAR (vector_res, int, 16, 4));
+
+  CHECK (TEST_MSG, int, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTQ (S16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, int, 16, 8) =
+    vcvtq_s16_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_s16 (VECT_VAR (result, int, 16, 8),
+	     VECT_VAR (vector_res, int, 16, 8));
+
+  CHECK (TEST_MSG, int, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcvts16f16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_u16_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_u16_f16_1.c
new file mode 100644
index 0000000..5fa7e76
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvt_u16_f16_1.c
@@ -0,0 +1,65 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define USHORT(a) ((unsigned short) (a))
+#define FP16_C(a) ((__fp16) (a))
+#define A FP16_C (123.4)
+#define B FP16_C (567.8)
+#define C FP16_C (34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (4.8)
+#define H FP16_C (77)
+
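+/* The inputs are non-negative: the unsigned conversion saturates negative
+   values to zero, which the C cast used below does not model.  */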
+/* Expected results for vcvt.u16.f16.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {USHORT (A), USHORT (B),
+					    USHORT (C), USHORT (D)};
+
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {USHORT (A), USHORT (B),
+					    USHORT (C), USHORT (D),
+					    USHORT (E), USHORT (F),
+					    USHORT (G), USHORT (H)};
+void
+exec_vcvtu16f16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVT (U16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcvt_u16_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTQ (U16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcvtq_u16_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcvtu16f16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvta_s16_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvta_s16_f16_1.c
new file mode 100644
index 0000000..771c4cd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvta_s16_f16_1.c
@@ -0,0 +1,70 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define SHORT(a) ((short) (a))
+#define FP16_C(a) ((__fp16) (a))
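+
+/* CVTA_* are the expected values: round to nearest with ties rounding
+   away from zero.  */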
+#define A FP16_C (123.4)
+#define CVTA_A SHORT (123)
+#define B FP16_C (-567.8)
+#define CVTA_B SHORT (-568)
+#define C FP16_C (-34.5)
+#define CVTA_C SHORT (-35)
+#define D FP16_C (1024)
+#define CVTA_D SHORT (1024)
+#define E FP16_C (663.1)
+#define CVTA_E SHORT (663)
+#define F FP16_C (169.5)
+#define CVTA_F SHORT (170)
+#define G FP16_C (-4.8)
+#define CVTA_G SHORT (-5)
+#define H FP16_C (77)
+#define CVTA_H SHORT (77)
+
+/* Expected results for vcvta.s16.f16.  */
+VECT_VAR_DECL (expected, int, 16, 4) [] = {CVTA_A, CVTA_B, CVTA_C, CVTA_D};
+
+VECT_VAR_DECL (expected, int, 16, 8) [] = {CVTA_A, CVTA_B, CVTA_C, CVTA_D,
+					   CVTA_E, CVTA_F, CVTA_G, CVTA_H};
+void
+exec_vcvtas16f16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVTA (S16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, int, 16, 4) =
+    vcvta_s16_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_s16 (VECT_VAR (result, int, 16, 4),
+	    VECT_VAR (vector_res, int, 16, 4));
+
+  CHECK (TEST_MSG, int, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTAQ (S16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, int, 16, 8) =
+    vcvtaq_s16_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_s16 (VECT_VAR (result, int, 16, 8),
+	     VECT_VAR (vector_res, int, 16, 8));
+
+  CHECK (TEST_MSG, int, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcvtas16f16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvta_u16_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvta_u16_f16_1.c
new file mode 100644
index 0000000..144223d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvta_u16_f16_1.c
@@ -0,0 +1,70 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define USHORT(a) ((unsigned short) (a))
+#define FP16_C(a) ((__fp16) (a))
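+
+/* CVTA_* are the expected values: round to nearest with ties rounding
+   away from zero.  */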
+#define A FP16_C (123.4)
+#define CVTA_A USHORT (123)
+#define B FP16_C (567.8)
+#define CVTA_B USHORT (568)
+#define C FP16_C (34.5)
+#define CVTA_C USHORT (35)
+#define D FP16_C (1024)
+#define CVTA_D USHORT (1024)
+#define E FP16_C (663)
+#define CVTA_E USHORT (663)
+#define F FP16_C (169)
+#define CVTA_F USHORT (169)
+#define G FP16_C (4.8)
+#define CVTA_G USHORT (5)
+#define H FP16_C (77)
+#define CVTA_H USHORT (77)
+
+/* Expected results for vcvta.u16.f16.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {CVTA_A, CVTA_B, CVTA_C, CVTA_D};
+
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {CVTA_A, CVTA_B, CVTA_C, CVTA_D,
+					    CVTA_E, CVTA_F, CVTA_G, CVTA_H};
+void
+exec_vcvtau16f16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVTA (U16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcvta_u16_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTAQ (U16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcvtaq_u16_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcvtau16f16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtm_s16_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtm_s16_f16_1.c
new file mode 100644
index 0000000..86f7706
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtm_s16_f16_1.c
@@ -0,0 +1,70 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define SHORT(a) ((short) (a))
+#define FP16_C(a) ((__fp16) (a))
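+
+/* CVTM_* are the expected values: round towards minus infinity.  */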
+#define A FP16_C (123.4)
+#define CVTM_A SHORT (123)
+#define B FP16_C (-567.8)
+#define CVTM_B SHORT (-568)
+#define C FP16_C (-34.5)
+#define CVTM_C SHORT (-35)
+#define D FP16_C (1024)
+#define CVTM_D SHORT (1024)
+#define E FP16_C (663.1)
+#define CVTM_E SHORT (663)
+#define F FP16_C (169.5)
+#define CVTM_F SHORT (169)
+#define G FP16_C (-4.8)
+#define CVTM_G SHORT (-5)
+#define H FP16_C (77)
+#define CVTM_H SHORT (77)
+
+/* Expected results for vcvtm.s16.f16.  */
+VECT_VAR_DECL (expected, int, 16, 4) [] = {CVTM_A, CVTM_B, CVTM_C, CVTM_D};
+
+VECT_VAR_DECL (expected, int, 16, 8) [] = {CVTM_A, CVTM_B, CVTM_C, CVTM_D,
+					   CVTM_E, CVTM_F, CVTM_G, CVTM_H};
+void
+exec_vcvtms16f16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVTM (S16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, int, 16, 4) =
+    vcvtm_s16_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_s16 (VECT_VAR (result, int, 16, 4),
+	    VECT_VAR (vector_res, int, 16, 4));
+
+  CHECK (TEST_MSG, int, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTMQ (S16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, int, 16, 8) =
+    vcvtmq_s16_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_s16 (VECT_VAR (result, int, 16, 8),
+	     VECT_VAR (vector_res, int, 16, 8));
+
+  CHECK (TEST_MSG, int, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcvtms16f16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtm_u16_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtm_u16_f16_1.c
new file mode 100644
index 0000000..7159e2a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtm_u16_f16_1.c
@@ -0,0 +1,70 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define USHORT(a) ((unsigned short) (a))
+#define FP16_C(a) ((__fp16) (a))
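+
+/* CVTM_* are the expected values: round towards minus infinity.  */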
+#define A FP16_C (123.4)
+#define CVTM_A USHORT (123)
+#define B FP16_C (567.8)
+#define CVTM_B USHORT (568)
+#define C FP16_C (34.5)
+#define CVTM_C USHORT (34)
+#define D FP16_C (1024.5)
+#define CVTM_D USHORT (1024)
+#define E FP16_C (663)
+#define CVTM_E USHORT (663)
+#define F FP16_C (169.5)
+#define CVTM_F USHORT (169)
+#define G FP16_C (4.8)
+#define CVTM_G USHORT (4)
+#define H FP16_C (77)
+#define CVTM_H USHORT (77)
+
+/* Expected results for vcvtm.u16.f16.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {CVTM_A, CVTM_B, CVTM_C, CVTM_D};
+
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {CVTM_A, CVTM_B, CVTM_C, CVTM_D,
+					    CVTM_E, CVTM_F, CVTM_G, CVTM_H};
+void
+exec_vcvtmu16f16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVTM (U16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcvtm_u16_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTMQ (U16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcvtmq_u16_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcvtmu16f16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtp_s16_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtp_s16_f16_1.c
new file mode 100644
index 0000000..fe79f23
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtp_s16_f16_1.c
@@ -0,0 +1,70 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define SHORT(a) ((short) (a))
+#define FP16_C(a) ((__fp16) (a))
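+
+/* CVTP_* are the expected values: round towards plus infinity.  */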
+#define A FP16_C (123.4)
+#define CVTP_A SHORT (124)
+#define B FP16_C (-567.8)
+#define CVTP_B SHORT (-568)
+#define C FP16_C (-34.5)
+#define CVTP_C SHORT (-34)
+#define D FP16_C (1024)
+#define CVTP_D SHORT (1024)
+#define E FP16_C (663.1)
+#define CVTP_E SHORT (663)
+#define F FP16_C (169.5)
+#define CVTP_F SHORT (170)
+#define G FP16_C (-4.8)
+#define CVTP_G SHORT (-4)
+#define H FP16_C (77)
+#define CVTP_H SHORT (77)
+
+/* Expected results for vcvtp.s16.f16.  */
+VECT_VAR_DECL (expected, int, 16, 4) [] = {CVTP_A, CVTP_B, CVTP_C, CVTP_D};
+
+VECT_VAR_DECL (expected, int, 16, 8) [] = {CVTP_A, CVTP_B, CVTP_C, CVTP_D,
+					   CVTP_E, CVTP_F, CVTP_G, CVTP_H};
+void
+exec_vcvtps16f16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVTP (S16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, int, 16, 4) =
+    vcvtp_s16_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_s16 (VECT_VAR (result, int, 16, 4),
+	    VECT_VAR (vector_res, int, 16, 4));
+
+  CHECK (TEST_MSG, int, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTPQ (S16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, int, 16, 8) =
+    vcvtpq_s16_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_s16 (VECT_VAR (result, int, 16, 8),
+	     VECT_VAR (vector_res, int, 16, 8));
+
+  CHECK (TEST_MSG, int, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcvtps16f16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtp_u16_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtp_u16_f16_1.c
new file mode 100644
index 0000000..4955c61
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vcvtp_u16_f16_1.c
@@ -0,0 +1,70 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define USHORT(a) ((unsigned short) (a))
+#define FP16_C(a) ((__fp16) (a))
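+
+/* CVTP_* are the expected values: round towards plus infinity.  */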
+#define A FP16_C (123.4)
+#define CVTP_A USHORT (124)
+#define B FP16_C (567.8)
+#define CVTP_B USHORT (568)
+#define C FP16_C (34.5)
+#define CVTP_C USHORT (35)
+#define D FP16_C (1024)
+#define CVTP_D USHORT (1024)
+#define E FP16_C (663)
+#define CVTP_E USHORT (663)
+#define F FP16_C (169)
+#define CVTP_F USHORT (169)
+#define G FP16_C (4.8)
+#define CVTP_G USHORT (5)
+#define H FP16_C (77)
+#define CVTP_H USHORT (77)
+
+/* Expected results for vcvtp.u16.f16.  */
+VECT_VAR_DECL (expected, uint, 16, 4) [] = {CVTP_A, CVTP_B, CVTP_C, CVTP_D};
+
+VECT_VAR_DECL (expected, uint, 16, 8) [] = {CVTP_A, CVTP_B, CVTP_C, CVTP_D,
+					    CVTP_E, CVTP_F, CVTP_G, CVTP_H};
+void
+exec_vcvtpu16f16_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VCVTP (U16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, uint, 16, 4) =
+    vcvtp_u16_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_u16 (VECT_VAR (result, uint, 16, 4),
+	    VECT_VAR (vector_res, uint, 16, 4));
+
+  CHECK (TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VCVTPQ (U16 <- F16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, uint, 16, 8) =
+    vcvtpq_u16_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_u16 (VECT_VAR (result, uint, 16, 8),
+	     VECT_VAR (vector_res, uint, 16, 8));
+
+  CHECK (TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+}
+
+int
+main (void)
+{
+  exec_vcvtpu16f16_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vfma_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vfma_f16_1.c
new file mode 100644
index 0000000..51208fe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vfma_f16_1.c
@@ -0,0 +1,106 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A0 FP16_C (123.4)
+#define B0 FP16_C (-5.8)
+#define C0 FP16_C (-3.8)
+#define D0 FP16_C (10)
+
+#define A1 FP16_C (12.4)
+#define B1 FP16_C (-5.8)
+#define C1 FP16_C (90.8)
+#define D1 FP16_C (24)
+
+#define A2 FP16_C (23.4)
+#define B2 FP16_C (-5.8)
+#define C2 FP16_C (8.9)
+#define D2 FP16_C (4)
+
+#define E0 FP16_C (3.4)
+#define F0 FP16_C (-55.8)
+#define G0 FP16_C (-31.8)
+#define H0 FP16_C (2)
+
+#define E1 FP16_C (123.4)
+#define F1 FP16_C (-5.8)
+#define G1 FP16_C (-3.8)
+#define H1 FP16_C (102)
+
+#define E2 FP16_C (4.9)
+#define F2 FP16_C (-15.8)
+#define G2 FP16_C (39.8)
+#define H2 FP16_C (49)
+
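+/* The expected values are computed with the __fp16 operands promoted to
+   single precision and the final result narrowed back to __fp16.  */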
+/* Expected results for vfma.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {A0 + A1 * A2, B0 + B1 * B2,
+					     C0 + C1 * C2, D0 + D1 * D2};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {A0 + A1 * A2, B0 + B1 * B2,
+					     C0 + C1 * C2, D0 + D1 * D2,
+					     E0 + E1 * E2, F0 + F1 * F2,
+					     G0 + G1 * G2, H0 + H1 * H2};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vfma_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VFMA (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  DECL_VARIABLE (vsrc_3, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A0, B0, C0, D0};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {A1, B1, C1, D1};
+  VECT_VAR_DECL (buf_src_3, float, 16, 4) [] = {A2, B2, C2, D2};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  VLOAD (vsrc_3, buf_src_3, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vfma_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+	      VECT_VAR (vsrc_2, float, 16, 4),
+	      VECT_VAR (vsrc_3, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VFMAQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  DECL_VARIABLE (vsrc_3, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A0, B0, C0, D0, E0, F0, G0, H0};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {A1, B1, C1, D1, E1, F1, G1, H1};
+  VECT_VAR_DECL (buf_src_3, float, 16, 8) [] = {A2, B2, C2, D2, E2, F2, G2, H2};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  VLOAD (vsrc_3, buf_src_3, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vfmaq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+	       VECT_VAR (vsrc_2, float, 16, 8),
+	       VECT_VAR (vsrc_3, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vfma_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vfms_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vfms_f16_1.c
new file mode 100644
index 0000000..3876e20
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vfms_f16_1.c
@@ -0,0 +1,104 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A0 FP16_C (123.4)
+#define B0 FP16_C (-5.8)
+#define C0 FP16_C (-3.8)
+#define D0 FP16_C (10)
+
+#define A1 FP16_C (12.4)
+#define B1 FP16_C (-5.8)
+#define C1 FP16_C (90.8)
+#define D1 FP16_C (24)
+
+#define A2 FP16_C (23.4)
+#define B2 FP16_C (-5.8)
+#define C2 FP16_C (8.9)
+#define D2 FP16_C (4)
+
+#define E0 FP16_C (3.4)
+#define F0 FP16_C (-55.8)
+#define G0 FP16_C (-31.8)
+#define H0 FP16_C (2)
+
+#define E1 FP16_C (123.4)
+#define F1 FP16_C (-5.8)
+#define G1 FP16_C (-3.8)
+#define H1 FP16_C (102)
+
+#define E2 FP16_C (4.9)
+#define F2 FP16_C (-15.8)
+#define G2 FP16_C (39.8)
+#define H2 FP16_C (49)
+
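+/* VFMS negates the product, so each expected lane is x + (-y) * z.  */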
+/* Expected results for vfms.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {A0 + -A1 * A2, B0 + -B1 * B2,
+					     C0 + -C1 * C2, D0 + -D1 * D2};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {A0 + -A1 * A2, B0 + -B1 * B2,
+					     C0 + -C1 * C2, D0 + -D1 * D2,
+					     E0 + -E1 * E2, F0 + -F1 * F2,
+					     G0 + -G1 * G2, H0 + -H1 * H2};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vfms_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VFMS (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  DECL_VARIABLE (vsrc_3, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A0, B0, C0, D0};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {A1, B1, C1, D1};
+  VECT_VAR_DECL (buf_src_3, float, 16, 4) [] = {A2, B2, C2, D2};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  VLOAD (vsrc_3, buf_src_3, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vfms_f16 (VECT_VAR (vsrc_1, float, 16, 4), VECT_VAR (vsrc_2, float, 16, 4),
+	      VECT_VAR (vsrc_3, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VFMSQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  DECL_VARIABLE (vsrc_3, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A0, B0, C0, D0, E0, F0, G0, H0};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {A1, B1, C1, D1, E1, F1, G1, H1};
+  VECT_VAR_DECL (buf_src_3, float, 16, 8) [] = {A2, B2, C2, D2, E2, F2, G2, H2};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  VLOAD (vsrc_3, buf_src_3, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vfmsq_f16 (VECT_VAR (vsrc_1, float, 16, 8), VECT_VAR (vsrc_2, float, 16, 8),
+	       VECT_VAR (vsrc_3, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vfms_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmax_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmax_f16_1.c
new file mode 100644
index 0000000..830e439
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmax_f16_1.c
@@ -0,0 +1,81 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-567.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-8781)
+#define O FP16_C (__builtin_inff ()) /* +Inf */
+#define P FP16_C (-__builtin_inff ()) /* -Inf */
+
+/* Expected results for vmax.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {E, F, G, D};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {E, F, G, D, M, J, O, L};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vmax_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VMAX (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vmax_f16 (VECT_VAR (vsrc_1, float, 16, 4), VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VMAXQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vmaxq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+	       VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vmax_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmaxnm_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmaxnm_f16_1.c
new file mode 100644
index 0000000..5b2c991
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmaxnm_f16_1.c
@@ -0,0 +1,82 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (__builtin_nanf ("")) /* NaN */
+#define C FP16_C (-34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (-__builtin_nanf ("")) /* NaN */
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (-1098)
+#define M FP16_C (870.1)
+#define N FP16_C (-8781)
+#define O FP16_C (__builtin_inff ()) /* +Inf */
+#define P FP16_C (-__builtin_inff ()) /* -Inf */
+
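+/* When exactly one operand is a NaN, VMAXNM returns the other (numeric)
+   operand.  */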
+/* Expected results for vmaxnm.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {E, F, G, D};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {E, F, G, D, M, J, O, L};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vmaxnm_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VMAXNM (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vmaxnm_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+		VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VMAXNMQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vmaxnmq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		 VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vmaxnm_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmin_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmin_f16_1.c
new file mode 100644
index 0000000..0ee5b4d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmin_f16_1.c
@@ -0,0 +1,81 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-567.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-8781)
+#define O FP16_C (__builtin_inff ()) /* +Inf */
+#define P FP16_C (-__builtin_inff ()) /* -Inf */
+
+/* Expected results for vmin.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {A, B, C, H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {A, B, C, H, I, N, K, P};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vmin_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VMIN (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vmin_f16 (VECT_VAR (vsrc_1, float, 16, 4), VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VMINQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vminq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+	       VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vmin_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vminnm_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vminnm_f16_1.c
new file mode 100644
index 0000000..75150db
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vminnm_f16_1.c
@@ -0,0 +1,83 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (__builtin_nanf ("")) /* NaN */
+#define C FP16_C (-34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (-__builtin_nanf ("")) /* NaN */
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (-1098)
+#define M FP16_C (870.1)
+#define N FP16_C (-8781)
+#define O FP16_C (__builtin_inff ()) /* +Inf */
+#define P FP16_C (-__builtin_inff ()) /* -Inf */
+
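+/* When exactly one operand is a NaN, VMINNM returns the other (numeric)
+   operand.  */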
+/* Expected results for vminnm.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {A, F, C, D};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {A, F, C, D, I, N, K, P};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vminnm_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VMINNM (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vminnm_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+		VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VMINNMQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vminnmq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		 VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vminnm_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmul_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmul_f16_1.c
new file mode 100644
index 0000000..2bc26d8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmul_f16_1.c
@@ -0,0 +1,82 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (13.4)
+#define B FP16_C (-56.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (12)
+#define E FP16_C (63.1)
+#define F FP16_C (19.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (11.23)
+#define L FP16_C (98)
+#define M FP16_C (87.1)
+#define N FP16_C (-8)
+#define O FP16_C (-1.1)
+#define P FP16_C (-9.7)
+
+/* Expected results for vmul.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {A * E, B * F, C * G, D * H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {A * E, B * F, C * G, D * H,
+					     I * M, J * N, K * O, L * P};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vmul_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VMUL (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vmul_f16 (VECT_VAR (vsrc_1, float, 16, 4), VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VMULQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vmulq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+	       VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vmul_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmul_lane_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmul_lane_f16_1.c
new file mode 100644
index 0000000..fa82f19
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmul_lane_f16_1.c
@@ -0,0 +1,155 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (13.4)
+#define B FP16_C (-56.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (12)
+#define E FP16_C (63.1)
+#define F FP16_C (19.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (11.23)
+#define L FP16_C (98)
+#define M FP16_C (87.1)
+#define N FP16_C (-8)
+#define O FP16_C (-1.1)
+#define P FP16_C (-9.7)
+
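+/* Each expected vector multiplies every element of the first operand by
+   a single lane of the second operand.  */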
+/* Expected results for vmul_lane.  */
+VECT_VAR_DECL (expected0, float, 16, 4) [] = {A * E, B * E, C * E, D * E};
+hfloat16_t * VECT_VAR (expected0_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected0, float, 16, 4);
+
+VECT_VAR_DECL (expected1, float, 16, 4) [] = {A * F, B * F, C * F, D * F};
+hfloat16_t * VECT_VAR (expected1_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected1, float, 16, 4);
+
+VECT_VAR_DECL (expected2, float, 16, 4) [] = {A * G, B * G, C * G, D * G};
+hfloat16_t * VECT_VAR (expected2_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected2, float, 16, 4);
+
+VECT_VAR_DECL (expected3, float, 16, 4) [] = {A * H, B * H, C * H, D * H};
+hfloat16_t * VECT_VAR (expected3_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected3, float, 16, 4);
+
+VECT_VAR_DECL (expected0, float, 16, 8) [] = {A * E, B * E, C * E, D * E,
+					     I * E, J * E, K * E, L * E};
+hfloat16_t * VECT_VAR (expected0_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected0, float, 16, 8);
+
+VECT_VAR_DECL (expected1, float, 16, 8) [] = {A * F, B * F, C * F, D * F,
+					      I * F, J * F, K * F, L * F};
+hfloat16_t * VECT_VAR (expected1_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected1, float, 16, 8);
+
+VECT_VAR_DECL (expected2, float, 16, 8) [] = {A * G, B * G, C * G, D * G,
+					      I * G, J * G, K * G, L * G};
+hfloat16_t * VECT_VAR (expected2_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected2, float, 16, 8);
+
+VECT_VAR_DECL (expected3, float, 16, 8) [] = {A * H, B * H, C * H, D * H,
+					      I * H, J * H, K * H, L * H};
+hfloat16_t * VECT_VAR (expected3_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected3, float, 16, 8);
+
+void
+exec_vmul_lane_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VMUL_LANE (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vmul_lane_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+		   VECT_VAR (vsrc_2, float, 16, 4), 0);
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected0_static, "");
+
+  VECT_VAR (vector_res, float, 16, 4) =
+    vmul_lane_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+		   VECT_VAR (vsrc_2, float, 16, 4), 1);
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected1_static, "");
+
+  VECT_VAR (vector_res, float, 16, 4) =
+    vmul_lane_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+		   VECT_VAR (vsrc_2, float, 16, 4), 2);
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected2_static, "");
+
+  VECT_VAR (vector_res, float, 16, 4) =
+    vmul_lane_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+		   VECT_VAR (vsrc_2, float, 16, 4), 3);
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected3_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VMULQ_LANE (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vmulq_lane_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		    VECT_VAR (vsrc_2, float, 16, 4), 0);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected0_static, "");
+
+  VECT_VAR (vector_res, float, 16, 8) =
+    vmulq_lane_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		    VECT_VAR (vsrc_2, float, 16, 4), 1);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected1_static, "");
+
+  VECT_VAR (vector_res, float, 16, 8) =
+    vmulq_lane_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		    VECT_VAR (vsrc_2, float, 16, 4), 2);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected2_static, "");
+
+  VECT_VAR (vector_res, float, 16, 8) =
+    vmulq_lane_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		    VECT_VAR (vsrc_2, float, 16, 4), 3);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected3_static, "");
+}
+
+int
+main (void)
+{
+  exec_vmul_lane_f16 ();
+  return 0;
+}
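For reference, vmul_lane_f16 multiplies every element of its first operand by the single element of its second operand selected by the lane index, which is why expected0 through expected3 above are the A..D (and, for the q form, I..L) values scaled by E, F, G and H in turn. A minimal sketch, assuming a target with the ARMv8.2-A FP16 extension enabled (the function name is illustrative, not part of the patch):

  #include <arm_neon.h>

  /* Scale every element of A by lane 1 of V, i.e. {a0*v1, a1*v1, a2*v1, a3*v1}.  */
  float16x4_t
  scale_by_lane1 (float16x4_t a, float16x4_t v)
  {
    return vmul_lane_f16 (a, v, 1);
  }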
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmul_n_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmul_n_f16_1.c
new file mode 100644
index 0000000..3e633d6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmul_n_f16_1.c
@@ -0,0 +1,192 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (13.4)
+#define B FP16_C (-56.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (12)
+#define E FP16_C (63.1)
+#define F FP16_C (19.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (11.23)
+#define L FP16_C (98)
+#define M FP16_C (87.1)
+#define N FP16_C (-8)
+#define O FP16_C (-1.1)
+#define P FP16_C (-9.7)
+
+/* Expected results for vmul_n.  */
+VECT_VAR_DECL (expected0, float, 16, 4) [] = {A * E, B * E, C * E, D * E};
+hfloat16_t * VECT_VAR (expected0_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected0, float, 16, 4);
+
+VECT_VAR_DECL (expected1, float, 16, 4) [] = {A * F, B * F, C * F, D * F};
+hfloat16_t * VECT_VAR (expected1_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected1, float, 16, 4);
+
+VECT_VAR_DECL (expected2, float, 16, 4) [] = {A * G, B * G, C * G, D * G};
+hfloat16_t * VECT_VAR (expected2_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected2, float, 16, 4);
+
+VECT_VAR_DECL (expected3, float, 16, 4) [] = {A * H, B * H, C * H, D * H};
+hfloat16_t * VECT_VAR (expected3_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected3, float, 16, 4);
+
+VECT_VAR_DECL (expected0, float, 16, 8) [] = {A * E, B * E, C * E, D * E,
+					     I * E, J * E, K * E, L * E};
+hfloat16_t * VECT_VAR (expected0_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected0, float, 16, 8);
+
+VECT_VAR_DECL (expected1, float, 16, 8) [] = {A * F, B * F, C * F, D * F,
+					      I * F, J * F, K * F, L * F};
+hfloat16_t * VECT_VAR (expected1_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected1, float, 16, 8);
+
+VECT_VAR_DECL (expected2, float, 16, 8) [] = {A * G, B * G, C * G, D * G,
+					      I * G, J * G, K * G, L * G};
+hfloat16_t * VECT_VAR (expected2_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected2, float, 16, 8);
+
+VECT_VAR_DECL (expected3, float, 16, 8) [] = {A * H, B * H, C * H, D * H,
+					      I * H, J * H, K * H, L * H};
+hfloat16_t * VECT_VAR (expected3_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected3, float, 16, 8);
+
+VECT_VAR_DECL (expected4, float, 16, 8) [] = {A * M, B * M, C * M, D * M,
+					      I * M, J * M, K * M, L * M};
+hfloat16_t * VECT_VAR (expected4_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected4, float, 16, 8);
+
+VECT_VAR_DECL (expected5, float, 16, 8) [] = {A * N, B * N, C * N, D * N,
+					      I * N, J * N, K * N, L * N};
+hfloat16_t * VECT_VAR (expected5_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected5, float, 16, 8);
+
+VECT_VAR_DECL (expected6, float, 16, 8) [] = {A * O, B * O, C * O, D * O,
+					      I * O, J * O, K * O, L * O};
+hfloat16_t * VECT_VAR (expected6_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected6, float, 16, 8);
+
+VECT_VAR_DECL (expected7, float, 16, 8) [] = {A * P, B * P, C * P, D * P,
+					      I * P, J * P, K * P, L * P};
+hfloat16_t * VECT_VAR (expected7_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected7, float, 16, 8);
+
+void
+exec_vmul_n_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VMUL_N (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vmul_n_f16 (VECT_VAR (vsrc_1, float, 16, 4), E);
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected0_static, "");
+
+  VECT_VAR (vector_res, float, 16, 4) =
+    vmul_n_f16 (VECT_VAR (vsrc_1, float, 16, 4), F);
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected1_static, "");
+
+  VECT_VAR (vector_res, float, 16, 4) =
+    vmul_n_f16 (VECT_VAR (vsrc_1, float, 16, 4), G);
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected2_static, "");
+
+  VECT_VAR (vector_res, float, 16, 4) =
+    vmul_n_f16 (VECT_VAR (vsrc_1, float, 16, 4), H);
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected3_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VMULQ_N (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vmulq_n_f16 (VECT_VAR (vsrc_1, float, 16, 8), E);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected0_static, "");
+
+  VECT_VAR (vector_res, float, 16, 8) =
+    vmulq_n_f16 (VECT_VAR (vsrc_1, float, 16, 8), F);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected1_static, "");
+
+  VECT_VAR (vector_res, float, 16, 8) =
+    vmulq_n_f16 (VECT_VAR (vsrc_1, float, 16, 8), G);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected2_static, "");
+
+  VECT_VAR (vector_res, float, 16, 8) =
+    vmulq_n_f16 (VECT_VAR (vsrc_1, float, 16, 8), H);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected3_static, "");
+
+  VECT_VAR (vector_res, float, 16, 8) =
+    vmulq_n_f16 (VECT_VAR (vsrc_1, float, 16, 8), M);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected4_static, "");
+
+  VECT_VAR (vector_res, float, 16, 8) =
+    vmulq_n_f16 (VECT_VAR (vsrc_1, float, 16, 8), N);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected5_static, "");
+
+  VECT_VAR (vector_res, float, 16, 8) =
+    vmulq_n_f16 (VECT_VAR (vsrc_1, float, 16, 8), O);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected6_static, "");
+
+  VECT_VAR (vector_res, float, 16, 8) =
+    vmulq_n_f16 (VECT_VAR (vsrc_1, float, 16, 8), P);
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected7_static, "");
+}
+
+int
+main (void)
+{
+  exec_vmul_n_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vneg_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vneg_f16_1.c
new file mode 100644
index 0000000..515bb4d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vneg_f16_1.c
@@ -0,0 +1,65 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-567.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+/* Expected results for vneg.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {-A, -B, -C, -D};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {-A, -B, -C, -D, -E, -F, -G, -H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vneg_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VNEG (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vneg_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VNEGQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vnegq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vneg_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vpadd_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vpadd_f16_1.c
new file mode 100644
index 0000000..a5b48a1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vpadd_f16_1.c
@@ -0,0 +1,87 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-567.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-8781)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vpadd.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {A + B, C + D, E + F, G + H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {A + B, C + D, I + J, K + L,
+					     E + F, G + H, M + N, O + P};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vpadd_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VPADD (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vpadd_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+	       VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#ifdef __ARM_ARCH_ISA_A64
+
+#undef TEST_MSG
+#define TEST_MSG "VPADDQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vpaddq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+
+#endif
+}
+
+int
+main (void)
+{
+  exec_vpadd_f16 ();
+  return 0;
+}
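A note on the layout of the expected values above: the pairwise intrinsics reduce adjacent pairs within each operand and then concatenate the per-operand results, so vpadd_f16 ({A,B,C,D}, {E,F,G,H}) gives {A+B, C+D, E+F, G+H}, and the q form places the four sums from the first operand before the four from the second. The vpmax and vpmin tests that follow use the same layout with max and min in place of the addition. A minimal sketch of a common use of this property, assuming an FP16-capable target (the helper name is illustrative only):

  #include <arm_neon.h>

  /* Horizontal sum of a 4-element half-precision vector using two pairwise adds.  */
  float16_t
  hsum_f16x4 (float16x4_t a)
  {
    float16x4_t t = vpadd_f16 (a, a);   /* {a0+a1, a2+a3, a0+a1, a2+a3}  */
    t = vpadd_f16 (t, t);               /* lane 0 now holds a0+a1+a2+a3  */
    return vget_lane_f16 (t, 0);
  }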
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vpmax_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vpmax_f16_1.c
new file mode 100644
index 0000000..af310b1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vpmax_f16_1.c
@@ -0,0 +1,87 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-567.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-8781)
+#define O FP16_C (__builtin_inff ()) /* +Inf */
+#define P FP16_C (-__builtin_inff ()) /* -Inf */
+
+
+/* Expected results for vpmax.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {A, D, E, H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {A, D, I, K, E, H, M, O};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vpmax_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VPMAX (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vpmax_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+	       VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#ifdef __ARM_ARCH_ISA_A64
+
+#undef TEST_MSG
+#define TEST_MSG "VPMAXQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vpmaxq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+
+#endif
+}
+
+int
+main (void)
+{
+  exec_vpmax_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vpmin_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vpmin_f16_1.c
new file mode 100644
index 0000000..7230176
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vpmin_f16_1.c
@@ -0,0 +1,86 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-567.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-8781)
+#define O FP16_C (__builtin_inff ()) /* +Inf */
+#define P FP16_C (-__builtin_inff ()) /* -Inf */
+
+/* Expected results for vpmin.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {B, C, F, G};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {B, C, J, L, F, G, N, P};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vpmin_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VPMIN (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vpmin_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+	       VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#ifdef __ARM_ARCH_ISA_A64
+
+#undef TEST_MSG
+#define TEST_MSG "VPMINQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vpminq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+
+#endif
+}
+
+int
+main (void)
+{
+  exec_vpmin_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecpe_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecpe_f16_1.c
new file mode 100644
index 0000000..12cf5ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecpe_f16_1.c
@@ -0,0 +1,75 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (567.8)
+#define C FP16_C (34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (144.0)
+#define G FP16_C (4.8)
+#define H FP16_C (77)
+
+#define RECP_A FP16_C (1/A)
+#define RECP_B FP16_C (1/B)
+#define RECP_C FP16_C (1/C)
+#define RECP_D FP16_C (1/D)
+#define RECP_E FP16_C (1/E)
+#define RECP_F FP16_C (1/F)
+#define RECP_G FP16_C (1/G)
+#define RECP_H FP16_C (1/H)
+
+/* Expected results for vrecpe.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {RECP_A, RECP_B, RECP_C, RECP_D};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {RECP_A, RECP_B, RECP_C, RECP_D,
+					     RECP_E, RECP_F, RECP_G, RECP_H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vrecpe_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VRECPE (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vrecpe_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP_BIAS (TEST_MSG, float, 16, 4, PRIx16, expected_static, "", 5);
+
+#undef TEST_MSG
+#define TEST_MSG "VRECPEQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vrecpeq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP_BIAS (TEST_MSG, float, 16, 8, PRIx16, expected_static, "", 5);
+}
+
+int
+main (void)
+{
+  exec_vrecpe_f16 ();
+  return 0;
+}
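The expected values in this test are the exact reciprocals (RECP_x = 1/x), while FRECPE only returns an estimate, which is why the comparisons use CHECK_FP_BIAS with a tolerance instead of the exact CHECK_FP used by most of the other tests. The estimate is normally refined with vrecps; a minimal sketch of the usual idiom, assuming an FP16-capable target (the helper name is illustrative only):

  #include <arm_neon.h>

  /* Approximate 1/d per lane: reciprocal estimate followed by one
     Newton-Raphson step x' = x * (2 - d*x), built from vrecps.  */
  float16x4_t
  recip_f16x4 (float16x4_t d)
  {
    float16x4_t x = vrecpe_f16 (d);
    return vmul_f16 (x, vrecps_f16 (d, x));
  }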
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecps_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecps_f16_1.c
new file mode 100644
index 0000000..1e9c511
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecps_f16_1.c
@@ -0,0 +1,86 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (12.4)
+#define B FP16_C (-5.8)
+#define C FP16_C (-3.8)
+#define D FP16_C (10)
+#define E FP16_C (66.1)
+#define F FP16_C (16.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (-77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (10.23)
+#define L FP16_C (98)
+#define M FP16_C (87)
+#define N FP16_C (-87.81)
+#define O FP16_C (-1.1)
+#define P FP16_C (47.8)
+
+/* Expected results for vrecps.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {2.0f - A * E, 2.0f - B * F,
+					     2.0f - C * G, 2.0f - D * H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {2.0f - A * E, 2.0f - B * F,
+					     2.0f - C * G, 2.0f - D * H,
+					     2.0f - I * M, 2.0f - J * N,
+					     2.0f - K * O, 2.0f - L * P};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vrecps_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VRECPS (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vrecps_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+		VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VRECPSQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vrecpsq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		 VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP_BIAS (TEST_MSG, float, 16, 8, PRIx16, expected_static, "", 1);
+}
+
+int
+main (void)
+{
+  exec_vrecps_f16 ();
+  return 0;
+}
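The expected arrays here follow directly from the definition of the step intrinsic: vrecps_f16 (a, b) computes 2 - a*b per lane, the correction factor used in the Newton-Raphson reciprocal refinement sketched after the vrecpe test above. A scalar model of the per-lane computation, for reference only (the helper name is illustrative):

  /* Per-lane model of vrecps_f16 (a, b), matching the expected values
     2.0f - A * E etc. in this test.  */
  static inline float
  recps_model (float a, float b)
  {
    return 2.0f - a * b;
  }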
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrnd_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrnd_f16_1.c
new file mode 100644
index 0000000..99ea119
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrnd_f16_1.c
@@ -0,0 +1,74 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define RND_A FP16_C (123)
+#define B FP16_C (-567.5)
+#define RND_B FP16_C (-567)
+#define C FP16_C (-34.8)
+#define RND_C FP16_C (-34)
+#define D FP16_C (1024)
+#define RND_D FP16_C (1024)
+#define E FP16_C (663.1)
+#define RND_E FP16_C (663)
+#define F FP16_C (169.1)
+#define RND_F FP16_C (169)
+#define G FP16_C (-4.8)
+#define RND_G FP16_C (-4)
+#define H FP16_C (77)
+#define RND_H FP16_C (77)
+
+/* Expected results for vrnd.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {RND_A, RND_B, RND_C, RND_D};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {RND_A, RND_B, RND_C, RND_D,
+					     RND_E, RND_F, RND_G, RND_H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vrnd_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VRND (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vrnd_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VRNDQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vrndq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vrnd_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrnda_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrnda_f16_1.c
new file mode 100644
index 0000000..86b7fb6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrnda_f16_1.c
@@ -0,0 +1,74 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define RNDA_A FP16_C (123)
+#define B FP16_C (-567.5)
+#define RNDA_B FP16_C (-568)
+#define C FP16_C (-34.8)
+#define RNDA_C FP16_C (-35)
+#define D FP16_C (1024)
+#define RNDA_D FP16_C (1024)
+#define E FP16_C (663.1)
+#define RNDA_E FP16_C (663)
+#define F FP16_C (169.1)
+#define RNDA_F FP16_C (169)
+#define G FP16_C (-4.8)
+#define RNDA_G FP16_C (-5)
+#define H FP16_C (77.5)
+#define RNDA_H FP16_C (78)
+
+/* Expected results for vrnda.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {RNDA_A, RNDA_B, RNDA_C, RNDA_D};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {RNDA_A, RNDA_B, RNDA_C, RNDA_D,
+					     RNDA_E, RNDA_F, RNDA_G, RNDA_H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vrnda_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VRNDA (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vrnda_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VRNDAQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vrndaq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vrnda_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndm_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndm_f16_1.c
new file mode 100644
index 0000000..904c265
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndm_f16_1.c
@@ -0,0 +1,74 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define RNDM_A FP16_C (123)
+#define B FP16_C (-567.5)
+#define RNDM_B FP16_C (-568)
+#define C FP16_C (-34.8)
+#define RNDM_C FP16_C (-35)
+#define D FP16_C (1024)
+#define RNDM_D FP16_C (1024)
+#define E FP16_C (663.1)
+#define RNDM_E FP16_C (663)
+#define F FP16_C (169.1)
+#define RNDM_F FP16_C (169)
+#define G FP16_C (-4.8)
+#define RNDM_G FP16_C (-5)
+#define H FP16_C (77.5)
+#define RNDM_H FP16_C (77)
+
+/* Expected results for vrndm.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {RNDM_A, RNDM_B, RNDM_C, RNDM_D};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {RNDM_A, RNDM_B, RNDM_C, RNDM_D,
+					     RNDM_E, RNDM_F, RNDM_G, RNDM_H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vrndm_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VRNDM (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vrndm_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VRNDMQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vrndmq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vrndm_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndn_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndn_f16_1.c
new file mode 100644
index 0000000..c94fd54
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndn_f16_1.c
@@ -0,0 +1,74 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define RNDN_A FP16_C (123)
+#define B FP16_C (-567.5)
+#define RNDN_B FP16_C (-568)
+#define C FP16_C (-34.8)
+#define RNDN_C FP16_C (-35)
+#define D FP16_C (1024)
+#define RNDN_D FP16_C (1024)
+#define E FP16_C (663.1)
+#define RNDN_E FP16_C (663)
+#define F FP16_C (169.1)
+#define RNDN_F FP16_C (169)
+#define G FP16_C (-4.8)
+#define RNDN_G FP16_C (-5)
+#define H FP16_C (77)
+#define RNDN_H FP16_C (77)
+
+/* Expected results for vrndn.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {RNDN_A, RNDN_B, RNDN_C, RNDN_D};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {RNDN_A, RNDN_B, RNDN_C, RNDN_D,
+					     RNDN_E, RNDN_F, RNDN_G, RNDN_H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vrndn_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VRNDN (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vrndn_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VRNDNQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vrndnq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vrndn_f16 ();
+  return 0;
+}
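vrndn rounds to nearest with ties going to the even neighbour, which is what separates the RNDN_* constants from the otherwise similar vrnda (ties away from zero) and vrndm (toward minus infinity) tests earlier: B = -567.5 becomes -568 here because -568 is even, and H = 77 is already integral. A minimal sketch contrasting the rounding variants used by these tests, assuming an FP16-capable target (the function name and the example input are illustrative only):

  #include <arm_neon.h>

  /* For x = -4.5 (not one of the test constants): vrndn -> -4 (ties to even),
     vrnda -> -5 (ties away from zero), vrndm -> -5 (toward -Inf),
     vrnd -> -4 (toward zero).  */
  void
  round_variants (float16x4_t x, float16x4_t *n, float16x4_t *a,
		  float16x4_t *m, float16x4_t *z)
  {
    *n = vrndn_f16 (x);
    *a = vrnda_f16 (x);
    *m = vrndm_f16 (x);
    *z = vrnd_f16 (x);
  }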
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndp_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndp_f16_1.c
new file mode 100644
index 0000000..6bcab83
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndp_f16_1.c
@@ -0,0 +1,74 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define RNDP_A FP16_C (124)
+#define B FP16_C (-567.5)
+#define RNDP_B FP16_C (-567)
+#define C FP16_C (-34.8)
+#define RNDP_C FP16_C (-34)
+#define D FP16_C (1024)
+#define RNDP_D FP16_C (1024)
+#define E FP16_C (163.1)
+#define RNDP_E FP16_C (164)
+#define F FP16_C (169.1)
+#define RNDP_F FP16_C (170)
+#define G FP16_C (-4.8)
+#define RNDP_G FP16_C (-4)
+#define H FP16_C (77.5)
+#define RNDP_H FP16_C (78)
+
+/* Expected results for vrndp.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {RNDP_A, RNDP_B, RNDP_C, RNDP_D};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {RNDP_A, RNDP_B, RNDP_C, RNDP_D,
+					     RNDP_E, RNDP_F, RNDP_G, RNDP_H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vrndp_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VRNDP (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vrndp_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VRNDPQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vrndpq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vrndp_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndx_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndx_f16_1.c
new file mode 100644
index 0000000..2413dc4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrndx_f16_1.c
@@ -0,0 +1,74 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define RNDX_A FP16_C (123)
+#define B FP16_C (-567.8)
+#define RNDX_B FP16_C (-568)
+#define C FP16_C (-34.8)
+#define RNDX_C FP16_C (-35)
+#define D FP16_C (1024)
+#define RNDX_D FP16_C (1024)
+#define E FP16_C (663.1)
+#define RNDX_E FP16_C (663)
+#define F FP16_C (169.1)
+#define RNDX_F FP16_C (169)
+#define G FP16_C (-4.8)
+#define RNDX_G FP16_C (-5)
+#define H FP16_C (77)
+#define RNDX_H FP16_C (77)
+
+/* Expected results for vrndx.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {RNDX_A, RNDX_B, RNDX_C, RNDX_D};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {RNDX_A, RNDX_B, RNDX_C, RNDX_D,
+					     RNDX_E, RNDX_F, RNDX_G, RNDX_H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vrndx_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VRNDX (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 4);
+  VECT_VAR_DECL (buf_src, float, 16, 4) [] = {A, B, C, D};
+  VLOAD (vsrc, buf_src, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vrndx_f16 (VECT_VAR (vsrc, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VRNDXQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc, float, 16, 8);
+  VECT_VAR_DECL (buf_src, float, 16, 8) [] = {A, B, C, D, E, F, G, H};
+  VLOAD (vsrc, buf_src, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vrndxq_f16 (VECT_VAR (vsrc, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vrndx_f16 ();
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsqrts_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsqrts_f16_1.c
new file mode 100644
index 0000000..d8b7ee3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsqrts_f16_1.c
@@ -0,0 +1,92 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (12.4)
+#define B FP16_C (-5.8)
+#define C FP16_C (-3.8)
+#define D FP16_C (10)
+#define E FP16_C (66.1)
+#define F FP16_C (16.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (-77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (10.23)
+#define L FP16_C (98)
+#define M FP16_C (87)
+#define N FP16_C (-87.81)
+#define O FP16_C (-1.1)
+#define P FP16_C (47.8)
+
+/* Expected results for vrsqrts.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {(3.0f + (-A) * E) / 2.0f,
+					     (3.0f + (-B) * F) / 2.0f,
+					     (3.0f + (-C) * G) / 2.0f,
+					     (3.0f + (-D) * H) / 2.0f};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {(3.0f + (-A) * E) / 2.0f,
+					     (3.0f + (-B) * F) / 2.0f,
+					     (3.0f + (-C) * G) / 2.0f,
+					     (3.0f + (-D) * H) / 2.0f,
+					     (3.0f + (-I) * M) / 2.0f,
+					     (3.0f + (-J) * N) / 2.0f,
+					     (3.0f + (-K) * O) / 2.0f,
+					     (3.0f + (-L) * P) / 2.0f};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vrsqrts_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VRSQRTS (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vrsqrts_f16 (VECT_VAR (vsrc_1, float, 16, 4),
+		 VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VRSQRTSQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vrsqrtsq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+		  VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP_BIAS (TEST_MSG, float, 16, 8, PRIx16, expected_static, "", 1);
+}
+
+int
+main (void)
+{
+  exec_vrsqrts_f16 ();
+  return 0;
+}
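As with vrecps, the expected values encode the step intrinsic's definition: vrsqrts_f16 (a, b) computes (3 - a*b)/2 per lane, the correction factor for a Newton-Raphson reciprocal square root refinement. A minimal sketch of the usual refinement idiom, assuming an FP16-capable target (the helper name is illustrative only; vrsqrte_f16 is the matching estimate intrinsic from the same ACLE FP16 set, not part of this file):

  #include <arm_neon.h>

  /* Approximate 1/sqrt(d) per lane: estimate, then one refinement step
     x' = x * (3 - d*x*x) / 2 built from vrsqrts.  */
  float16x4_t
  rsqrt_f16x4 (float16x4_t d)
  {
    float16x4_t x = vrsqrte_f16 (d);
    return vmul_f16 (x, vrsqrts_f16 (vmul_f16 (d, x), x));
  }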
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vsub_f16_1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vsub_f16_1.c
new file mode 100644
index 0000000..d54b011
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vsub_f16_1.c
@@ -0,0 +1,82 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_hw } */
+/* { dg-add-options arm_v8_2a_fp16_neon }  */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FP16_C(a) ((__fp16) a)
+#define A FP16_C (123.4)
+#define B FP16_C (-567.8)
+#define C FP16_C (-34.8)
+#define D FP16_C (1024)
+#define E FP16_C (663.1)
+#define F FP16_C (169.1)
+#define G FP16_C (-4.8)
+#define H FP16_C (77)
+
+#define I FP16_C (0.7)
+#define J FP16_C (-78)
+#define K FP16_C (101.23)
+#define L FP16_C (98)
+#define M FP16_C (870.1)
+#define N FP16_C (-8781)
+#define O FP16_C (-1.1)
+#define P FP16_C (47823)
+
+/* Expected results for vsub.  */
+VECT_VAR_DECL (expected, float, 16, 4) [] = {A - E, B - F, C - G, D - H};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 4) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 4);
+
+VECT_VAR_DECL (expected, float, 16, 8) [] = {A - E, B - F, C - G, D - H,
+					     I - M, J - N, K - O, L - P};
+hfloat16_t * VECT_VAR (expected_static, hfloat, 16, 8) =
+  (hfloat16_t *) VECT_VAR (expected, float, 16, 8);
+
+void
+exec_vsub_f16 (void)
+{
+#undef TEST_MSG
+#define TEST_MSG "VSUB (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 4);
+  DECL_VARIABLE (vsrc_2, float, 16, 4);
+  VECT_VAR_DECL (buf_src_1, float, 16, 4) [] = {A, B, C, D};
+  VECT_VAR_DECL (buf_src_2, float, 16, 4) [] = {E, F, G, H};
+  VLOAD (vsrc_1, buf_src_1, , float, f, 16, 4);
+  VLOAD (vsrc_2, buf_src_2, , float, f, 16, 4);
+  DECL_VARIABLE (vector_res, float, 16, 4) =
+    vsub_f16 (VECT_VAR (vsrc_1, float, 16, 4), VECT_VAR (vsrc_2, float, 16, 4));
+  vst1_f16 (VECT_VAR (result, float, 16, 4),
+	    VECT_VAR (vector_res, float, 16, 4));
+
+  CHECK_FP (TEST_MSG, float, 16, 4, PRIx16, expected_static, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VSUBQ (FP16)"
+  clean_results ();
+
+  DECL_VARIABLE (vsrc_1, float, 16, 8);
+  DECL_VARIABLE (vsrc_2, float, 16, 8);
+  VECT_VAR_DECL (buf_src_1, float, 16, 8) [] = {A, B, C, D, I, J, K, L};
+  VECT_VAR_DECL (buf_src_2, float, 16, 8) [] = {E, F, G, H, M, N, O, P};
+  VLOAD (vsrc_1, buf_src_1, q, float, f, 16, 8);
+  VLOAD (vsrc_2, buf_src_2, q, float, f, 16, 8);
+  DECL_VARIABLE (vector_res, float, 16, 8) =
+    vsubq_f16 (VECT_VAR (vsrc_1, float, 16, 8),
+	       VECT_VAR (vsrc_2, float, 16, 8));
+  vst1q_f16 (VECT_VAR (result, float, 16, 8),
+	     VECT_VAR (vector_res, float, 16, 8));
+
+  CHECK_FP (TEST_MSG, float, 16, 8, PRIx16, expected_static, "");
+}
+
+int
+main (void)
+{
+  exec_vsub_f16 ();
+  return 0;
+}
-- 
2.1.4

