
[PATCH] Enable java for aarch64


This enables building java for aarch64.  Most of the configuration bits
were copied from the arm port.

		=== libjava Summary ===

# of expected passes		2533
# of unexpected failures	29
# of untested testcases 	25

Andreas.

	* configure.ac (aarch64-*-*): Don't disable java.
	* configure: Regenerate.

libjava/:
	* configure.host: Add support for aarch64.
	* sysdep/aarch64/locks.h: New file.

libjava/classpath/:
	* native/fdlibm/ieeefp.h: Add support for aarch64.
---
 configure                                |  2 ++
 configure.ac                             |  2 ++
 libjava/classpath/native/fdlibm/ieeefp.h |  8 +++++
 libjava/configure.host                   |  8 ++++-
 libjava/sysdep/aarch64/locks.h           | 57 ++++++++++++++++++++++++++++++++
 5 files changed, 76 insertions(+), 1 deletion(-)
 create mode 100644 libjava/sysdep/aarch64/locks.h

diff --git a/configure b/configure
index d809535..e161cad 100755
--- a/configure
+++ b/configure
@@ -3272,6 +3272,8 @@ esac
 
 # Disable Java if libffi is not supported.
 case "${target}" in
+  aarch64-*-*)
+    ;;
   alpha*-*-*)
     ;;
   arm*-*-*)
diff --git a/configure.ac b/configure.ac
index 48ec1aa..bec489f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -611,6 +611,8 @@ esac
 
 # Disable Java if libffi is not supported.
 case "${target}" in
+  aarch64-*-*)
+    ;;
   alpha*-*-*)
     ;;
   arm*-*-*)
diff --git a/libjava/classpath/native/fdlibm/ieeefp.h b/libjava/classpath/native/fdlibm/ieeefp.h
index c230bbb..7ef2ae7e 100644
--- a/libjava/classpath/native/fdlibm/ieeefp.h
+++ b/libjava/classpath/native/fdlibm/ieeefp.h
@@ -4,6 +4,14 @@
 #ifndef __IEEE_BIG_ENDIAN
 #ifndef __IEEE_LITTLE_ENDIAN
 
+#ifdef __aarch64__
+#ifdef __AARCH64EB__
+#define __IEEE_BIG_ENDIAN
+#else
+#define __IEEE_LITTLE_ENDIAN
+#endif
+#endif
+
 #ifdef __alpha__
 #define __IEEE_LITTLE_ENDIAN
 #endif
diff --git a/libjava/configure.host b/libjava/configure.host
index 0c3b41c..96f86fe 100644
--- a/libjava/configure.host
+++ b/libjava/configure.host
@@ -81,6 +81,11 @@ ATOMICSPEC=
 
 # This case statement supports per-CPU defaults.
 case "${host}" in
+  aarch64*-linux*)
+	libgcj_interpreter=yes
+	sysdeps_dir=aarch64
+	ATOMICSPEC=-fuse-atomic-builtins
+	;;
   arm*-elf)
 	with_libffi_default=no
 	PROCESS=Ecos
@@ -224,7 +229,8 @@ case "${host}" in
   x86_64*-linux* | \
   hppa*-linux* | \
   m68k*-linux* | \
-  sh-linux* | sh[34]*-linux*)
+  sh-linux* | sh[34]*-linux* | \
+  aarch64*-linux*)
   	can_unwind_signal=yes
 	libgcj_ld_symbolic='-Wl,-Bsymbolic'
 	if test x$slow_pthread_self = xyes \
diff --git a/libjava/sysdep/aarch64/locks.h b/libjava/sysdep/aarch64/locks.h
new file mode 100644
index 0000000..f91473d
--- /dev/null
+++ b/libjava/sysdep/aarch64/locks.h
@@ -0,0 +1,57 @@
+// locks.h - Thread synchronization primitives. AArch64 implementation.
+
+#ifndef __SYSDEP_LOCKS_H__
+#define __SYSDEP_LOCKS_H__
+
+typedef size_t obj_addr_t;	/* Integer type big enough for object	*/
+				/* address.				*/
+
+// Atomically replace *addr by new_val if it was initially equal to old.
+// Return true if the comparison succeeded.
+// Assumed to have acquire semantics, i.e. later memory operations
+// cannot execute before the compare_and_swap finishes.
+inline static bool
+compare_and_swap(volatile obj_addr_t *addr,
+                 obj_addr_t old,
+                 obj_addr_t new_val)
+{
+  return __sync_bool_compare_and_swap(addr, old, new_val);
+}
+
+// Set *addr to new_val with release semantics, i.e. making sure
+// that prior loads and stores complete before this
+// assignment.
+inline static void
+release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
+{
+  __sync_synchronize();
+  *addr = new_val;
+}
+
+// Compare_and_swap with release semantics instead of acquire semantics.
+// On many architectures, the operation makes both guarantees, so the
+// implementation can be the same.
+inline static bool
+compare_and_swap_release(volatile obj_addr_t *addr,
+			 obj_addr_t old,
+			 obj_addr_t new_val)
+{
+  return __sync_bool_compare_and_swap(addr, old, new_val);
+}
+
+// Ensure that subsequent instructions do not execute on stale
+// data that was loaded from memory before the barrier.
+inline static void
+read_barrier()
+{
+  __sync_synchronize();
+}
+
+// Ensure that prior stores to memory are completed with respect to other
+// processors.
+inline static void
+write_barrier()
+{
+  __sync_synchronize();
+}
+#endif
-- 
1.8.2.1
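
A note for readers unfamiliar with libjava's sysdep lock headers: the
primitives added in sysdep/aarch64/locks.h are building blocks rather than a
lock in themselves.  A minimal sketch of a spin lock built on top of them
might look like the following; the spin_lock type and the
spin_lock_acquire/spin_lock_release names are illustrative only and are not
part of the patch or of libjava.

// Illustrative sketch only -- not part of the patch or of libjava.
// Shows how the compare_and_swap/release_set primitives from
// sysdep/aarch64/locks.h could be combined into a simple spin lock.

#include <stddef.h>

typedef size_t obj_addr_t;

// Same GCC builtins the patch uses.  They act as full barriers, so they
// also provide the acquire/release guarantees described in locks.h.
inline static bool
compare_and_swap(volatile obj_addr_t *addr, obj_addr_t old,
		 obj_addr_t new_val)
{
  return __sync_bool_compare_and_swap(addr, old, new_val);
}

inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __sync_synchronize();
  *addr = new_val;
}

// Hypothetical lock word: 0 = unlocked, 1 = locked.
struct spin_lock
{
  volatile obj_addr_t state;
};

inline static void
spin_lock_acquire(spin_lock *l)
{
  // Acquire semantics: the critical section cannot be reordered
  // before the lock is observed as taken.
  while (!compare_and_swap(&l->state, 0, 1))
    ;  // busy-wait; real code would yield or pause here
}

inline static void
spin_lock_release(spin_lock *l)
{
  // Release semantics: stores made inside the critical section are
  // visible before the lock is seen as free again.
  release_set(&l->state, 0);
}

Because the __sync builtins are full barriers in GCC, compare_and_swap and
compare_and_swap_release can share one implementation, which is why the patch
defines them identically.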

-- 
Andreas Schwab, SUSE Labs, schwab@suse.de
GPG Key fingerprint = 0196 BAD8 1CE9 1970 F4BE  1748 E4D4 88E3 0EEA B9D7
"And now for something completely different."

