This is the mail archive of the
gcc-patches@gcc.gnu.org
mailing list for the GCC project.
[Patch 6/7 AArch64] Deprecate *_BY_PIECES_P, move to hookized version
- From: James Greenhalgh <james dot greenhalgh at arm dot com>
- To: gcc-patches at gcc dot gnu dot org
- Cc: marcus dot shawcroft at arm dot com, richard dot earnshaw at arm dot com
- Date: Fri, 31 Oct 2014 15:11:41 +0000
- Subject: [Patch 6/7 AArch64] Deprecate *_BY_PIECES_P, move to hookized version
- Authentication-results: sourceware.org; auth=none
- References: <1414768100-27840-1-git-send-email-james dot greenhalgh at arm dot com>
Hi,
This patch moves aarch64 to TARGET_USE_BY_PIECES_INFRASTRUCTURE_P.
AArch64 turns off STORE_BY_PIECES, so honour that and use the default
implementation for other operations.
Unlike the other patches in this series I do actually have some
hardware for AArch64! So this one has been through a bootstrap run
with no issues.
OK?
Cheers,
James
---
gcc/
2014-10-31 James Greenhalgh <james.greenhalgh@arm.com>
* config/aarch64/aarch64.c
(aarch64_use_by_pieces_infrastructure_p): New.
(TARGET_USE_BY_PIECES_INFRASTRUCTURE_P): Likewise.
* config/aarch64/aarch64.h (STORE_BY_PIECES_P): Delete.
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 0400fd5..9aeac7c 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -10001,6 +10001,22 @@ aarch64_asan_shadow_offset (void)
return (HOST_WIDE_INT_1 << 36);
}
+static bool
+aarch64_use_by_pieces_infrastructure_p (unsigned int size,
+ unsigned int align,
+ enum by_pieces_operation op,
+ bool speed_p)
+{
+ /* STORE_BY_PIECES can be used when copying a constant string, but
+ in that case each 64-bit chunk takes 5 insns instead of 2 (LDR/STR).
+ For now we always fail this and let the move_by_pieces code copy
+ the string from read-only memory. */
+ if (op == STORE_BY_PIECES)
+ return false;
+
+ return default_use_by_pieces_infrastructure_p (size, align, op, speed_p);
+}
+
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST aarch64_address_cost
@@ -10253,6 +10269,10 @@ aarch64_asan_shadow_offset (void)
#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS aarch64_legitimize_address
+#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
+#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
+ aarch64_use_by_pieces_infrastructure_p
+
struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-aarch64.h"
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 97b1848..e22163e 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -723,12 +723,6 @@ do { \
#define SET_RATIO(speed) \
((speed) ? 15 : AARCH64_CALL_RATIO - 2)
-/* STORE_BY_PIECES_P can be used when copying a constant string, but
- in that case each 64-bit chunk takes 5 insns instead of 2 (LDR/STR).
- For now we always fail this and let the move_by_pieces code copy
- the string from read-only memory. */
-#define STORE_BY_PIECES_P(SIZE, ALIGN) 0
-
/* Disable auto-increment in move_by_pieces et al. Use of auto-increment is
rarely a good idea in straight-line code since it adds an extra address
dependency between each instruction. Better to use incrementing offsets. */