This is the mail archive of the
gcc-patches@gcc.gnu.org
mailing list for the GCC project.
[PATCH][AArch64] Generalize code alignment
- From: "Wilco Dijkstra" <wdijkstr at arm dot com>
- To: <gcc-patches at gcc dot gnu dot org>
- Date: Fri, 12 Dec 2014 14:48:55 -0000
- Subject: [PATCH][AArch64] Generalize code alignment
- Authentication-results: sourceware.org; auth=none
This patch generalizes the code alignment and lets each CPU set function, jump and loop alignment
independently. The defaults for A53/A57 are based on the original patch by James Greenhalgh.
OK for trunk?
ChangeLog:
2014-12-13 Wilco Dijkstra wdijkstr@arm.com
* gcc/config/aarch64/aarch64-protos.h (tune_params):
Add code alignment tuning parameters.
* gcc/config/aarch64/aarch64.c (generic_tunings):
Add code alignment tuning parameters.
(cortexa53_tunings): Likewise.
(cortexa57_tunings): Likewise.
(thunderx_tunings): Likewise.
(aarch64_override_options): Use new alignment tunings.
---
gcc/config/aarch64/aarch64-protos.h | 4 +++-
gcc/config/aarch64/aarch64.c | 22 +++++++++++++++-------
2 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 234efcb..f22573b 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -170,8 +170,10 @@ struct tune_params
const struct cpu_vector_cost *const vec_costs;
const int memmov_cost;
const int issue_rate;
- const int align;
const unsigned int fuseable_ops;
+ const int function_align;
+ const int jump_align;
+ const int loop_align;
const int int_reassoc_width;
const int fp_reassoc_width;
const int vec_reassoc_width;
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 5f51b97..db42164 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -323,8 +323,10 @@ static const struct tune_params generic_tunings =
&generic_vector_cost,
NAMED_PARAM (memmov_cost, 4),
NAMED_PARAM (issue_rate, 2),
- NAMED_PARAM (align, 4),
NAMED_PARAM (fuseable_ops, AARCH64_FUSE_NOTHING),
+ 8, /* function_align. */
+ 8, /* jump_align. */
+ 4, /* loop_align. */
2, /* int_reassoc_width. */
4, /* fp_reassoc_width. */
1 /* vec_reassoc_width. */
@@ -338,9 +340,11 @@ static const struct tune_params cortexa53_tunings =
&generic_vector_cost,
NAMED_PARAM (memmov_cost, 4),
NAMED_PARAM (issue_rate, 2),
- NAMED_PARAM (align, 8),
NAMED_PARAM (fuseable_ops, (AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
| AARCH64_FUSE_MOVK_MOVK | AARCH64_FUSE_ADRP_LDR)),
+ 8, /* function_align. */
+ 8, /* jump_align. */
+ 4, /* loop_align. */
2, /* int_reassoc_width. */
4, /* fp_reassoc_width. */
1 /* vec_reassoc_width. */
@@ -354,8 +358,10 @@ static const struct tune_params cortexa57_tunings =
&cortexa57_vector_cost,
NAMED_PARAM (memmov_cost, 4),
NAMED_PARAM (issue_rate, 3),
- NAMED_PARAM (align, 8),
NAMED_PARAM (fuseable_ops, (AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD |
AARCH64_FUSE_MOVK_MOVK)),
+ 16, /* function_align. */
+ 8, /* jump_align. */
+ 4, /* loop_align. */
2, /* int_reassoc_width. */
4, /* fp_reassoc_width. */
1 /* vec_reassoc_width. */
@@ -369,8 +375,10 @@ static const struct tune_params thunderx_tunings =
&generic_vector_cost,
NAMED_PARAM (memmov_cost, 6),
NAMED_PARAM (issue_rate, 2),
- NAMED_PARAM (align, 8),
NAMED_PARAM (fuseable_ops, AARCH64_FUSE_CMP_BRANCH),
+ 8, /* function_align. */
+ 8, /* jump_align. */
+ 8, /* loop_align. */
2, /* int_reassoc_width. */
4, /* fp_reassoc_width. */
1 /* vec_reassoc_width. */
@@ -6773,11 +6781,11 @@ aarch64_override_options (void)
if (!optimize_size)
{
if (align_loops <= 0)
- align_loops = aarch64_tune_params->align;
+ align_loops = aarch64_tune_params->loop_align;
if (align_jumps <= 0)
- align_jumps = aarch64_tune_params->align;
+ align_jumps = aarch64_tune_params->jump_align;
if (align_functions <= 0)
- align_functions = aarch64_tune_params->align;
+ align_functions = aarch64_tune_params->function_align;
}
aarch64_override_options_after_change ();
--
1.9.1