This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.

[ARM][PATCH] Cortex-A5 scheduler


This patch, submitted on behalf of Julian, adds a scheduling model specific to the ARM Cortex-A5.

CodeSourcery have been using these patches in Sourcery G++ for a while now, and I have done a before-and-after test run and found no regressions.
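
For reference, the new description is exercised whenever GCC tunes for the
Cortex-A5, e.g. with -mcpu=cortex-a5.  A rough illustration (the function,
cross-compiler name and flags below are only an example):

  /* Illustrative kernel only: VFP multiplies feeding adds, so the
     fmacs/fadds latencies in the new model affect the schedule.  */
  float dot4 (const float *a, const float *b)
  {
    return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3];
  }

  arm-none-eabi-gcc -O2 -mcpu=cortex-a5 -mfpu=vfpv3-d16 -mfloat-abi=softfp \
    -fdump-rtl-sched2 -S dot4.c

The sched2 dump then shows how the second scheduling pass has placed the code
under the new model.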

Ok to commit?

Andrew Stubbs
2010-08-20  Julian Brown  <julian@codesourcery.com>

	gcc/
	* config/arm/arm.c (arm_issue_rate): Return 2 for Cortex-A5.
	* config/arm/arm.md (generic_sched): No for Cortex-A5.
	(generic_vfp): Likewise.
	(cortex-a5.md): Include.
	* config/arm/cortex-a5.md: New.

---
 src/gcc-mainline/gcc/config/arm/arm.c        |    1 
 src/gcc-mainline/gcc/config/arm/arm.md       |    5 
 src/gcc-mainline/gcc/config/arm/cortex-a5.md |  310 ++++++++++++++++++++++++++
 3 files changed, 314 insertions(+), 2 deletions(-)
 create mode 100644 src/gcc-mainline/gcc/config/arm/cortex-a5.md

diff --git a/src/gcc-mainline/gcc/config/arm/arm.c b/src/gcc-mainline/gcc/config/arm/arm.c
index 416068d..e83b40b 100644
--- a/src/gcc-mainline/gcc/config/arm/arm.c
+++ b/src/gcc-mainline/gcc/config/arm/arm.c
@@ -22429,6 +22429,7 @@ arm_issue_rate (void)
     {
     case cortexr4:
     case cortexr4f:
+    case cortexa5:
     case cortexa8:
     case cortexa9:
       return 2;
diff --git a/src/gcc-mainline/gcc/config/arm/arm.md b/src/gcc-mainline/gcc/config/arm/arm.md
index 9da7101..5c09576 100644
--- a/src/gcc-mainline/gcc/config/arm/arm.md
+++ b/src/gcc-mainline/gcc/config/arm/arm.md
@@ -476,7 +476,7 @@
 
 (define_attr "generic_sched" "yes,no"
   (const (if_then_else 
-          (ior (eq_attr "tune" "arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,cortexa8,cortexa9")
+          (ior (eq_attr "tune" "arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,cortexa5,cortexa8,cortexa9")
 	      (eq_attr "tune_cortexr4" "yes"))
           (const_string "no")
           (const_string "yes"))))
@@ -484,7 +484,7 @@
 (define_attr "generic_vfp" "yes,no"
   (const (if_then_else
 	  (and (eq_attr "fpu" "vfp")
-	       (eq_attr "tune" "!arm1020e,arm1022e,cortexa8,cortexa9")
+	       (eq_attr "tune" "!arm1020e,arm1022e,cortexa5,cortexa8,cortexa9")
 	       (eq_attr "tune_cortexr4" "no"))
 	  (const_string "yes")
 	  (const_string "no"))))
@@ -494,6 +494,7 @@
 (include "arm1020e.md")
 (include "arm1026ejs.md")
 (include "arm1136jfs.md")
+(include "cortex-a5.md")
 (include "cortex-a8.md")
 (include "cortex-a9.md")
 (include "cortex-r4.md")
diff --git a/src/gcc-mainline/gcc/config/arm/cortex-a5.md b/src/gcc-mainline/gcc/config/arm/cortex-a5.md
new file mode 100644
index 0000000..b17033a
--- /dev/null
+++ b/src/gcc-mainline/gcc/config/arm/cortex-a5.md
@@ -0,0 +1,310 @@
+;; ARM Cortex-A5 pipeline description
+;; Copyright (C) 2010 Free Software Foundation, Inc.
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "cortex_a5")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Functional units.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; The integer (ALU) pipeline.  There are five DPU pipeline stages.  However,
+;; the decode/issue stages operate the same way for all instructions, so we do
+;; not model them.  We only need to model the first execute stage because
+;; instructions always advance one stage per cycle in order.  Only branch
+;; instructions may dual-issue, so a single unit covers all of the LS, ALU,
+;; MAC and FPU pipelines.
+
+(define_cpu_unit "cortex_a5_ex1" "cortex_a5")
+
+;; The branch pipeline.  Branches can dual-issue with other instructions
+;; (except when those instructions take multiple cycles to issue).
+
+(define_cpu_unit "cortex_a5_branch" "cortex_a5")
+
+;; Pseudo-unit for blocking the multiply pipeline when a double-precision
+;; multiply is in progress.
+
+(define_cpu_unit "cortex_a5_fpmul_pipe" "cortex_a5")
+
+;; The floating-point add pipeline (ex1/f1 stage), used to model the usage
+;; of the add pipeline by fmac instructions, etc.
+
+(define_cpu_unit "cortex_a5_fpadd_pipe" "cortex_a5")
+
+;; Floating-point div/sqrt (long latency, out-of-order completion).
+
+(define_cpu_unit "cortex_a5_fp_div_sqrt" "cortex_a5")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; ALU instructions.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn_reservation "cortex_a5_alu" 2
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "alu"))
+  "cortex_a5_ex1")
+
+(define_insn_reservation "cortex_a5_alu_shift" 2
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "alu_shift,alu_shift_reg"))
+  "cortex_a5_ex1")
+
+;; Forwarding path for unshifted operands.
+
+(define_bypass 1 "cortex_a5_alu,cortex_a5_alu_shift"
+  "cortex_a5_alu")
+
+(define_bypass 1 "cortex_a5_alu,cortex_a5_alu_shift"
+  "cortex_a5_alu_shift"
+  "arm_no_early_alu_shift_dep")
+
+;; The multiplier pipeline can forward results from the wr stage only (so I
+;; don't think there's any need to specify bypasses).
+
+(define_insn_reservation "cortex_a5_mul" 2
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "mult"))
+  "cortex_a5_ex1")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Load/store instructions.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Address-generation happens in the issue stage, which is one stage behind
+;; the ex1 stage (the first stage we care about for scheduling purposes). The
+;; dc1 stage is parallel with ex1, dc2 with ex2 and rot with wr.
+
+;; FIXME: These might not be entirely accurate for load2, load3, load4. I think
+;; they make sense since there's a 32-bit interface between the DPU and the DCU,
+;; so we can't load more than that per cycle.  The store2, store3, store4
+;; reservations are similarly guessed.
+
+(define_insn_reservation "cortex_a5_load1" 2
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "load_byte,load1"))
+  "cortex_a5_ex1")
+
+(define_insn_reservation "cortex_a5_store1" 0
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "store1"))
+  "cortex_a5_ex1")
+
+(define_insn_reservation "cortex_a5_load2" 3
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "load2"))
+  "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1")
+
+(define_insn_reservation "cortex_a5_store2" 0
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "store2"))
+  "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1")
+
+(define_insn_reservation "cortex_a5_load3" 4
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "load3"))
+  "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1+cortex_a5_branch,\
+   cortex_a5_ex1")
+
+(define_insn_reservation "cortex_a5_store3" 0
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "store3"))
+  "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1+cortex_a5_branch,\
+   cortex_a5_ex1")
+
+(define_insn_reservation "cortex_a5_load4" 5
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "load4"))
+  "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1+cortex_a5_branch,\
+   cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1")
+
+(define_insn_reservation "cortex_a5_store4" 0
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "store4"))
+  "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1+cortex_a5_branch,\
+   cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Branches.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Direct branches are the only instructions we can dual-issue (also IT and
+;; nop, but those aren't very interesting for scheduling).  (The latency here
+;; is meant to represent when the branch actually takes place, but may not be
+;; entirely correct.)
+
+(define_insn_reservation "cortex_a5_branch" 3
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "branch,call"))
+  "cortex_a5_branch")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Floating-point arithmetic.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn_reservation "cortex_a5_fpalu" 4
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "ffariths, fadds, ffarithd, faddd, fcpys, fmuls, f_cvt,\
+			fcmps, fcmpd"))
+  "cortex_a5_ex1+cortex_a5_fpadd_pipe")
+
+;; For fconsts and fconstd, 8-bit immediate data is passed directly from
+;; f1 to f3 (which I think reduces the latency by one cycle).
+
+(define_insn_reservation "cortex_a5_fconst" 3
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "fconsts,fconstd"))
+  "cortex_a5_ex1+cortex_a5_fpadd_pipe")
+
+;; We should avoid attempting to issue a single-precision multiplication in
+;; the middle of a double-precision multiplication operation (hence the use
+;; of cortex_a5_fpmul_pipe).
+
+(define_insn_reservation "cortex_a5_fpmuls" 4
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "fmuls"))
+  "cortex_a5_ex1+cortex_a5_fpmul_pipe")
+
+;; For single-precision multiply-accumulate, the add (accumulate) is issued
+;; whilst the multiply is in F4.  The multiply result can then be forwarded
+;; from F5 to F1.  The issue unit is only used once (when we first start
+;; processing the instruction), but the usage of the FP add pipeline could
+;; block other instructions attempting to use it simultaneously.  We try to
+;; avoid that using cortex_a5_fpadd_pipe.
+
+(define_insn_reservation "cortex_a5_fpmacs" 8
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "fmacs"))
+  "cortex_a5_ex1+cortex_a5_fpmul_pipe, nothing*3, cortex_a5_fpadd_pipe")
+
+;; Non-multiply instructions can issue during the middle two cycles of a
+;; double-precision multiply.  Note that it isn't entirely clear when a branch
+;; can dual-issue when a multi-cycle multiplication is in progress; we ignore
+;; that for now though.
+
+(define_insn_reservation "cortex_a5_fpmuld" 7
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "fmuld"))
+  "cortex_a5_ex1+cortex_a5_fpmul_pipe, cortex_a5_fpmul_pipe*2,\
+   cortex_a5_ex1+cortex_a5_fpmul_pipe")
+
+(define_insn_reservation "cortex_a5_fpmacd" 11
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "fmacd"))
+  "cortex_a5_ex1+cortex_a5_fpmul_pipe, cortex_a5_fpmul_pipe*2,\
+   cortex_a5_ex1+cortex_a5_fpmul_pipe, nothing*3, cortex_a5_fpadd_pipe")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Floating-point divide/square root instructions.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; ??? Not sure whether the 14 cycles taken for a single-precision divide to
+;; complete include the time taken for the special instruction used to collect
+;; the result to travel down the multiply pipeline.  Assuming so.  (If that's
+;; wrong, the latency should be increased by a few cycles.)
+
+;; fsqrt takes one cycle less, but that is not modelled, nor is the use of the
+;; multiply pipeline to collect the divide/square-root result.
+
+(define_insn_reservation "cortex_a5_fdivs" 14
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "fdivs"))
+  "cortex_a5_ex1, cortex_a5_fp_div_sqrt * 13")
+
+;; ??? Similarly for fdivd.
+
+(define_insn_reservation "cortex_a5_fdivd" 29
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "fdivd"))
+  "cortex_a5_ex1, cortex_a5_fp_div_sqrt * 28")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; VFP to/from core transfers.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; FP loads take data from wr/rot/f3.  Might need to define bypasses to model
+;; this?
+
+;; Core-to-VFP transfers use the multiply pipeline.
+;; Not sure about this at all... I think we need some bypasses too.
+
+(define_insn_reservation "cortex_a5_r2f" 4
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "r_2_f"))
+  "cortex_a5_ex1")
+
+;; Not sure about this either. 6.8.7 says "Additionally, the store pipe used
+;; for store and FP->core register transfers can forward into the F2 and F3
+;; stages."
+;; This doesn't correspond to what we have though.
+
+(define_insn_reservation "cortex_a5_f2r" 2
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "f_2_r"))
+  "cortex_a5_ex1")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; VFP flag transfer.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; ??? The flag forwarding described in section 6.8.11 of the Cortex-A5 DPU
+;; specification (from fmstat to the ex2 stage of the second instruction) is
+;; not modeled at present.
+
+(define_insn_reservation "cortex_a5_f_flags" 4
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "f_flag"))
+  "cortex_a5_ex1")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; VFP load/store.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn_reservation "cortex_a5_f_loads" 4
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "f_loads"))
+  "cortex_a5_ex1")
+
+(define_insn_reservation "cortex_a5_f_loadd" 5
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "f_load,f_loadd"))
+  "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1")
+
+(define_insn_reservation "cortex_a5_f_stores" 0
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "f_stores"))
+  "cortex_a5_ex1")
+
+(define_insn_reservation "cortex_a5_f_stored" 0
+  (and (eq_attr "tune" "cortexa5")
+       (eq_attr "type" "f_store,f_stored"))
+  "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1")
+
+;; Load-to-use for floating-point values has a penalty of one cycle, i.e. a
+;; latency of two (6.8.3).
+
+(define_bypass 2 "cortex_a5_f_loads"
+                 "cortex_a5_fpalu, cortex_a5_fpmacs, cortex_a5_fpmuld,\
+		  cortex_a5_fpmacd, cortex_a5_fdivs, cortex_a5_fdivd,\
+		  cortex_a5_f2r")
+
+(define_bypass 3 "cortex_a5_f_loadd"
+                 "cortex_a5_fpalu, cortex_a5_fpmacs, cortex_a5_fpmuld,\
+		  cortex_a5_fpmacd, cortex_a5_fdivs, cortex_a5_fdivd,\
+		  cortex_a5_f2r")
