;; ARM Cortex-A9 pipeline description
;; Copyright (C) 2008, 2009 Free Software Foundation, Inc.
;; Originally written by CodeSourcery for VFP.
;;
;; Integer core pipeline description contributed by ARM Ltd.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

(define_automaton "cortex_a9")

;; The Cortex-A9 integer core is modelled as a dual issue pipeline that has
;; the following components.
;; 1. 1 Load Store Pipeline.
;; 2. P0 / main pipeline for data processing instructions.
;; 3. P1 / Dual pipeline for Data processing instructions.
;; 4. MAC pipeline for multiply as well as multiply
;;    and accumulate instructions.
;; 5. 1 VFP / Neon pipeline.
;; The Load/Store and VFP/Neon pipelines are multiplexed.
;; The P0 / main pipeline and M1 stage of the MAC pipeline are
;; multiplexed.
;; The P1 / dual pipeline and M2 stage of the MAC pipeline are
;; multiplexed.
;; There are only 4 register read ports and hence at any point of
;; time we can't issue down both the E1 and the E2 ports unless
;; of course there are bypass paths that get exercised.
;; Both P0 and P1 have 2 stages, E1 and E2.
;; Data processing instructions issue to E1 or E2 depending on
;; whether they have an early shift or not.

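;; In the model below the Load/Store vs. VFP/Neon restriction is
;; expressed with an exclusion_set, while the sharing between the DP
;; pipelines and the MAC stages is approximated by the
;; cortex_a9_multcycle1 reservation used for multiplies.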
(define_cpu_unit "cortex_a9_vfp, cortex_a9_ls" "cortex_a9")
(define_cpu_unit "cortex_a9_p0_e1, cortex_a9_p0_e2" "cortex_a9")
(define_cpu_unit "cortex_a9_p1_e1, cortex_a9_p1_e2" "cortex_a9")
(define_cpu_unit "cortex_a9_p0_wb, cortex_a9_p1_wb" "cortex_a9")
(define_cpu_unit "cortex_a9_mac_m1, cortex_a9_mac_m2" "cortex_a9")
(define_cpu_unit "cortex_a9_branch, cortex_a9_issue_branch" "cortex_a9")

(define_reservation "cortex_a9_p0_default" "cortex_a9_p0_e2, cortex_a9_p0_wb")
(define_reservation "cortex_a9_p1_default" "cortex_a9_p1_e2, cortex_a9_p1_wb")
(define_reservation "cortex_a9_p0_shift" "cortex_a9_p0_e1, cortex_a9_p0_default")
(define_reservation "cortex_a9_p1_shift" "cortex_a9_p1_e1, cortex_a9_p1_default")

(define_reservation "cortex_a9_multcycle1"
  "cortex_a9_p0_e2 + cortex_a9_mac_m1 + cortex_a9_mac_m2 + \
cortex_a9_p1_e2 + cortex_a9_p0_e1 + cortex_a9_p1_e1")
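;; Note: the "+" reservation above claims both stages of P0 and P1 and
;; both MAC stages in the same cycle, so no other data processing
;; instruction that needs those units can be scheduled alongside that
;; cycle of a multiply.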

(define_reservation "cortex_a9_mult16"
  "cortex_a9_mac_m1, cortex_a9_mac_m2, cortex_a9_p0_wb")
(define_reservation "cortex_a9_mac16"
  "cortex_a9_multcycle1, cortex_a9_mac_m2, cortex_a9_p0_wb")
(define_reservation "cortex_a9_mult"
  "cortex_a9_mac_m1*2, cortex_a9_mac_m2, cortex_a9_p0_wb")
(define_reservation "cortex_a9_mac"
  "cortex_a9_multcycle1*2, cortex_a9_mac_m2, cortex_a9_p0_wb")

;; Issue at the same time along the load store pipeline and
;; the VFP / Neon pipeline is not possible.
;; FIXME: At some point we need to model the issue
;; of the load store and the vfp being shared rather than anything else.

(exclusion_set "cortex_a9_ls" "cortex_a9_vfp")
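
;; A minimal, commented-out sketch of that alternative (not part of the
;; model): a shared issue unit that both the load/store and the VFP/Neon
;; reservations would claim in their first cycle.  The unit and
;; reservation names here are hypothetical.
;;
;;   (define_cpu_unit "cortex_a9_issue_ls_vfp" "cortex_a9")
;;   (define_reservation "cortex_a9_ls_issue"
;;     "cortex_a9_issue_ls_vfp + cortex_a9_ls")
;;   (define_reservation "cortex_a9_vfp_issue"
;;     "cortex_a9_issue_ls_vfp + cortex_a9_vfp")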

;; Default data processing instruction without any shift.
;; The only exception to this is the mov instruction,
;; which can go down E2 without any problem.
(define_insn_reservation "cortex_a9_dp" 2
  (and (eq_attr "tune" "cortexa9")
       (ior (eq_attr "type" "alu")
            (and (eq_attr "type" "alu_shift_reg, alu_shift")
                 (eq_attr "insn" "mov"))))
  "cortex_a9_p0_default|cortex_a9_p1_default")

;; An instruction using the shifter will go down E1.
(define_insn_reservation "cortex_a9_dp_shift" 3
  (and (eq_attr "tune" "cortexa9")
       (and (eq_attr "type" "alu_shift_reg, alu_shift")
            (not (eq_attr "insn" "mov"))))
  "cortex_a9_p0_shift | cortex_a9_p1_shift")

;; Loads have a latency of 4 cycles.
;; We don't model autoincrement instructions.  These
;; instructions use the load store pipeline and 1 of
;; the E2 units to write back the result of the increment.

(define_insn_reservation "cortex_a9_load1_2" 4
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "load1, load2, load_byte"))
  "cortex_a9_ls")

;; Load multiples and store multiples can't be issued for 2 cycles in a
;; row.  The description below assumes that addresses are 64 bit aligned.
;; If not, there is an extra cycle latency which is not modelled.

;; FIXME: This bit might need to be reworked when we get to
;; tuning for the VFP because strictly speaking the ldm
;; is sent to the LSU unit as is and there is only an
;; issue restriction between the LSU and the VFP / Neon unit.

(define_insn_reservation "cortex_a9_load3_4" 5
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "load3, load4"))
  "cortex_a9_ls, cortex_a9_ls")

(define_insn_reservation "cortex_a9_store1_2" 0
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "store1, store2"))
  "cortex_a9_ls")

;; Almost all our store multiples use an auto-increment
;; form.  Don't issue back to back load and store multiples
;; because the load store unit will stall.
(define_insn_reservation "cortex_a9_store3_4" 0
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "store3, store4"))
  "cortex_a9_ls+(cortex_a9_p0_default | cortex_a9_p1_default), cortex_a9_ls")

;; We get 16*16 multiply / mac results in 3 cycles.
(define_insn_reservation "cortex_a9_mult16" 3
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "insn" "smulxy"))
  "cortex_a9_mult16")

;; The 16*16 mac is slightly different in that it
;; reserves M1 and M2 in the same cycle.
(define_insn_reservation "cortex_a9_mac16" 3
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "insn" "smlaxy"))
  "cortex_a9_mac16")

(define_insn_reservation "cortex_a9_multiply" 4
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "insn" "mul"))
  "cortex_a9_mult")

(define_insn_reservation "cortex_a9_mac" 4
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "insn" "mla"))
  "cortex_a9_mac")

;; An instruction with a result in E2 can be forwarded
;; to E2 or E1 or M1 or the load store unit in the next cycle.

(define_bypass 1 "cortex_a9_dp"
  "cortex_a9_dp_shift, cortex_a9_multiply,
   cortex_a9_load1_2, cortex_a9_dp, cortex_a9_store1_2,
   cortex_a9_mult16, cortex_a9_mac16, cortex_a9_mac, cortex_a9_store3_4, cortex_a9_load3_4")
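;; For example, in
;;   add r0, r1, r2
;;   add r3, r0, r4
;; the bypass lets the second add consume r0 one cycle after the first
;; issues, instead of the two cycle latency given for cortex_a9_dp.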

(define_bypass 2 "cortex_a9_dp_shift"
  "cortex_a9_dp_shift, cortex_a9_multiply,
   cortex_a9_load1_2, cortex_a9_dp, cortex_a9_store1_2,
   cortex_a9_mult16, cortex_a9_mac16, cortex_a9_mac, cortex_a9_store3_4, cortex_a9_load3_4")

;; An instruction in the load store pipeline can provide
;; read access to a DP instruction in the P0 default pipeline
;; before the writeback stage.

(define_bypass 3 "cortex_a9_load1_2" "cortex_a9_dp, cortex_a9_load1_2,
                                      cortex_a9_store3_4, cortex_a9_store1_2")

(define_bypass 4 "cortex_a9_load3_4" "cortex_a9_dp, cortex_a9_load1_2,
                                      cortex_a9_store3_4, cortex_a9_store1_2, cortex_a9_load3_4")

;; Calls and branches.

;; Branch instructions

(define_insn_reservation "cortex_a9_branch" 0
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "branch"))
  "cortex_a9_branch")

;; Call latencies are essentially 0 but make sure
;; dual issue doesn't happen, i.e. the next instruction
;; starts at the next cycle.
(define_insn_reservation "cortex_a9_call" 0
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "call"))
  "cortex_a9_issue_branch + cortex_a9_multcycle1 + cortex_a9_ls + cortex_a9_vfp")

;; Pipelining for VFP instructions.

(define_insn_reservation "cortex_a9_ffarith" 1
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "fcpys,ffariths,ffarithd,fcmps,fcmpd,fconsts,fconstd"))
  "cortex_a9_vfp")

(define_insn_reservation "cortex_a9_fadd" 4
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "fadds,faddd,f_cvt"))
  "cortex_a9_vfp")

(define_insn_reservation "cortex_a9_fmuls" 5
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "fmuls"))
  "cortex_a9_vfp")

(define_insn_reservation "cortex_a9_fmuld" 6
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "fmuld"))
  "cortex_a9_vfp*2")

(define_insn_reservation "cortex_a9_fmacs" 8
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "fmacs"))
  "cortex_a9_vfp")

(define_insn_reservation "cortex_a9_fmacd" 8
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "fmacd"))
  "cortex_a9_vfp*2")

(define_insn_reservation "cortex_a9_fdivs" 15
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "fdivs"))
  "cortex_a9_vfp*10")

(define_insn_reservation "cortex_a9_fdivd" 25
  (and (eq_attr "tune" "cortexa9")
       (eq_attr "type" "fdivd"))
  "cortex_a9_vfp*20")
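
;; In the reservations above, "cortex_a9_vfp*n" keeps the single
;; VFP / Neon pipeline reserved for n consecutive cycles, so the double
;; precision multiplies, the double precision multiply-accumulates and
;; the divides block further VFP issue rather than being fully pipelined.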