# Simulator main loop for frv. -*- C -*-
# Copyright (C) 1998, 1999, 2000, 2001, 2003, 2007, 2008, 2009, 2010
# Free Software Foundation, Inc.
# Contributed by Red Hat.
#
# This file is part of the GNU Simulators.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# Syntax:
# /bin/sh mainloop.in command
#
# Command is one of:
#
# init
# support
# extract-{simple,scache,pbb}
# {full,fast}-exec-{simple,scache,pbb}
#
# A target need only provide a "full" version of one of simple,scache,pbb.
# If the target wants it can also provide a fast version of same.
# It can't provide more than this.
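#
# For example: /bin/sh mainloop.in support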

# ??? After a few more ports are done, revisit.
# Will eventually need to machine generate a lot of this.

case "x$1" in

xsupport)

cat <<EOF
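
/* Decode the insn at PC into ABUF.  In full-featured mode, also record
   whether the insn is in the trace and profile address ranges.  */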
static INLINE const IDESC *
extract (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, ARGBUF *abuf,
         int fast_p)
{
  const IDESC *id = @cpu@_decode (current_cpu, pc, insn, insn, abuf);
  @cpu@_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
  if (! fast_p)
    {
      int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
      int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
      @cpu@_fill_argbuf_tp (current_cpu, abuf, trace_p, profile_p);
    }
  return id;
}
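
/* Execute the insn cached in SC.  gr0 is forced to zero before each
   insn; in full-featured (non-fast) mode this also drives profiling,
   cycle modeling and tracing around the semantic call.  */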
static INLINE SEM_PC
execute (SIM_CPU *current_cpu, SCACHE *sc, int fast_p)
{
  SEM_PC vpc;

  /* Force gr0 to zero before every insn.  */
  @cpu@_h_gr_set (current_cpu, 0, 0);

  if (fast_p)
    {
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, sc);
    }
  else
    {
      ARGBUF *abuf = &sc->argbuf;
      const IDESC *idesc = abuf->idesc;
#if WITH_SCACHE_PBB
      int virtual_p = CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_VIRTUAL);
#else
      int virtual_p = 0;
#endif

      if (! virtual_p)
        {
          /* FIXME: call x-before */
          if (ARGBUF_PROFILE_P (abuf))
            PROFILE_COUNT_INSN (current_cpu, abuf->addr, idesc->num);
          /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
          if (FRV_COUNT_CYCLES (current_cpu, ARGBUF_PROFILE_P (abuf)))
            {
              @cpu@_model_insn_before (current_cpu, sc->first_insn_p);
              model_insn = FRV_INSN_MODEL_PASS_1;
              if (idesc->timing->model_fn != NULL)
                (*idesc->timing->model_fn) (current_cpu, sc);
            }
          else
            model_insn = FRV_INSN_NO_MODELING;
          TRACE_INSN_INIT (current_cpu, abuf, 1);
          TRACE_INSN (current_cpu, idesc->idata,
                      (const struct argbuf *) abuf, abuf->addr);
        }
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, abuf);
#endif
      if (! virtual_p)
        {
          /* FIXME: call x-after */
          if (FRV_COUNT_CYCLES (current_cpu, ARGBUF_PROFILE_P (abuf)))
            {
              int cycles;
              if (idesc->timing->model_fn != NULL)
                {
                  model_insn = FRV_INSN_MODEL_PASS_2;
                  cycles = (*idesc->timing->model_fn) (current_cpu, sc);
                }
              else
                cycles = 1;
              @cpu@_model_insn_after (current_cpu, sc->last_insn_p, cycles);
            }
          TRACE_INSN_FINI (current_cpu, abuf, 1);
        }
    }

  return vpc;
}
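
/* Reset the parallel-write queue and the interrupt state tracked across
   the insns of one VLIW.  */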
static void
@cpu@_parallel_write_init (SIM_CPU *current_cpu)
{
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  CGEN_WRITE_QUEUE_CLEAR (q);
  previous_vliw_pc = CPU_PC_GET (current_cpu);
  frv_interrupt_state.f_ne_flags[0] = 0;
  frv_interrupt_state.f_ne_flags[1] = 0;
  frv_interrupt_state.imprecise_interrupt = NULL;
}
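
/* Execute the writes queued by the insns of the current VLIW, in order,
   honoring at most the first queued branch.  */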
static void
@cpu@_parallel_write_queued (SIM_CPU *current_cpu)
{
  int i;

  FRV_VLIW *vliw = CPU_VLIW (current_cpu);
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);

  /* Loop over the queued writes, executing them.  Set the pc to the address
     of the insn which queued each write for the proper context in case an
     interrupt is caused.  Restore the proper pc after the writes are
     completed.  */
  IADDR save_pc = CPU_PC_GET (current_cpu);
  IADDR new_pc = save_pc;
  int branch_taken = 0;
  int limit = CGEN_WRITE_QUEUE_INDEX (q);
  frv_interrupt_state.data_written.length = 0;

  for (i = 0; i < limit; ++i)
    {
      CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, i);

      /* If an imprecise interrupt was generated, then check whether the
         result should still be written.  */
      if (frv_interrupt_state.imprecise_interrupt != NULL)
        {
          /* Only check writes by the insn causing the exception.  */
          if (CGEN_WRITE_QUEUE_ELEMENT_IADDR (item)
              == frv_interrupt_state.imprecise_interrupt->vpc)
            {
              /* Execute writes of floating point operations resulting in
                 overflow, underflow or inexact.  */
              if (frv_interrupt_state.imprecise_interrupt->kind
                  == FRV_FP_EXCEPTION)
                {
                  if ((frv_interrupt_state.imprecise_interrupt
                       ->u.fp_info.fsr_mask
                       & ~(FSR_INEXACT | FSR_OVERFLOW | FSR_UNDERFLOW)))
                    continue; /* Don't execute */
                }
              /* Execute writes marked as 'forced'.  */
              else if (! (CGEN_WRITE_QUEUE_ELEMENT_FLAGS (item)
                          & FRV_WRITE_QUEUE_FORCE_WRITE))
                continue; /* Don't execute */
            }
        }

      /* Only execute the first branch on the queue.  */
      if (CGEN_WRITE_QUEUE_ELEMENT_KIND (item) == CGEN_PC_WRITE
          || CGEN_WRITE_QUEUE_ELEMENT_KIND (item) == CGEN_FN_PC_WRITE)
        {
          if (branch_taken)
            continue;
          branch_taken = 1;
          if (CGEN_WRITE_QUEUE_ELEMENT_KIND (item) == CGEN_PC_WRITE)
            new_pc = item->kinds.pc_write.value;
          else
            new_pc = item->kinds.fn_pc_write.value;
        }

      CPU_PC_SET (current_cpu, CGEN_WRITE_QUEUE_ELEMENT_IADDR (item));
      frv_save_data_written_for_interrupts (current_cpu, item);
      cgen_write_queue_element_execute (current_cpu, item);
    }

  /* Update the LR with the address of the next insn if the flag is set.
     This flag gets set in frvbf_set_write_next_vliw_to_LR by the JMPL,
     JMPIL and CALL insns.  */
  if (frvbf_write_next_vliw_addr_to_LR)
    {
      frvbf_h_spr_set_handler (current_cpu, H_SPR_LR, save_pc);
      frvbf_write_next_vliw_addr_to_LR = 0;
    }

  CPU_PC_SET (current_cpu, new_pc);
  CGEN_WRITE_QUEUE_CLEAR (q);
}
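
/* Entry point for draining the parallel-write queue from outside this
   main loop.  */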
void
@cpu@_perform_writeback (SIM_CPU *current_cpu)
{
  @cpu@_parallel_write_queued (current_cpu);
}

static unsigned cache_reqno = 0x80000000; /* Start value is for debugging.  */

#if 0 /* experimental */
/* FR400 has single prefetch.  */
static void
fr400_simulate_insn_prefetch (SIM_CPU *current_cpu, IADDR vpc)
{
  int cur_ix;
  FRV_CACHE *cache;

  /* The cpu receives 8 bytes worth of insn data for each fetch aligned
     on an 8 byte boundary.  */
#define FR400_FETCH_SIZE 8

  cur_ix = LS;
  vpc &= ~(FR400_FETCH_SIZE - 1);
  cache = CPU_INSN_CACHE (current_cpu);

  /* Request a load of the current address buffer, if necessary.  */
  if (frv_insn_fetch_buffer[cur_ix].address != vpc)
    {
      frv_insn_fetch_buffer[cur_ix].address = vpc;
      frv_insn_fetch_buffer[cur_ix].reqno = cache_reqno++;
      if (FRV_COUNT_CYCLES (current_cpu, 1))
        frv_cache_request_load (cache, frv_insn_fetch_buffer[cur_ix].reqno,
                                frv_insn_fetch_buffer[cur_ix].address,
                                UNIT_I0 + cur_ix);
    }

  /* Wait for the current address buffer to be loaded, if necessary.  */
  if (FRV_COUNT_CYCLES (current_cpu, 1))
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      int wait;

      /* Account for any branch penalty.  */
      if (ps->branch_penalty > 0 && ! ps->past_first_p)
        {
          frv_model_advance_cycles (current_cpu, ps->branch_penalty);
          frv_model_trace_wait_cycles (current_cpu, ps->branch_penalty,
                                       "Branch penalty:");
          ps->branch_penalty = 0;
        }

      /* Account for insn fetch latency.  */
      wait = 0;
      while (frv_insn_fetch_buffer[cur_ix].reqno != NO_REQNO)
        {
          frv_model_advance_cycles (current_cpu, 1);
          ++wait;
        }
      frv_model_trace_wait_cycles (current_cpu, wait, "Insn fetch:");
      return;
    }

  /* Otherwise just load the insns directly from the cache.  */
  if (frv_insn_fetch_buffer[cur_ix].reqno != NO_REQNO)
    {
      frv_cache_read (cache, cur_ix, vpc);
      frv_insn_fetch_buffer[cur_ix].reqno = NO_REQNO;
    }
}
#endif /* experimental */

/* FR500 has dual prefetch.  */
static void
simulate_dual_insn_prefetch (SIM_CPU *current_cpu, IADDR vpc, int fetch_size)
{
  int i;
  int cur_ix, pre_ix;
  SI pre_address;
  FRV_CACHE *cache;

  /* See if the pc is within the addresses specified by either of the
     fetch buffers.  If so, that will be the current buffer.  Otherwise,
     arbitrarily select the LD buffer as the current one since it gets
     priority in the case of interfering load requests.  */
  cur_ix = LD;
  vpc &= ~(fetch_size - 1);
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      if (frv_insn_fetch_buffer[i].address == vpc)
        {
          cur_ix = i;
          break;
        }
    }
  cache = CPU_INSN_CACHE (current_cpu);

  /* Request a load of the current address buffer, if necessary.  */
  if (frv_insn_fetch_buffer[cur_ix].address != vpc)
    {
      frv_insn_fetch_buffer[cur_ix].address = vpc;
      frv_insn_fetch_buffer[cur_ix].reqno = cache_reqno++;
      if (FRV_COUNT_CYCLES (current_cpu, 1))
        frv_cache_request_load (cache, frv_insn_fetch_buffer[cur_ix].reqno,
                                frv_insn_fetch_buffer[cur_ix].address,
                                UNIT_I0 + cur_ix);
    }

  /* If the prefetch buffer does not represent the next sequential address,
     then request a load of the next sequential address.  */
  pre_ix = (cur_ix + 1) % FRV_CACHE_PIPELINES;
  pre_address = vpc + fetch_size;
  if (frv_insn_fetch_buffer[pre_ix].address != pre_address)
    {
      frv_insn_fetch_buffer[pre_ix].address = pre_address;
      frv_insn_fetch_buffer[pre_ix].reqno = cache_reqno++;
      if (FRV_COUNT_CYCLES (current_cpu, 1))
        frv_cache_request_load (cache, frv_insn_fetch_buffer[pre_ix].reqno,
                                frv_insn_fetch_buffer[pre_ix].address,
                                UNIT_I0 + pre_ix);
    }

  /* If counting cycles, account for any branch penalty and/or insn fetch
     latency here.  */
  if (FRV_COUNT_CYCLES (current_cpu, 1))
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      int wait;

      /* Account for any branch penalty.  */
      if (ps->branch_penalty > 0 && ! ps->past_first_p)
        {
          frv_model_advance_cycles (current_cpu, ps->branch_penalty);
          frv_model_trace_wait_cycles (current_cpu, ps->branch_penalty,
                                       "Branch penalty:");
          ps->branch_penalty = 0;
        }

      /* Account for insn fetch latency.  */
      wait = 0;
      while (frv_insn_fetch_buffer[cur_ix].reqno != NO_REQNO)
        {
          frv_model_advance_cycles (current_cpu, 1);
          ++wait;
        }
      frv_model_trace_wait_cycles (current_cpu, wait, "Insn fetch:");
      return;
    }

  /* Otherwise just load the insns directly from the cache.  */
  if (frv_insn_fetch_buffer[cur_ix].reqno != NO_REQNO)
    {
      frv_cache_read (cache, cur_ix, vpc);
      frv_insn_fetch_buffer[cur_ix].reqno = NO_REQNO;
    }
  if (frv_insn_fetch_buffer[pre_ix].reqno != NO_REQNO)
    {
      frv_cache_read (cache, pre_ix, pre_address);
      frv_insn_fetch_buffer[pre_ix].reqno = NO_REQNO;
    }
}
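
/* Simulate insn prefetch for VPC, using the fetch size appropriate to
   the simulated machine.  */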
static void
@cpu@_simulate_insn_prefetch (SIM_CPU *current_cpu, IADDR vpc)
{
  SI hsr0;
  SIM_DESC sd;

  /* Nothing to do if not counting cycles and the cache is not enabled.  */
  hsr0 = GET_HSR0 ();
  if (! GET_HSR0_ICE (hsr0) && ! FRV_COUNT_CYCLES (current_cpu, 1))
    return;

  /* Different machines handle prefetch differently.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      simulate_dual_insn_prefetch (current_cpu, vpc, 8);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_fr550:
    case bfd_mach_frv:
      simulate_dual_insn_prefetch (current_cpu, vpc, 16);
      break;
    default:
      break;
    }
}

int frv_save_profile_model_p;
EOF

;;

xinit)

cat <<EOF
/*xxxinit*/
/* If the timer is enabled, then we will enable model profiling during
   execution.  This is because the timer needs accurate cycles counts to
   work properly.  Save the original setting of model profiling.  */
if (frv_interrupt_state.timer.enabled)
  frv_save_profile_model_p = PROFILE_MODEL_P (current_cpu);
EOF

;;

xextract-simple | xextract-scache)

# Inputs: current_cpu, vpc, sc, FAST_P
# Outputs: sc filled in
# SET_LAST_INSN_P(last_p) called to indicate whether insn is last one

cat <<EOF
{
  CGEN_INSN_INT insn = frvbf_read_imem_USI (current_cpu, vpc);
  extract (current_cpu, vpc, insn, SEM_ARGBUF (sc), FAST_P);
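  /* An insn with bit 31 set is treated here as the last insn of its
     VLIW packet.  */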
  SET_LAST_INSN_P ((insn & 0x80000000) != 0);
}
EOF

;;

xfull-exec-* | xfast-exec-*)

# Inputs: current_cpu, vpc, FAST_P
# Outputs:
#   vpc contains the address of the next insn to execute
#   pc of current_cpu must be up to date (=vpc) upon exit
#   CPU_INSN_COUNT (current_cpu) must be updated by number of insns executed
#
# Unlike the non-parallel case, this version is responsible for doing the
# scache lookup.

cat <<EOF
{
  FRV_VLIW *vliw;
  int first_insn_p = 1;
  int last_insn_p = 0;
  int ninsns;
  CGEN_ATTR_VALUE_ENUM_TYPE slot;

  /* If the timer is enabled, then enable model profiling.  This is because
     the timer needs accurate cycles counts to work properly.  */
  if (frv_interrupt_state.timer.enabled && ! frv_save_profile_model_p)
    sim_profile_set_option (current_state, "-model", PROFILE_MODEL_IDX, "1");

  /* Init parallel-write queue and vliw.  */
  @cpu@_parallel_write_init (current_cpu);
  vliw = CPU_VLIW (current_cpu);
  frv_vliw_reset (vliw, STATE_ARCHITECTURE (CPU_STATE (current_cpu))->mach,
                  CPU_ELF_FLAGS (current_cpu));
  frv_current_fm_slot = UNIT_NIL;
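
  /* Assemble one VLIW packet insn by insn and execute it.  */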
  for (ninsns = 0; ! last_insn_p && ninsns < FRV_VLIW_SIZE; ++ninsns)
    {
      SCACHE *sc;
      const CGEN_INSN *insn;
      int error;
      /* Go through the motions of finding the insns in the cache.  */
      @cpu@_simulate_insn_prefetch (current_cpu, vpc);

      sc = @cpu@_scache_lookup (current_cpu, vpc, scache, hash_mask, FAST_P);
      sc->first_insn_p = first_insn_p;
      last_insn_p = sc->last_insn_p;

      /* Add the insn to the vliw and set up the interrupt state.  */
      insn = sc->argbuf.idesc->idata;
      error = frv_vliw_add_insn (vliw, insn);
      if (! error)
        frv_vliw_setup_insn (current_cpu, insn);
      frv_detect_insn_access_interrupts (current_cpu, sc);
      slot = (*vliw->current_vliw)[vliw->next_slot - 1];
      if (slot >= UNIT_FM0 && slot <= UNIT_FM3)
        frv_current_fm_slot = slot;

      vpc = execute (current_cpu, sc, FAST_P);

      SET_H_PC (vpc); /* needed for interrupt handling */
      first_insn_p = 0;
    }

  /* If the timer is enabled, and model profiling was not originally enabled,
     then turn it off again.  This is the only place we can currently gain
     control to do this.  */
  if (frv_interrupt_state.timer.enabled && ! frv_save_profile_model_p)
    sim_profile_set_option (current_state, "-model", PROFILE_MODEL_IDX, "0");

  /* Check for interrupts.  Also handles writeback if necessary.  */
  frv_process_interrupts (current_cpu);

  CPU_INSN_COUNT (current_cpu) += ninsns;
}
EOF

;;

*)
  echo "Invalid argument to mainloop.in: $1" >&2
  exit 1
  ;;

esac