/* This file is generated by the genmloop script.  DO NOT EDIT! */

/* Enable switch() support in cgen headers.  */
#define SEM_IN_SWITCH

#define WANT_CPU sh64
#define WANT_CPU_SH64

#include "sim-main.h"
#include "bfd.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "sim-assert.h"

/* Fill in the administrative ARGBUF fields required by all insns,
   virtual and real.  */

static INLINE void
sh64_media_fill_argbuf (const SIM_CPU *cpu, ARGBUF *abuf, const IDESC *idesc,
                    PCADDR pc, int fast_p)
{
#if WITH_SCACHE
  SEM_SET_CODE (abuf, idesc, fast_p);
  ARGBUF_ADDR (abuf) = pc;
#endif
  ARGBUF_IDESC (abuf) = idesc;
}

/* Fill in tracing/profiling fields of an ARGBUF.  */

static INLINE void
sh64_media_fill_argbuf_tp (const SIM_CPU *cpu, ARGBUF *abuf,
                       int trace_p, int profile_p)
{
  ARGBUF_TRACE_P (abuf) = trace_p;
  ARGBUF_PROFILE_P (abuf) = profile_p;
}

#if WITH_SCACHE_PBB

/* Emit the "x-before" handler.
   x-before is emitted before each insn (serial or parallel).
   This is as opposed to x-after which is only emitted at the end of a group
   of parallel insns.  */

static INLINE void
sh64_media_emit_before (SIM_CPU *current_cpu, SCACHE *sc, PCADDR pc, int first_p)
{
  ARGBUF *abuf = &sc[0].argbuf;
  const IDESC *id = & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_BEFORE];

  abuf->fields.before.first_p = first_p;
  sh64_media_fill_argbuf (current_cpu, abuf, id, pc, 0);
  /* no need to set trace_p,profile_p */
}

/* Emit the "x-after" handler.
   x-after is emitted after a serial insn or at the end of a group of
   parallel insns.  */

static INLINE void
sh64_media_emit_after (SIM_CPU *current_cpu, SCACHE *sc, PCADDR pc)
{
  ARGBUF *abuf = &sc[0].argbuf;
  const IDESC *id = & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_AFTER];

  sh64_media_fill_argbuf (current_cpu, abuf, id, pc, 0);
  /* no need to set trace_p,profile_p */
}

#endif /* WITH_SCACHE_PBB */

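/* Decode the insn at PC into ABUF and return its IDESC.
   FAST_P is non-zero if no tracing/profiling/etc. is wanted
   (cf. sh64_media_pbb_begin below).  */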
static INLINE const IDESC *
extract (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, ARGBUF *abuf,
         int fast_p)
{
  const IDESC *id = sh64_media_decode (current_cpu, pc, insn, insn, abuf);

  sh64_media_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
  if (! fast_p)
    {
      int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
      int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
      sh64_media_fill_argbuf_tp (current_cpu, abuf, trace_p, profile_p);
    }
  return id;
}

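/* Execute the insn whose ARGBUF is in SC, returning the vpc of the
   next insn to execute as reported by the semantic handler.  */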
static INLINE SEM_PC
execute (SIM_CPU *current_cpu, SCACHE *sc, int fast_p)
{
  SEM_PC vpc;

  if (fast_p)
    {
#if ! WITH_SEM_SWITCH_FAST
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, &sc->argbuf);
#endif
#else
      abort ();
#endif /* WITH_SEM_SWITCH_FAST */
    }
  else
    {
#if ! WITH_SEM_SWITCH_FULL
      ARGBUF *abuf = &sc->argbuf;
      const IDESC *idesc = abuf->idesc;
#if WITH_SCACHE_PBB
      int virtual_p = CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_VIRTUAL);
#else
      int virtual_p = 0;
#endif

      if (! virtual_p)
        {
          /* FIXME: call x-before */
          if (ARGBUF_PROFILE_P (abuf))
            PROFILE_COUNT_INSN (current_cpu, abuf->addr, idesc->num);
          /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
          if (PROFILE_MODEL_P (current_cpu)
              && ARGBUF_PROFILE_P (abuf))
            sh64_media_model_insn_before (current_cpu, 1 /*first_p*/);
          TRACE_INSN_INIT (current_cpu, abuf, 1);
          TRACE_INSN (current_cpu, idesc->idata,
                      (const struct argbuf *) abuf, abuf->addr);
        }
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, abuf);
#endif
      if (! virtual_p)
        {
          /* FIXME: call x-after */
          if (PROFILE_MODEL_P (current_cpu)
              && ARGBUF_PROFILE_P (abuf))
            {
              int cycles;

              cycles = (*idesc->timing->model_fn) (current_cpu, sc);
              sh64_media_model_insn_after (current_cpu, 1 /*last_p*/, cycles);
            }
          TRACE_INSN_FINI (current_cpu, abuf, 1);
        }
#else
      abort ();
#endif /* WITH_SEM_SWITCH_FULL */
    }

  return vpc;
}


/* Record address of cti terminating a pbb.  */
#define SET_CTI_VPC(sc) do { _cti_sc = (sc); } while (0)
/* Record number of [real] insns in pbb.  */
#define SET_INSN_COUNT(n) do { _insn_count = (n); } while (0)

/* Fetch and extract a pseudo-basic-block.
   FAST_P is non-zero if no tracing/profiling/etc. is wanted.  */

INLINE SEM_PC
sh64_media_pbb_begin (SIM_CPU *current_cpu, int FAST_P)
{
  SEM_PC new_vpc;
  PCADDR pc;
  SCACHE *sc;
  int max_insns = CPU_SCACHE_MAX_CHAIN_LENGTH (current_cpu);

  pc = GET_H_PC ();

  new_vpc = scache_lookup_or_alloc (current_cpu, pc, max_insns, &sc);
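  /* A zero return means the block at PC isn't in the scache yet;
     SC then points at freshly allocated entries, and the code below
     compiles the block into them.  */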
  if (! new_vpc)
    {
      /* Leading '_' to avoid collision with mainloop.in.  */
      int _insn_count = 0;
      SCACHE *orig_sc = sc;
      SCACHE *_cti_sc = NULL;
      int slice_insns = CPU_MAX_SLICE_INSNS (current_cpu);

      /* First figure out how many instructions to compile.
         MAX_INSNS is the size of the allocated buffer, which includes space
         for before/after handlers if they're being used.
         SLICE_INSNS is the maximum number of real insns that can be
         executed.  Zero means "as many as we want".  */
      /* ??? max_insns is serving two incompatible roles.
         1) Number of slots available in scache buffer.
         2) Number of real insns to execute.
         They're incompatible because there are virtual insns emitted too
         (chain,cti-chain,before,after handlers).  */

      if (slice_insns == 1)
        {
          /* No need to worry about extra slots required for virtual insns
             and parallel exec support because MAX_CHAIN_LENGTH is
             guaranteed to be big enough to execute at least 1 insn!  */
          max_insns = 1;
        }
      else
        {
          /* Allow enough slop so that while compiling insns, if max_insns > 0
             then there's guaranteed to be enough space to emit one real insn.
             MAX_CHAIN_LENGTH is typically much longer than
             the normal number of insns between cti's anyway.  */
          max_insns -= (1 /* one for the trailing chain insn */
                        + (FAST_P
                           ? 0
                           : (1 + MAX_PARALLEL_INSNS) /* before+after */)
                        + (MAX_PARALLEL_INSNS > 1
                           ? (MAX_PARALLEL_INSNS * 2)
                           : 0));

          /* Account for before/after handlers.  */
          if (! FAST_P)
            slice_insns *= 3;

          if (slice_insns > 0
              && slice_insns < max_insns)
            max_insns = slice_insns;
        }

      new_vpc = sc;

      /* SC,PC must be updated to point past the last entry used.
         SET_CTI_VPC must be called if pbb is terminated by a cti.
         SET_INSN_COUNT must be called to record number of real insns in
         pbb [could be computed by us of course; the extra cpu cost is
         perhaps negligible].  */

/* begin extract-pbb */
{
  const IDESC *idesc;
  int icount = 0;

 while (max_insns > 0)
    {
      USI insn = GETIMEMUSI (current_cpu, pc);

      idesc = extract (current_cpu, pc, insn, &sc->argbuf, FAST_P);
      SEM_SKIP_COMPILE (current_cpu, sc, 1);
      ++sc;
      --max_insns;
      ++icount;
      pc += idesc->length;

      if (IDESC_CTI_P (idesc))
        {
          SET_CTI_VPC (sc - 1);

          if (CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_DELAY_SLOT))
            {
              USI insn = GETIMEMUSI (current_cpu, pc);
              idesc = extract (current_cpu, pc, insn, &sc->argbuf, FAST_P);

              ++sc;
              --max_insns;
              ++icount;
              pc += idesc->length;
            }
          break;
        }
    }

 Finish:
  SET_INSN_COUNT (icount);
}
/* end extract-pbb */

      /* The last one is a pseudo-insn to link to the next chain.
         It is also used to record the insn count for this chain.  */
      {
        const IDESC *id;

        /* Was pbb terminated by a cti?  */
        if (_cti_sc)
          {
            id = & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_CTI_CHAIN];
          }
        else
          {
            id = & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_CHAIN];
          }
        SEM_SET_CODE (&sc->argbuf, id, FAST_P);
        sc->argbuf.idesc = id;
        sc->argbuf.addr = pc;
        sc->argbuf.fields.chain.insn_count = _insn_count;
        sc->argbuf.fields.chain.next = 0;
        sc->argbuf.fields.chain.branch_target = 0;
        ++sc;
      }

      /* Update the pointer to the next free entry; we may not have used
         as many entries as requested.  */
      CPU_SCACHE_NEXT_FREE (current_cpu) = sc;
      /* Record length of chain if profiling.
         This includes virtual insns since they count against
         max_insns too.  */
      if (! FAST_P)
        PROFILE_COUNT_SCACHE_CHAIN_LENGTH (current_cpu, sc - orig_sc);
    }

  return new_vpc;
}

/* Chain to the next block from a non-cti terminated previous block.  */

INLINE SEM_PC
sh64_media_pbb_chain (SIM_CPU *current_cpu, SEM_ARG sem_arg)
{
  ARGBUF *abuf = SEM_ARGBUF (sem_arg);

  PBB_UPDATE_INSN_COUNT (current_cpu, sem_arg);

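  /* Set bit 0 to stay in SHmedia mode.  */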
  SET_H_PC (abuf->addr | 1);

  /* If not running forever, exit back to main loop.  */
  if (CPU_MAX_SLICE_INSNS (current_cpu) != 0
      /* Also exit back to main loop if there's an event.
         Note that if CPU_MAX_SLICE_INSNS != 1, events won't get processed
         at the "right" time, but then that was what was asked for.
         There is no silver bullet for simulator engines.
         ??? Clearly this needs a cleaner interface.
         At present it's just so Ctrl-C works.  */
      || STATE_EVENTS (CPU_STATE (current_cpu))->work_pending)
    CPU_RUNNING_P (current_cpu) = 0;

  /* If chained to next block, go straight to it.  */
  if (abuf->fields.chain.next)
    return abuf->fields.chain.next;
  /* See if next block has already been compiled.  */
  abuf->fields.chain.next = scache_lookup (current_cpu, abuf->addr);
  if (abuf->fields.chain.next)
    return abuf->fields.chain.next;
  /* Nope, so next insn is a virtual insn to invoke the compiler
     (begin a pbb).  */
  return CPU_SCACHE_PBB_BEGIN (current_cpu);
}

/* Chain to the next block from a cti terminated previous block.
   BR_TYPE indicates whether the branch was taken and whether we can cache
   the vpc of the branch target.
   NEW_PC is the target's branch address, and is only valid if
   BR_TYPE != SEM_BRANCH_UNTAKEN.  */

INLINE SEM_PC
sh64_media_pbb_cti_chain (SIM_CPU *current_cpu, SEM_ARG sem_arg,
                     SEM_BRANCH_TYPE br_type, PCADDR new_pc)
{
  SEM_PC *new_vpc_ptr;

  PBB_UPDATE_INSN_COUNT (current_cpu, sem_arg);

  /* If we have switched ISAs, exit back to main loop.
     Set idesc to 0 to cause the engine to point to the right insn table.  */
  if ((new_pc & 1) == 0)
  {
    /* Switch to SHcompact.  */
    CPU_IDESC_SEM_INIT_P (current_cpu) = 0;
    CPU_RUNNING_P (current_cpu) = 0;
  }

  /* If not running forever, exit back to main loop.  */
  if (CPU_MAX_SLICE_INSNS (current_cpu) != 0
      /* Also exit back to main loop if there's an event.
         Note that if CPU_MAX_SLICE_INSNS != 1, events won't get processed
         at the "right" time, but then that was what was asked for.
         There is no silver bullet for simulator engines.
         ??? Clearly this needs a cleaner interface.
         At present it's just so Ctrl-C works.  */
      || STATE_EVENTS (CPU_STATE (current_cpu))->work_pending)
    CPU_RUNNING_P (current_cpu) = 0;

  /* Restart compiler if we branched to an uncacheable address
     (e.g. "j reg").  */
  if (br_type == SEM_BRANCH_UNCACHEABLE)
    {
      SET_H_PC (new_pc);
      return CPU_SCACHE_PBB_BEGIN (current_cpu);
    }

  /* If branch wasn't taken, update the pc and set NEW_VPC_PTR to our
     next chain ptr.  */
  if (br_type == SEM_BRANCH_UNTAKEN)
    {
      ARGBUF *abuf = SEM_ARGBUF (sem_arg);
      new_pc = abuf->addr;
      /* Set bit 0 to stay in SHmedia mode.  */
      SET_H_PC (new_pc | 1);
      new_vpc_ptr = &abuf->fields.chain.next;
    }
  else
    {
      ARGBUF *abuf = SEM_ARGBUF (sem_arg);
      SET_H_PC (new_pc);
      new_vpc_ptr = &abuf->fields.chain.branch_target;
    }

  /* If chained to next block, go straight to it.  */
  if (*new_vpc_ptr)
    return *new_vpc_ptr;
  /* See if next block has already been compiled.  */
  *new_vpc_ptr = scache_lookup (current_cpu, new_pc);
  if (*new_vpc_ptr)
    return *new_vpc_ptr;
  /* Nope, so next insn is a virtual insn to invoke the compiler
     (begin a pbb).  */
  return CPU_SCACHE_PBB_BEGIN (current_cpu);
}

/* x-before handler.
   This is called before each insn.  */

void
sh64_media_pbb_before (SIM_CPU *current_cpu, SCACHE *sc)
{
  SEM_ARG sem_arg = sc;
  const ARGBUF *abuf = SEM_ARGBUF (sem_arg);
  int first_p = abuf->fields.before.first_p;
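  /* The insn this handler runs before lives in the next scache entry.  */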
  const ARGBUF *cur_abuf = SEM_ARGBUF (sc + 1);
  const IDESC *cur_idesc = cur_abuf->idesc;
  PCADDR pc = cur_abuf->addr;

  if (ARGBUF_PROFILE_P (cur_abuf))
    PROFILE_COUNT_INSN (current_cpu, pc, cur_idesc->num);

  /* If this isn't the first insn, finish up the previous one.  */

  if (! first_p)
    {
      if (PROFILE_MODEL_P (current_cpu))
        {
          const SEM_ARG prev_sem_arg = sc - 1;
          const ARGBUF *prev_abuf = SEM_ARGBUF (prev_sem_arg);
          const IDESC *prev_idesc = prev_abuf->idesc;
          int cycles;

          /* ??? May want to measure all insns if doing insn tracing.  */
          if (ARGBUF_PROFILE_P (prev_abuf))
            {
              cycles = (*prev_idesc->timing->model_fn) (current_cpu, prev_sem_arg);
              sh64_media_model_insn_after (current_cpu, 0 /*last_p*/, cycles);
            }
        }

      TRACE_INSN_FINI (current_cpu, cur_abuf, 0 /*last_p*/);
    }

  /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
  if (PROFILE_MODEL_P (current_cpu)
      && ARGBUF_PROFILE_P (cur_abuf))
    sh64_media_model_insn_before (current_cpu, first_p);

  TRACE_INSN_INIT (current_cpu, cur_abuf, first_p);
  TRACE_INSN (current_cpu, cur_idesc->idata, cur_abuf, pc);
}

/* x-after handler.
   This is called after a serial insn or at the end of a group of parallel
   insns.  */

void
sh64_media_pbb_after (SIM_CPU *current_cpu, SCACHE *sc)
{
  SEM_ARG sem_arg = sc;
  const ARGBUF *abuf = SEM_ARGBUF (sem_arg);
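  /* The insn just executed lives in the previous scache entry.  */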
  const SEM_ARG prev_sem_arg = sc - 1;
  const ARGBUF *prev_abuf = SEM_ARGBUF (prev_sem_arg);

  /* ??? May want to measure all insns if doing insn tracing.  */
  if (PROFILE_MODEL_P (current_cpu)
      && ARGBUF_PROFILE_P (prev_abuf))
    {
      const IDESC *prev_idesc = prev_abuf->idesc;
      int cycles;

      cycles = (*prev_idesc->timing->model_fn) (current_cpu, prev_sem_arg);
      sh64_media_model_insn_after (current_cpu, 1 /*last_p*/, cycles);
    }
  TRACE_INSN_FINI (current_cpu, prev_abuf, 1 /*last_p*/);
}

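/* The "full" engine: FAST_P is 0, so tracing/profiling is enabled.  */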
#define FAST_P 0

void
sh64_media_engine_run_full (SIM_CPU *current_cpu)
{
  SIM_DESC current_state = CPU_STATE (current_cpu);
  SCACHE *scache = CPU_SCACHE_CACHE (current_cpu);
  /* virtual program counter */
  SEM_PC vpc;
#if WITH_SEM_SWITCH_FULL
  /* For communication between cti's and cti-chain.  */
  SEM_BRANCH_TYPE pbb_br_type;
  PCADDR pbb_br_npc;
#endif


  if (! CPU_IDESC_SEM_INIT_P (current_cpu))
    {
      /* ??? 'twould be nice to move this up a level and only call it once.
         On the other hand, in the "let's go fast" case the test is only done
         once per pbb (since we only return to the main loop at the end of
         a pbb).  And in the "let's run until we're done" case we don't return
         until the program exits.  */

#if WITH_SEM_SWITCH_FULL
#if defined (__GNUC__)
/* ??? Later maybe paste sem-switch.c in when building mainloop.c.  */
#define DEFINE_LABELS
#include "sem-media-switch.c"
#endif
#else
      sh64_media_sem_init_idesc_table (current_cpu);
#endif

      /* Initialize the "begin (compile) a pbb" virtual insn.  */
      vpc = CPU_SCACHE_PBB_BEGIN (current_cpu);
      SEM_SET_FULL_CODE (SEM_ARGBUF (vpc),
                         & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_BEGIN]);
      vpc->argbuf.idesc = & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_BEGIN];

      CPU_IDESC_SEM_INIT_P (current_cpu) = 1;
    }

  CPU_RUNNING_P (current_cpu) = 1;
  /* ??? In the case where we're returning to the main loop after every
     pbb we don't want to call pbb_begin each time (which hashes on the pc
     and does a table lookup).  A way to speed this up is to save vpc
     between calls.  */
  vpc = sh64_media_pbb_begin (current_cpu, FAST_P);

  do
    {
/* begin full-exec-pbb */
{
#if (! FAST_P && WITH_SEM_SWITCH_FULL) || (FAST_P && WITH_SEM_SWITCH_FAST)
#define DEFINE_SWITCH
#define WITH_ISA_COMPACT
#include "sem-media-switch.c"
#else
  vpc = execute (current_cpu, vpc, FAST_P);
#endif
}
/* end full-exec-pbb */
    }
  while (CPU_RUNNING_P (current_cpu));
}

#undef FAST_P

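/* The "fast" engine: FAST_P is non-zero, so no tracing/profiling.  */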
#define FAST_P 1

void
sh64_media_engine_run_fast (SIM_CPU *current_cpu)
{
  SIM_DESC current_state = CPU_STATE (current_cpu);
  SCACHE *scache = CPU_SCACHE_CACHE (current_cpu);
  /* virtual program counter */
  SEM_PC vpc;
#if WITH_SEM_SWITCH_FAST
  /* For communication between cti's and cti-chain.  */
  SEM_BRANCH_TYPE pbb_br_type;
  PCADDR pbb_br_npc;
#endif


  if (! CPU_IDESC_SEM_INIT_P (current_cpu))
    {
      /* ??? 'twould be nice to move this up a level and only call it once.
         On the other hand, in the "let's go fast" case the test is only done
         once per pbb (since we only return to the main loop at the end of
         a pbb).  And in the "let's run until we're done" case we don't return
         until the program exits.  */

#if WITH_SEM_SWITCH_FAST
#if defined (__GNUC__)
/* ??? Later maybe paste sem-switch.c in when building mainloop.c.  */
#define DEFINE_LABELS
#include "sem-media-switch.c"
#endif
#else
      sh64_media_semf_init_idesc_table (current_cpu);
#endif

      /* Initialize the "begin (compile) a pbb" virtual insn.  */
      vpc = CPU_SCACHE_PBB_BEGIN (current_cpu);
      SEM_SET_FAST_CODE (SEM_ARGBUF (vpc),
                         & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_BEGIN]);
      vpc->argbuf.idesc = & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_BEGIN];

      CPU_IDESC_SEM_INIT_P (current_cpu) = 1;
    }

  CPU_RUNNING_P (current_cpu) = 1;
  /* ??? In the case where we're returning to the main loop after every
     pbb we don't want to call pbb_begin each time (which hashes on the pc
     and does a table lookup).  A way to speed this up is to save vpc
     between calls.  */
  vpc = sh64_media_pbb_begin (current_cpu, FAST_P);

  do
    {
/* begin fast-exec-pbb */
{
#if (! FAST_P && WITH_SEM_SWITCH_FULL) || (FAST_P && WITH_SEM_SWITCH_FAST)
#define DEFINE_SWITCH
#define WITH_ISA_COMPACT
#include "sem-media-switch.c"
#else
  vpc = execute (current_cpu, vpc, FAST_P);
#endif
}
/* end fast-exec-pbb */
    }
  while (CPU_RUNNING_P (current_cpu));
}

#undef FAST_P
