OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [gnu-stable/] [gdb-7.2/] [sim/] [sh64/] [mloop-compact.c] - Diff between revs 835 and 841

Go to most recent revision | Only display areas with differences | Details | Blame | View Log

Rev 835 Rev 841
/* This file is generated by the genmloop script.  DO NOT EDIT! */
/* This file is generated by the genmloop script.  DO NOT EDIT! */
 
 
/* Enable switch() support in cgen headers.  */
/* Enable switch() support in cgen headers.  */
#define SEM_IN_SWITCH
#define SEM_IN_SWITCH
 
 
#define WANT_CPU sh64
#define WANT_CPU sh64
#define WANT_CPU_SH64
#define WANT_CPU_SH64
 
 
#include "sim-main.h"
#include "sim-main.h"
#include "bfd.h"
#include "bfd.h"
#include "cgen-mem.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "cgen-ops.h"
#include "sim-assert.h"
#include "sim-assert.h"
 
 
/* Fill in the administrative ARGBUF fields required by all insns,
   virtual and real.  */

static INLINE void
sh64_compact_fill_argbuf (const SIM_CPU *cpu, ARGBUF *abuf, const IDESC *idesc,
                    PCADDR pc, int fast_p)
{
#if WITH_SCACHE
  /* With the semantic cache, record the semantic handler (fast or full
     variant per FAST_P) and the insn's address so the engine can dispatch
     later without re-decoding.  */
  SEM_SET_CODE (abuf, idesc, fast_p);
  ARGBUF_ADDR (abuf) = pc;
#endif
  /* Always record the insn descriptor.  */
  ARGBUF_IDESC (abuf) = idesc;
}
 
 
/* Fill in tracing/profiling fields of an ARGBUF.
   TRACE_P/PROFILE_P are precomputed flags saying whether this insn's pc
   falls in the trace/profile ranges (see extract below).  */

static INLINE void
sh64_compact_fill_argbuf_tp (const SIM_CPU *cpu, ARGBUF *abuf,
                       int trace_p, int profile_p)
{
  ARGBUF_TRACE_P (abuf) = trace_p;
  ARGBUF_PROFILE_P (abuf) = profile_p;
}
 
 
#if WITH_SCACHE_PBB
#if WITH_SCACHE_PBB
 
 
/* Emit the "x-before" handler.
   x-before is emitted before each insn (serial or parallel).
   This is as opposed to x-after which is only emitted at the end of a group
   of parallel insns.  */

static INLINE void
sh64_compact_emit_before (SIM_CPU *current_cpu, SCACHE *sc, PCADDR pc, int first_p)
{
  ARGBUF *abuf = &sc[0].argbuf;
  /* Virtual-insn descriptor for the x-before handler.  */
  const IDESC *id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEFORE];

  /* Record whether this is the first insn of a group; the handler uses it
     to decide whether a previous insn needs finishing up.  */
  abuf->fields.before.first_p = first_p;
  sh64_compact_fill_argbuf (current_cpu, abuf, id, pc, 0);
  /* no need to set trace_p,profile_p */
}
 
 
/* Emit the "x-after" handler.
   x-after is emitted after a serial insn or at the end of a group of
   parallel insns.  */

static INLINE void
sh64_compact_emit_after (SIM_CPU *current_cpu, SCACHE *sc, PCADDR pc)
{
  ARGBUF *abuf = &sc[0].argbuf;
  /* Virtual-insn descriptor for the x-after handler.  */
  const IDESC *id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_AFTER];

  sh64_compact_fill_argbuf (current_cpu, abuf, id, pc, 0);
  /* no need to set trace_p,profile_p */
}
 
 
#endif /* WITH_SCACHE_PBB */
#endif /* WITH_SCACHE_PBB */
 
 
 
 
/* Decode the insn at PC (raw bits in INSN) into ABUF and fill in the
   administrative fields.  Returns the insn descriptor.
   FAST_P non-zero means no tracing/profiling is wanted, so the
   trace/profile flags are skipped.  */

static INLINE const IDESC *
extract (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, ARGBUF *abuf,
         int fast_p)
{
  const IDESC *id = sh64_compact_decode (current_cpu, pc, insn, insn, abuf);

  sh64_compact_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
  if (! fast_p)
    {
      /* Record whether this pc is inside the user-selected trace and
         profile address ranges.  */
      int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
      int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
      sh64_compact_fill_argbuf_tp (current_cpu, abuf, trace_p, profile_p);
    }
  return id;
}
 
 
/* Execute the insn whose decoded state is in SC, returning the next
   virtual pc.  FAST_P selects the fast (no trace/profile) semantic
   handler.  When the semantics are provided via a switch statement
   (WITH_SEM_SWITCH_*), the corresponding path here is unreachable and
   aborts.  */

static INLINE SEM_PC
execute (SIM_CPU *current_cpu, SCACHE *sc, int fast_p)
{
  SEM_PC vpc;

  if (fast_p)
    {
#if ! WITH_SEM_SWITCH_FAST
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, &sc->argbuf);
#endif
#else
      /* Fast semantics are compiled as a switch, not function pointers;
         this path must never be taken.  */
      abort ();
#endif /* WITH_SEM_SWITCH_FAST */
    }
  else
    {
#if ! WITH_SEM_SWITCH_FULL
      ARGBUF *abuf = &sc->argbuf;
      const IDESC *idesc = abuf->idesc;
#if WITH_SCACHE_PBB
      /* Virtual insns (chain, before/after handlers) get no per-insn
         tracing/profiling of their own.  */
      int virtual_p = CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_VIRTUAL);
#else
      int virtual_p = 0;
#endif

      if (! virtual_p)
        {
          /* FIXME: call x-before */
          if (ARGBUF_PROFILE_P (abuf))
            PROFILE_COUNT_INSN (current_cpu, abuf->addr, idesc->num);
          /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
          if (PROFILE_MODEL_P (current_cpu)
              && ARGBUF_PROFILE_P (abuf))
            sh64_compact_model_insn_before (current_cpu, 1 /*first_p*/);
          TRACE_INSN_INIT (current_cpu, abuf, 1);
          TRACE_INSN (current_cpu, idesc->idata,
                      (const struct argbuf *) abuf, abuf->addr);
        }
      /* Run the full (traced/profiled) semantic handler.  */
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, abuf);
#endif
      if (! virtual_p)
        {
          /* FIXME: call x-after */
          if (PROFILE_MODEL_P (current_cpu)
              && ARGBUF_PROFILE_P (abuf))
            {
              int cycles;

              /* Ask the timing model how long the insn took, then record
                 it.  */
              cycles = (*idesc->timing->model_fn) (current_cpu, sc);
              sh64_compact_model_insn_after (current_cpu, 1 /*last_p*/, cycles);
            }
          TRACE_INSN_FINI (current_cpu, abuf, 1);
        }
#else
      /* Full semantics are compiled as a switch, not function pointers;
         this path must never be taken.  */
      abort ();
#endif /* WITH_SEM_SWITCH_FULL */
    }

  return vpc;
}
 
 
 
 
/* Record address of cti terminating a pbb.  */
/* Record address of cti terminating a pbb.  */
#define SET_CTI_VPC(sc) do { _cti_sc = (sc); } while (0)
#define SET_CTI_VPC(sc) do { _cti_sc = (sc); } while (0)
/* Record number of [real] insns in pbb.  */
/* Record number of [real] insns in pbb.  */
#define SET_INSN_COUNT(n) do { _insn_count = (n); } while (0)
#define SET_INSN_COUNT(n) do { _insn_count = (n); } while (0)
 
 
/* Fetch and extract a pseudo-basic-block.
   FAST_P is non-zero if no tracing/profiling/etc. is wanted.
   Looks the current pc up in the scache; on a miss, decodes insns starting
   at pc into the allocated scache entries until a cti (or the insn budget)
   ends the block, then appends a chain/cti-chain virtual insn.  Returns the
   virtual pc (scache entry) to begin executing at.  */

INLINE SEM_PC
sh64_compact_pbb_begin (SIM_CPU *current_cpu, int FAST_P)
{
  SEM_PC new_vpc;
  PCADDR pc;
  SCACHE *sc;
  int max_insns = CPU_SCACHE_MAX_CHAIN_LENGTH (current_cpu);

  pc = GET_H_PC ();

  new_vpc = scache_lookup_or_alloc (current_cpu, pc, max_insns, &sc);
  if (! new_vpc)
    {
      /* Leading '_' to avoid collision with mainloop.in.  */
      int _insn_count = 0;
      SCACHE *orig_sc = sc;
      SCACHE *_cti_sc = NULL;
      int slice_insns = CPU_MAX_SLICE_INSNS (current_cpu);

      /* First figure out how many instructions to compile.
         MAX_INSNS is the size of the allocated buffer, which includes space
         for before/after handlers if they're being used.
         SLICE_INSNS is the maximum number of real insns that can be
         executed.  Zero means "as many as we want".  */
      /* ??? max_insns is serving two incompatible roles.
         1) Number of slots available in scache buffer.
         2) Number of real insns to execute.
         They're incompatible because there are virtual insns emitted too
         (chain,cti-chain,before,after handlers).  */

      if (slice_insns == 1)
        {
          /* No need to worry about extra slots required for virtual insns
             and parallel exec support because MAX_CHAIN_LENGTH is
             guaranteed to be big enough to execute at least 1 insn!  */
          max_insns = 1;
        }
      else
        {
          /* Allow enough slop so that while compiling insns, if max_insns > 0
             then there's guaranteed to be enough space to emit one real insn.
             MAX_CHAIN_LENGTH is typically much longer than
             the normal number of insns between cti's anyway.  */
          max_insns -= (1 /* one for the trailing chain insn */
                        + (FAST_P
                           ? 0
                           : (1 + MAX_PARALLEL_INSNS) /* before+after */)
                        + (MAX_PARALLEL_INSNS > 1
                           ? (MAX_PARALLEL_INSNS * 2)
                           : 0));

          /* Account for before/after handlers.  */
          if (! FAST_P)
            slice_insns *= 3;

          if (slice_insns > 0
              && slice_insns < max_insns)
            max_insns = slice_insns;
        }

      new_vpc = sc;

      /* SC,PC must be updated to point past the last entry used.
         SET_CTI_VPC must be called if pbb is terminated by a cti.
         SET_INSN_COUNT must be called to record number of real insns in
         pbb [could be computed by us of course, extra cpu but perhaps
         negligible enough].  */

/* begin extract-pbb */
{
  const IDESC *idesc;
  int icount = 0;

 while (max_insns > 0)
    {
      /* Compact insns are 16 bits wide.  */
      UHI insn = GETIMEMUHI (current_cpu, pc);

      idesc = extract (current_cpu, pc, insn, &sc->argbuf, FAST_P);
      SEM_SKIP_COMPILE (current_cpu, sc, 1);
      ++sc;
      --max_insns;
      ++icount;
      pc += idesc->length;

      /* A cti (control transfer insn) ends the pbb; compile its delay
         slot insn (if any) into the block first.  */
      if (IDESC_CTI_P (idesc))
        {
          SET_CTI_VPC (sc - 1);

          if (CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_DELAY_SLOT))
            {
              USI insn = GETIMEMUHI (current_cpu, pc);
              idesc = extract (current_cpu, pc, insn, &sc->argbuf, FAST_P);

              /* A cti or an explicitly illegal-in-slot insn in a delay
                 slot is a malformed program: report and halt.  */
              if (IDESC_CTI_P (idesc) ||
                  CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_ILLSLOT))
                {
                  /* NOTE(review): sd is declared here but the eprintf call
                     passes CPU_STATE (current_cpu) directly rather than sd
                     — harmless inconsistency in the generated code.  */
                  SIM_DESC sd = CPU_STATE (current_cpu);
                  sim_io_eprintf (CPU_STATE (current_cpu),
                    "malformed program, `%s' insn in delay slot\n",
                    CGEN_INSN_NAME (idesc->idata));
                  sim_engine_halt (sd, current_cpu, NULL, pc,
                                   sim_stopped, SIM_SIGILL);
                }
              else
                {
                  ++sc;
                  --max_insns;
                  ++icount;
                  pc += idesc->length;
                }
            }
          break;
      }
    }

 /* NOTE(review): no visible goto targets this label; presumably kept for
    use by a macro expansion or other generated variants — confirm before
    removing.  */
 Finish:
  SET_INSN_COUNT (icount);
}
/* end extract-pbb */

      /* The last one is a pseudo-insn to link to the next chain.
         It is also used to record the insn count for this chain.  */
      {
        const IDESC *id;

        /* Was pbb terminated by a cti?  */
        if (_cti_sc)
          {
            id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_CTI_CHAIN];
          }
        else
          {
            id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_CHAIN];
          }
        SEM_SET_CODE (&sc->argbuf, id, FAST_P);
        sc->argbuf.idesc = id;
        sc->argbuf.addr = pc;
        sc->argbuf.fields.chain.insn_count = _insn_count;
        /* next/branch_target start unresolved; the chain handlers fill
           them in lazily via scache_lookup.  */
        sc->argbuf.fields.chain.next = 0;
        sc->argbuf.fields.chain.branch_target = 0;
        ++sc;
      }

      /* Update the pointer to the next free entry, may not have used as
         many entries as was asked for.  */
      CPU_SCACHE_NEXT_FREE (current_cpu) = sc;
      /* Record length of chain if profiling.
         This includes virtual insns since they count against
         max_insns too.  */
      if (! FAST_P)
        PROFILE_COUNT_SCACHE_CHAIN_LENGTH (current_cpu, sc - orig_sc);
    }

  return new_vpc;
}
 
 
/* Chain to the next block from a non-cti terminated previous block.
   Called via the x-chain virtual insn; returns the virtual pc of the
   next block to execute.  */

INLINE SEM_PC
sh64_compact_pbb_chain (SIM_CPU *current_cpu, SEM_ARG sem_arg)
{
  ARGBUF *abuf = SEM_ARGBUF (sem_arg);

  PBB_UPDATE_INSN_COUNT (current_cpu, sem_arg);

  /* For a non-cti block the next pc is simply the address recorded after
     the last compiled insn.  */
  SET_H_PC (abuf->addr);

  /* If not running forever, exit back to main loop.  */
  if (CPU_MAX_SLICE_INSNS (current_cpu) != 0
      /* Also exit back to main loop if there's an event.
         Note that if CPU_MAX_SLICE_INSNS != 1, events won't get processed
         at the "right" time, but then that was what was asked for.
         There is no silver bullet for simulator engines.
         ??? Clearly this needs a cleaner interface.
         At present it's just so Ctrl-C works.  */
      || STATE_EVENTS (CPU_STATE (current_cpu))->work_pending)
    CPU_RUNNING_P (current_cpu) = 0;

  /* If chained to next block, go straight to it.  */
  if (abuf->fields.chain.next)
    return abuf->fields.chain.next;
  /* See if next block has already been compiled.  */
  abuf->fields.chain.next = scache_lookup (current_cpu, abuf->addr);
  if (abuf->fields.chain.next)
    return abuf->fields.chain.next;
  /* Nope, so next insn is a virtual insn to invoke the compiler
     (begin a pbb).  */
  return CPU_SCACHE_PBB_BEGIN (current_cpu);
}
 
 
/* Chain to the next block from a cti terminated previous block.
   BR_TYPE indicates whether the branch was taken and whether we can cache
   the vpc of the branch target.
   NEW_PC is the target's branch address, and is only valid if
   BR_TYPE != SEM_BRANCH_UNTAKEN.  */

INLINE SEM_PC
sh64_compact_pbb_cti_chain (SIM_CPU *current_cpu, SEM_ARG sem_arg,
                     SEM_BRANCH_TYPE br_type, PCADDR new_pc)
{
  /* Points at the cached-vpc slot (fall-through or branch-target) that
     corresponds to where we're going next.  */
  SEM_PC *new_vpc_ptr;

  PBB_UPDATE_INSN_COUNT (current_cpu, sem_arg);

  /* If we have switched ISAs, exit back to main loop.
     Set idesc to 0 to cause the engine to point to the right insn table.  */
  if (new_pc & 1)
  {
    /* Switch to SHmedia.  The low address bit selects the ISA on sh64;
       dropping out of the loop lets the engine re-init for SHmedia.  */
    CPU_IDESC_SEM_INIT_P (current_cpu) = 0;
    CPU_RUNNING_P (current_cpu) = 0;
  }

  /* If not running forever, exit back to main loop.  */
  if (CPU_MAX_SLICE_INSNS (current_cpu) != 0
      /* Also exit back to main loop if there's an event.
         Note that if CPU_MAX_SLICE_INSNS != 1, events won't get processed
         at the "right" time, but then that was what was asked for.
         There is no silver bullet for simulator engines.
         ??? Clearly this needs a cleaner interface.
         At present it's just so Ctrl-C works.  */
      || STATE_EVENTS (CPU_STATE (current_cpu))->work_pending)
    CPU_RUNNING_P (current_cpu) = 0;

  /* Restart compiler if we branched to an uncacheable address
     (e.g. "j reg").  */
  if (br_type == SEM_BRANCH_UNCACHEABLE)
    {
      SET_H_PC (new_pc);
      return CPU_SCACHE_PBB_BEGIN (current_cpu);
    }

  /* If branch wasn't taken, update the pc and set BR_ADDR_PTR to our
     next chain ptr.  */
  if (br_type == SEM_BRANCH_UNTAKEN)
    {
      ARGBUF *abuf = SEM_ARGBUF (sem_arg);
      /* Fall through: continue at the address after the block.  */
      new_pc = abuf->addr;
      SET_H_PC (new_pc);
      new_vpc_ptr = &abuf->fields.chain.next;
    }
  else
    {
      ARGBUF *abuf = SEM_ARGBUF (sem_arg);
      /* Taken branch: the target vpc is cacheable in branch_target.  */
      SET_H_PC (new_pc);
      new_vpc_ptr = &abuf->fields.chain.branch_target;
    }

  /* If chained to next block, go straight to it.  */
  if (*new_vpc_ptr)
    return *new_vpc_ptr;
  /* See if next block has already been compiled.  */
  *new_vpc_ptr = scache_lookup (current_cpu, new_pc);
  if (*new_vpc_ptr)
    return *new_vpc_ptr;
  /* Nope, so next insn is a virtual insn to invoke the compiler
     (begin a pbb).  */
  return CPU_SCACHE_PBB_BEGIN (current_cpu);
}
 
 
/* x-before handler.
   This is called before each insn.
   SC is the x-before virtual insn's scache entry; the insn it precedes
   is at SC + 1.  */

void
sh64_compact_pbb_before (SIM_CPU *current_cpu, SCACHE *sc)
{
  SEM_ARG sem_arg = sc;
  const ARGBUF *abuf = SEM_ARGBUF (sem_arg);
  /* Recorded by sh64_compact_emit_before: non-zero for the first insn of
     a group.  */
  int first_p = abuf->fields.before.first_p;
  /* The real insn this handler runs before.  */
  const ARGBUF *cur_abuf = SEM_ARGBUF (sc + 1);
  const IDESC *cur_idesc = cur_abuf->idesc;
  PCADDR pc = cur_abuf->addr;

  if (ARGBUF_PROFILE_P (cur_abuf))
    PROFILE_COUNT_INSN (current_cpu, pc, cur_idesc->num);

  /* If this isn't the first insn, finish up the previous one.  */

  if (! first_p)
    {
      if (PROFILE_MODEL_P (current_cpu))
        {
          const SEM_ARG prev_sem_arg = sc - 1;
          const ARGBUF *prev_abuf = SEM_ARGBUF (prev_sem_arg);
          const IDESC *prev_idesc = prev_abuf->idesc;
          int cycles;

          /* ??? May want to measure all insns if doing insn tracing.  */
          if (ARGBUF_PROFILE_P (prev_abuf))
            {
              cycles = (*prev_idesc->timing->model_fn) (current_cpu, prev_sem_arg);
              sh64_compact_model_insn_after (current_cpu, 0 /*last_p*/, cycles);
            }
        }

      /* NOTE(review): this finishes the previous insn yet passes cur_abuf;
         presumably TRACE_INSN_FINI only needs an abuf for context here —
         confirm against the TRACE_INSN_FINI definition.  */
      TRACE_INSN_FINI (current_cpu, cur_abuf, 0 /*last_p*/);
    }

  /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
  if (PROFILE_MODEL_P (current_cpu)
      && ARGBUF_PROFILE_P (cur_abuf))
    sh64_compact_model_insn_before (current_cpu, first_p);

  TRACE_INSN_INIT (current_cpu, cur_abuf, first_p);
  TRACE_INSN (current_cpu, cur_idesc->idata, cur_abuf, pc);
}
 
 
/* x-after handler.
   This is called after a serial insn or at the end of a group of parallel
   insns.
   SC is the x-after virtual insn's scache entry; the insn it follows is
   at SC - 1.  */

void
sh64_compact_pbb_after (SIM_CPU *current_cpu, SCACHE *sc)
{
  SEM_ARG sem_arg = sc;
  const ARGBUF *abuf = SEM_ARGBUF (sem_arg);
  /* The real insn being finished up.  */
  const SEM_ARG prev_sem_arg = sc - 1;
  const ARGBUF *prev_abuf = SEM_ARGBUF (prev_sem_arg);

  /* ??? May want to measure all insns if doing insn tracing.  */
  if (PROFILE_MODEL_P (current_cpu)
      && ARGBUF_PROFILE_P (prev_abuf))
    {
      const IDESC *prev_idesc = prev_abuf->idesc;
      int cycles;

      /* Ask the timing model how long the insn took, then record it as
         the last insn of its group.  */
      cycles = (*prev_idesc->timing->model_fn) (current_cpu, prev_sem_arg);
      sh64_compact_model_insn_after (current_cpu, 1 /*last_p*/, cycles);
    }
  TRACE_INSN_FINI (current_cpu, prev_abuf, 1 /*last_p*/);
}
 
 
#define FAST_P 0

/* Full-featured execution engine (tracing/profiling enabled, FAST_P == 0).
   Executes pseudo-basic-blocks in a loop until CPU_RUNNING_P is cleared.  */

void
sh64_compact_engine_run_full (SIM_CPU *current_cpu)
{
  SIM_DESC current_state = CPU_STATE (current_cpu);
  SCACHE *scache = CPU_SCACHE_CACHE (current_cpu);
  /* virtual program counter */
  SEM_PC vpc;
#if WITH_SEM_SWITCH_FULL
  /* For communication between cti's and cti-chain.  */
  SEM_BRANCH_TYPE pbb_br_type;
  PCADDR pbb_br_npc;
#endif

  /* One-time per-cpu semantic table / computed-goto label initialization.  */
  if (! CPU_IDESC_SEM_INIT_P (current_cpu))
    {
      /* ??? 'twould be nice to move this up a level and only call it once.
         On the other hand, in the "let's go fast" case the test is only done
         once per pbb (since we only return to the main loop at the end of
         a pbb).  And in the "let's run until we're done" case we don't return
         until the program exits.  */

#if WITH_SEM_SWITCH_FULL
#if defined (__GNUC__)
/* ??? Later maybe paste sem-switch.c in when building mainloop.c.  */
#define DEFINE_LABELS
#include "sem-compact-switch.c"
#endif
#else
      sh64_compact_sem_init_idesc_table (current_cpu);
#endif

      /* Initialize the "begin (compile) a pbb" virtual insn.  */
      vpc = CPU_SCACHE_PBB_BEGIN (current_cpu);
      SEM_SET_FULL_CODE (SEM_ARGBUF (vpc),
                         & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN]);
      vpc->argbuf.idesc = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN];

      CPU_IDESC_SEM_INIT_P (current_cpu) = 1;
    }

  CPU_RUNNING_P (current_cpu) = 1;
  /* ??? In the case where we're returning to the main loop after every
     pbb we don't want to call pbb_begin each time (which hashes on the pc
     and does a table lookup).  A way to speed this up is to save vpc
     between calls.  */
  vpc = sh64_compact_pbb_begin (current_cpu, FAST_P);

  do
    {
/* begin full-exec-pbb */
{
#if (! FAST_P && WITH_SEM_SWITCH_FULL) || (FAST_P && WITH_SEM_SWITCH_FAST)
#define DEFINE_SWITCH
#include "sem-compact-switch.c"
#else
  vpc = execute (current_cpu, vpc, FAST_P);
#endif
}
/* end full-exec-pbb */
    }
  while (CPU_RUNNING_P (current_cpu));
}

#undef FAST_P
 
 
 
 
#define FAST_P 1

/* Fast execution engine (no tracing/profiling, FAST_P == 1).
   Executes pseudo-basic-blocks in a loop until CPU_RUNNING_P is cleared.  */

void
sh64_compact_engine_run_fast (SIM_CPU *current_cpu)
{
  SIM_DESC current_state = CPU_STATE (current_cpu);
  SCACHE *scache = CPU_SCACHE_CACHE (current_cpu);
  /* virtual program counter */
  SEM_PC vpc;
#if WITH_SEM_SWITCH_FAST
  /* For communication between cti's and cti-chain.  */
  SEM_BRANCH_TYPE pbb_br_type;
  PCADDR pbb_br_npc;
#endif

  /* One-time per-cpu semantic table / computed-goto label initialization.  */
  if (! CPU_IDESC_SEM_INIT_P (current_cpu))
    {
      /* ??? 'twould be nice to move this up a level and only call it once.
         On the other hand, in the "let's go fast" case the test is only done
         once per pbb (since we only return to the main loop at the end of
         a pbb).  And in the "let's run until we're done" case we don't return
         until the program exits.  */

#if WITH_SEM_SWITCH_FAST
#if defined (__GNUC__)
/* ??? Later maybe paste sem-switch.c in when building mainloop.c.  */
#define DEFINE_LABELS
#include "sem-compact-switch.c"
#endif
#else
      sh64_compact_semf_init_idesc_table (current_cpu);
#endif

      /* Initialize the "begin (compile) a pbb" virtual insn.  */
      vpc = CPU_SCACHE_PBB_BEGIN (current_cpu);
      SEM_SET_FAST_CODE (SEM_ARGBUF (vpc),
                         & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN]);
      vpc->argbuf.idesc = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN];

      CPU_IDESC_SEM_INIT_P (current_cpu) = 1;
    }

  CPU_RUNNING_P (current_cpu) = 1;
  /* ??? In the case where we're returning to the main loop after every
     pbb we don't want to call pbb_begin each time (which hashes on the pc
     and does a table lookup).  A way to speed this up is to save vpc
     between calls.  */
  vpc = sh64_compact_pbb_begin (current_cpu, FAST_P);

  do
    {
/* begin fast-exec-pbb */
{
#if (! FAST_P && WITH_SEM_SWITCH_FULL) || (FAST_P && WITH_SEM_SWITCH_FAST)
#define DEFINE_SWITCH
#include "sem-compact-switch.c"
#else
  vpc = execute (current_cpu, vpc, FAST_P);
#endif
}
/* end fast-exec-pbb */
    }
  while (CPU_RUNNING_P (current_cpu));
}

#undef FAST_P
 
 
 
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.