/* dma.c -- Simulation of DMA
   Copyright (C) 2001 by Erez Volk, erez@opencores.org
 
   This file is part of OpenRISC 1000 Architectural Simulator.
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
 
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
   GNU General Public License for more details.
 
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
 
/*
 * This simulation of the DMA core is not meant to be complete.
 * It is written only to allow the Ethernet core to be simulated.
 * Of course, if anyone feels like perfecting it, feel free...
 */
 
#include <string.h>
 
#include "config.h"
 
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
 
#include "port.h"
#include "arch.h"
#include "dma.h"
#include "sim-config.h"
#include "pic.h"
#include "abstract.h"
#include "fields.h"
#include "debug.h"
 
/* The representation of the DMA controllers */
static struct dma_controller dmas[MAX_DMAS];
 
static uint32_t dma_read32( oraddr_t addr );
static void dma_write32( oraddr_t addr, uint32_t value );
 
static unsigned long dma_read_ch_csr( struct dma_channel *channel );
static void dma_write_ch_csr( struct dma_channel *channel, unsigned long value );
static void dma_controller_clock( struct dma_controller *dma );
static void dma_load_descriptor( struct dma_channel *channel );
static void dma_init_transfer( struct dma_channel *channel );
static void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt );
 
static void masked_increase( unsigned long *value, unsigned long mask );
 
#define CHANNEL_ND_I(ch) (TEST_FLAG(ch->regs.csr,DMA_CH_CSR,MODE) && TEST_FLAG(ch->regs.csr,DMA_CH_CSR,USE_ED) && ch->dma_nd_i)
 
 
/* Reset. Initializes all registers to their defaults and places the devices in the memory address space. */
void dma_reset()
{
  unsigned i;
 
  memset( dmas, 0, sizeof(dmas) );
 
  for ( i = 0; i < config.ndmas; ++ i ) {
    struct dma_controller *dma = &(dmas[i]);
    unsigned channel_number;
 
    dma->baseaddr = config.dmas[i].baseaddr;
    dma->irq = config.dmas[i].irq;
    for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number ) {
      dma->ch[channel_number].controller = &(dmas[i]);
      dma->ch[channel_number].channel_number = channel_number;
      dma->ch[channel_number].channel_mask = 1LU << channel_number;
      dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
    }
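    /* Hook the controller's register file into the simulated address space;
       a base address of zero leaves the controller unregistered (disabled). */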
    if ( dma->baseaddr != 0 )
      register_memoryarea( dma->baseaddr, DMA_ADDR_SPACE, 4, 0, dma_read32, dma_write32);
  }
}
 
/* Print register values on stdout */
void dma_status( void )
{
  unsigned i, j;
 
  for ( i = 0; i < config.ndmas; ++ i ) {
    struct dma_controller *dma = &(dmas[i]);
 
    if ( dma->baseaddr == 0 )
      continue;
 
    PRINTF( "\nDMA controller %u at 0x%"PRIxADDR":\n", i, dma->baseaddr );
    PRINTF( "CSR       : 0x%08lX\n", dma->regs.csr );
    PRINTF( "INT_MSK_A : 0x%08lX\n", dma->regs.int_msk_a );
    PRINTF( "INT_MSK_B : 0x%08lX\n", dma->regs.int_msk_b );
    PRINTF( "INT_SRC_A : 0x%08lX\n", dma->regs.int_src_a );
    PRINTF( "INT_SRC_B : 0x%08lX\n", dma->regs.int_src_b );
 
    for ( j = 0; j < DMA_NUM_CHANNELS; ++ j ) {
      struct dma_channel *channel = &(dma->ch[j]);
      if ( !channel->referenced )
        continue;
      PRINTF( "CH%u_CSR   : 0x%08lX\n", j, channel->regs.csr );
      PRINTF( "CH%u_SZ    : 0x%08lX\n", j, channel->regs.sz );
      PRINTF( "CH%u_A0    : 0x%08lX\n", j, channel->regs.a0 );
      PRINTF( "CH%u_AM0   : 0x%08lX\n", j, channel->regs.am0 );
      PRINTF( "CH%u_A1    : 0x%08lX\n", j, channel->regs.a1 );
      PRINTF( "CH%u_AM1   : 0x%08lX\n", j, channel->regs.am1 );
      PRINTF( "CH%u_DESC  : 0x%08lX\n", j, channel->regs.desc );
      PRINTF( "CH%u_SWPTR : 0x%08lX\n", j, channel->regs.swptr );
    }
  }
}
 
 
/* Read a register */
uint32_t dma_read32( oraddr_t addr )
{
  unsigned i;
  struct dma_controller *dma = NULL;
 
  for ( i = 0; i < MAX_DMAS && dma == NULL; ++ i ) {
    if ( addr >= dmas[i].baseaddr && addr < dmas[i].baseaddr + DMA_ADDR_SPACE )
      dma = &(dmas[i]);
  }
 
  /* verify we found a controller */
  if ( dma == NULL ) {
    fprintf( stderr, "dma_read32( 0x%"PRIxADDR" ): Out of range\n", addr );
    runtime.sim.cont_run = 0;
    return 0;
  }
 
  addr -= dma->baseaddr;
 
  if ( addr % 4 != 0 ) {
    fprintf( stderr, "dma_read32( 0x%"PRIxADDR" ): Not register-aligned\n",
             addr + dma->baseaddr );
    runtime.sim.cont_run = 0;
    return 0;
  }
 
  if ( addr < DMA_CH_BASE ) {
    /* case of global (not per-channel) registers */
    switch( addr ) {
    case DMA_CSR: return dma->regs.csr;
    case DMA_INT_MSK_A: return dma->regs.int_msk_a;
    case DMA_INT_MSK_B: return dma->regs.int_msk_b;
    case DMA_INT_SRC_A: return dma->regs.int_src_a;
    case DMA_INT_SRC_B: return dma->regs.int_src_b;
    default:
      fprintf( stderr, "dma_read32( 0x%"PRIxADDR" ): Illegal register\n",
               addr + dma->baseaddr );
      runtime.sim.cont_run = 0;
      return 0;
    }
  } else {
    /* case of per-channel registers */
    unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
    addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
    switch( addr ) {
    case DMA_CH_CSR: return dma_read_ch_csr( &(dma->ch[chno]) );
    case DMA_CH_SZ: return dma->ch[chno].regs.sz;
    case DMA_CH_A0: return dma->ch[chno].regs.a0;
    case DMA_CH_AM0: return dma->ch[chno].regs.am0;
    case DMA_CH_A1: return dma->ch[chno].regs.a1;
    case DMA_CH_AM1: return dma->ch[chno].regs.am1;
    case DMA_CH_DESC: return dma->ch[chno].regs.desc;
    case DMA_CH_SWPTR: return dma->ch[chno].regs.swptr;
    default:
      fprintf( stderr, "dma_read32: Illegal channel register\n" );
      runtime.sim.cont_run = 0;
      return 0;
    }
  }
}
 
 
/* Handle read from a channel CSR */
unsigned long dma_read_ch_csr( struct dma_channel *channel )
{
  unsigned long result = channel->regs.csr;
 
  /* before returning, clear all relevant bits */
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_CHUNK_DONE );
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
 
  return result;
}
 
 
 
/* Write a register */
void dma_write32( oraddr_t addr, uint32_t value )
{
  unsigned i;
  struct dma_controller *dma = NULL;
 
  /* Find which controller this is */
  for ( i = 0; i < MAX_DMAS && dma == NULL; ++ i ) {
    if ( (addr >= dmas[i].baseaddr) && (addr < dmas[i].baseaddr + DMA_ADDR_SPACE) )
      dma = &(dmas[i]);
  }
 
  /* verify we found a controller */
  if ( dma == NULL ) {
    fprintf( stderr, "dma_write32( 0x%"PRIxADDR" ): Out of range\n", addr );
    runtime.sim.cont_run = 0;
    return;
  }
 
  addr -= dma->baseaddr;
 
  if ( addr % 4 != 0 ) {
    fprintf( stderr, "dma_write32( 0x%"PRIxADDR", 0x%08"PRIx32" ): Not register-aligned\n", addr + dma->baseaddr, value );
    runtime.sim.cont_run = 0;
    return;
  }
 
  /* case of global (not per-channel) registers */
  if ( addr < DMA_CH_BASE ) {
    switch( addr ) {
    case DMA_CSR:
      if ( TEST_FLAG( value, DMA_CSR, PAUSE ) )
	fprintf( stderr, "dma: PAUSE not implemented\n" );
      break;
 
    case DMA_INT_MSK_A: dma->regs.int_msk_a = value; break;
    case DMA_INT_MSK_B: dma->regs.int_msk_b = value; break;
    case DMA_INT_SRC_A: dma->regs.int_src_a = value; break;
    case DMA_INT_SRC_B: dma->regs.int_src_b = value; break;
    default:
      fprintf( stderr, "dma_write32( 0x%"PRIxADDR" ): Illegal register\n",
               addr + dma->baseaddr );
      runtime.sim.cont_run = 0;
      return;
    }
  } else {
    /* case of per-channel registers */
    unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
    struct dma_channel *channel = &(dma->ch[chno]);
    channel->referenced = 1;
    addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
    switch( addr ) {
    case DMA_CH_CSR: dma_write_ch_csr( &(dma->ch[chno]), value ); break;
    case DMA_CH_SZ: channel->regs.sz = value; break;
    case DMA_CH_A0: channel->regs.a0 = value; break;
    case DMA_CH_AM0: channel->regs.am0 = value; break;
    case DMA_CH_A1: channel->regs.a1 = value; break;
    case DMA_CH_AM1: channel->regs.am1 = value; break;
    case DMA_CH_DESC: channel->regs.desc = value; break;
    case DMA_CH_SWPTR: channel->regs.swptr = value; break;
    }
  }
}
 
 
/* Write a channel CSR
 * This ensures only the writable bits are modified.
 */
void dma_write_ch_csr( struct dma_channel *channel, unsigned long value )
{
  /* Copy the writable bits to the channel CSR */
  channel->regs.csr &= ~DMA_CH_CSR_WRITE_MASK;
  channel->regs.csr |= value & DMA_CH_CSR_WRITE_MASK;
}
 
 
/*
 * Simulation of control signals
 * To be used by simulations for other devices, e.g. ethernet
 */
 
void set_dma_req_i( unsigned dma_controller, unsigned channel )
{
  dmas[dma_controller].ch[channel].dma_req_i = 1;
}
 
void clear_dma_req_i( unsigned dma_controller, unsigned channel )
{
  dmas[dma_controller].ch[channel].dma_req_i = 0;
}
 
void set_dma_nd_i( unsigned dma_controller, unsigned channel )
{
  dmas[dma_controller].ch[channel].dma_nd_i = 1;
}
 
void clear_dma_nd_i( unsigned dma_controller, unsigned channel )
{
  dmas[dma_controller].ch[channel].dma_nd_i = 0;
}
 
unsigned check_dma_ack_o( unsigned dma_controller, unsigned channel )
{
  return dmas[dma_controller].ch[channel].dma_ack_o;
}
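 
/* A minimal usage sketch (not part of the simulator) of how a device model,
 * e.g. the Ethernet core, might drive a channel configured for hardware
 * handshake (MODE set): raise dma_req_i to let the channel run, poll
 * dma_ack_o for chunk completion, and drop the request when finished.  The
 * controller and channel numbers below are illustrative assumptions only.
 */
#if 0
static void example_device_feed_dma( void )
{
  const unsigned ctrl = 0, chan = 0;       /* hypothetical assignment */
 
  set_dma_req_i( ctrl, chan );             /* ask the channel to run        */
  if ( check_dma_ack_o( ctrl, chan ) ) {   /* a chunk has just completed    */
    /* refill or drain the device buffers here ... */
    clear_dma_req_i( ctrl, chan );         /* nothing more to transfer      */
  }
}
#endif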
 
 
 
/* Simulation hook. Must be called every clock cycle to simulate DMA. */
void dma_clock()
{
  unsigned i;
  for ( i = 0; i < MAX_DMAS; ++ i ) {
    if ( dmas[i].baseaddr != 0 )
      dma_controller_clock( &(dmas[i]) );
  }
}
 
 
/* Clock tick for one DMA controller.
 * This does the actual "DMA" operation.
 * One word is transferred per clock for each active channel.
 */
void dma_controller_clock( struct dma_controller *dma )
{
  unsigned chno;
  int breakpoint = 0;
 
  for ( chno = 0; chno < DMA_NUM_CHANNELS; ++ chno ) {
    struct dma_channel *channel = &(dma->ch[chno]);
 
    /* check if this channel is enabled */
    if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) )
      continue;
 
    /* Do we need to abort? */
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) ) {
      debug( 3,  "DMA: STOP requested\n" );
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
 
      if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
	   (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
	SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
	channel->controller->regs.int_src_a = channel->channel_mask;
	report_interrupt( channel->controller->irq );
      }
 
      continue;
    }
 
    /* In HW Handshake mode, only work when dma_req_i asserted */
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, MODE ) &&
	 !channel->dma_req_i ) {
      continue;
    }
 
    /* If this is the first cycle of the transfer, initialize our state */
    if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) ) {
      debug( 4,  "DMA: Starting new transfer\n" );
 
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
 
      /* If using linked lists, copy the appropriate fields to our registers */
      if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
	dma_load_descriptor( channel );
      else
	channel->load_next_descriptor_when_done = 0;
 
      /* Set our internal status */
      dma_init_transfer( channel );
 
      /* Might need to skip descriptor */
      if ( CHANNEL_ND_I( channel ) ) {
	debug( 3,  "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" );
	dma_channel_terminate_transfer( channel, 0 );
	continue;
      }
    }
 
    /* Transfer one word */
    set_mem32( channel->destination, eval_mem32( channel->source, &breakpoint ), &breakpoint );
 
    /* Advance the source and destination pointers */
    masked_increase( &(channel->source), channel->source_mask );
    masked_increase( &(channel->destination), channel->destination_mask );
    ++ channel->words_transferred;
 
    /* Have we finished a whole chunk? */
    channel->dma_ack_o = (channel->words_transferred % channel->chunk_size == 0);
 
    /* When done with a chunk, check for dma_nd_i */
    if ( CHANNEL_ND_I( channel ) ) {
      debug( 3,  "DMA: dma_nd_i asserted\n" );
      dma_channel_terminate_transfer( channel, 0 );
      continue;
    }
 
    /* Are we done? */
    if ( channel->words_transferred >= channel->total_size )
      dma_channel_terminate_transfer( channel, 1 );
  }
}
 
 
/* Copy relevant values from the linked-list descriptor to the channel registers */
void dma_load_descriptor( struct dma_channel *channel )
{
  int breakpoint = 0;
  unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );
 
  channel->load_next_descriptor_when_done = !TEST_FLAG( desc_csr, DMA_DESC_CSR, EOL );
 
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_SRC ) );
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_DST ) );
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, SRC_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, SRC_SEL ) );
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, DST_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, DST_SEL ) );
 
  SET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ,	 GET_FIELD( desc_csr, DMA_DESC_CSR, TOT_SZ ) );
 
  channel->regs.a0 = eval_mem32( channel->regs.desc + DMA_DESC_ADR0, &breakpoint );
  channel->regs.a1 = eval_mem32( channel->regs.desc + DMA_DESC_ADR1, &breakpoint );
 
  channel->current_descriptor = channel->regs.desc;
  channel->regs.desc = eval_mem32( channel->regs.desc + DMA_DESC_NEXT, &breakpoint );
}
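 
/* A minimal sketch (not used by the simulator itself) of how a single
 * linked-list descriptor could be laid out in simulated memory for the
 * loader above.  The descriptor address, payload addresses and length are
 * illustrative assumptions.
 */
#if 0
static void example_write_descriptor( oraddr_t desc, oraddr_t src,
                                      oraddr_t dst, unsigned long words )
{
  int breakpoint = 0;
  unsigned long csr = 0;
 
  SET_FLAG( csr, DMA_DESC_CSR, EOL );              /* last descriptor in the chain */
  SET_FLAG( csr, DMA_DESC_CSR, INC_SRC );          /* advance the source address   */
  SET_FLAG( csr, DMA_DESC_CSR, INC_DST );          /* advance the destination      */
  SET_FIELD( csr, DMA_DESC_CSR, TOT_SZ, words );   /* transfer length in words     */
 
  set_mem32( desc + DMA_DESC_CSR,  csr, &breakpoint );
  set_mem32( desc + DMA_DESC_ADR0, src, &breakpoint );
  set_mem32( desc + DMA_DESC_ADR1, dst, &breakpoint );
  set_mem32( desc + DMA_DESC_NEXT, 0,   &breakpoint ); /* EOL set, so next is unused */
}
#endif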
 
 
/* Initialize internal parameters used to implement transfers */
void dma_init_transfer( struct dma_channel *channel )
{
  channel->source = channel->regs.a0;
  channel->destination = channel->regs.a1;
  channel->source_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC ) ? channel->regs.am0 : 0;
  channel->destination_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST ) ? channel->regs.am1 : 0;
  channel->total_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ );
  channel->chunk_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, CHK_SZ );
  if ( !channel->chunk_size || (channel->chunk_size > channel->total_size) )
    channel->chunk_size = channel->total_size;
  channel->words_transferred = 0;
}
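 
/* Example: with TOT_SZ == 16 and CHK_SZ == 4, dma_controller_clock() moves one
   word per call and pulses dma_ack_o after words 4, 8, 12 and 16; the transfer
   terminates once words_transferred reaches total_size.  A CHK_SZ of zero, or
   one larger than TOT_SZ, is treated as a single chunk spanning the whole
   transfer. */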
 
 
/* Take care of transfer termination */
void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt )
{
  debug( 4,  "DMA: Terminating transfer\n" );
 
  /* Might be working in a linked list */
  if ( channel->load_next_descriptor_when_done ) {
    dma_load_descriptor( channel );
    dma_init_transfer( channel );
    return;
  }
 
  /* Might be in auto-restart mode */
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, ARS ) ) {
    dma_init_transfer( channel );
    return;
  }
 
  /* If needed, write the remaining transfer size back into the size register */
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, SZ_WB ) &&
       TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) ) {
    /* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
    unsigned long remaining_words = channel->total_size - channel->words_transferred;
    SET_FIELD( channel->regs.sz, DMA_DESC_CSR, TOT_SZ, remaining_words );
  }
 
  /* Mark end of transfer */
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
  SET_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
 
  /* If needed, generate interrupt */
  if ( generate_interrupt ) {
    /* TODO: Which channel should we interrupt? */
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_DONE ) &&
	 (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
      channel->controller->regs.int_src_a = channel->channel_mask;
      report_interrupt( channel->controller->irq );
    }
  }
}
 
/* Utility function: add 4 to a value, modifying only the bits selected by the mask */
void masked_increase( unsigned long *value, unsigned long mask )
{
  *value = (*value & ~mask) | ((*value + 4) & mask);
}
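 
/* For example, with mask == 0x0000000F an address of 0x0000000C wraps to
   0x00000000 (a 16-byte circular window), the reset-default mask of
   0xFFFFFFFC simply advances a word-aligned address by 4, and a mask of 0
   (INC_SRC/INC_DST cleared) leaves the address unchanged. */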
 
