OpenCores
URL https://opencores.org/ocsvn/or1k_old/or1k_old/trunk

Subversion Repositories or1k_old

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /
    from Rev 1369 to Rev 1370
    Reverse comparison

Rev 1369 → Rev 1370

/trunk/or1ksim/sim-config.h
24,7 → 24,6
 
/* Simulator configuration macros. Eventually this one will be a lot bigger. */
 
#define MAX_DMAS 4 /* Max. number of DMA controllers */
#define MAX_ETHERNETS 4 /* Max. number of Ethernet MACs */
#define MAX_GPIOS 4 /* Max. number of GPIO modules */
#define MAX_MEMORIES 16 /* Max. number of memory devices attached */
42,13 → 41,6
int enabled; /* Is tick timer enabled? */
} tick;
int ndmas;
struct {
unsigned long baseaddr;
int irq; /* IRQ of this device */
unsigned long vapi_id; /* VAPI id for this instance */
} dmas[MAX_DMAS];
int nethernets;
struct {
unsigned long baseaddr;
/trunk/or1ksim/sim.cfg
643,11 → 643,6
 
This section configures the DMAs
 
ndmas = <value>
makes the specified number of instances; configure each
instance within a device - enddevice construct.
 
instance specific:
baseaddr = <hex_value>
address of first DMA register for this device
 
659,14 → 654,8
*/
 
section dma
ndmas = 0
 
/*
device 0
baseaddr = 0x9a000000
irq = 11
enddevice
*/
baseaddr = 0x9a000000
irq = 11
end
 
 
/trunk/or1ksim/sim-cmd.c
60,7 → 60,6
#include "icache_model.h"
#include "dcache_model.h"
#include "branch_predict.h"
#include "dma.h"
#include "ethernet.h"
#include "gpio.h"
#include "ps2kbd.h"
423,7 → 422,6
if (config.bpb.btic) btic_info();
if (config.mc.enabled) mc_status();
if (config.ndmas) dma_status();
if (config.nethernets) eth_status();
if (config.ngpios) gpio_status();
kbd_info();
/trunk/or1ksim/peripheral/dma.c
39,10 → 39,12
#include "pic.h"
#include "abstract.h"
#include "fields.h"
#include "sched.h"
#include "debug.h"
 
/* The representation of the DMA controllers */
static struct dma_controller dmas[MAX_DMAS];
/* We keep a copy of all our controllers because we have to export an interface
* to other peripherals eg. ethernet */
static struct dma_controller *dmas = NULL;
 
static uint32_t dma_read32( oraddr_t addr, void *dat );
static void dma_write32( oraddr_t addr, uint32_t value, void *dat );
49,93 → 51,77
 
static unsigned long dma_read_ch_csr( struct dma_channel *channel );
static void dma_write_ch_csr( struct dma_channel *channel, unsigned long value );
static void dma_controller_clock( struct dma_controller *dma );
void dma_controller_clock( struct dma_controller *dma );
static void dma_load_descriptor( struct dma_channel *channel );
static void dma_init_transfer( struct dma_channel *channel );
static void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt );
 
static void masked_increase( unsigned long *value, unsigned long mask );
void dma_channel_clock( void *dat );
 
static void masked_increase( oraddr_t *value, unsigned long mask );
 
#define CHANNEL_ND_I(ch) (TEST_FLAG(ch->regs.csr,DMA_CH_CSR,MODE) && TEST_FLAG(ch->regs.csr,DMA_CH_CSR,USE_ED) && ch->dma_nd_i)
 
 
/* Reset. Initializes all registers to default and places devices in memory address space. */
void dma_reset()
void dma_reset(void *dat)
{
unsigned i;
struct dma_controller *dma = dat;
unsigned channel_number;
 
memset( dmas, 0, sizeof(dmas) );
memset( dma->ch, 0, sizeof(dma->ch) );
 
dma->regs.csr = 0;
dma->regs.int_msk_a = 0;
dma->regs.int_msk_b = 0;
dma->regs.int_src_a = 0;
dma->regs.int_src_b = 0;
for ( i = 0; i < config.ndmas; ++ i ) {
struct dma_controller *dma = &(dmas[i]);
unsigned channel_number;
 
dma->baseaddr = config.dmas[i].baseaddr;
dma->irq = config.dmas[i].irq;
for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number ) {
dma->ch[channel_number].controller = &(dmas[i]);
dma->ch[channel_number].channel_number = channel_number;
dma->ch[channel_number].channel_mask = 1LU << channel_number;
dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
}
if ( dma->baseaddr != 0 )
register_memoryarea( dma->baseaddr, DMA_ADDR_SPACE, 4, 0, dma_read32, dma_write32, NULL );
for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number ) {
dma->ch[channel_number].controller = dma;
dma->ch[channel_number].channel_number = channel_number;
dma->ch[channel_number].channel_mask = 1LU << channel_number;
dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
}
}
 
/* Print register values on stdout */
void dma_status( void )
void dma_status( void *dat )
{
unsigned i, j;
struct dma_controller *dma = dat;
 
for ( i = 0; i < config.ndmas; ++ i ) {
struct dma_controller *dma = &(dmas[i]);
if ( dma->baseaddr == 0 )
continue;
if ( dma->baseaddr == 0 )
return;
 
PRINTF( "\nDMA controller %u at 0x%"PRIxADDR":\n", i, dma->baseaddr );
PRINTF( "CSR : 0x%08lX\n", dma->regs.csr );
PRINTF( "INT_MSK_A : 0x%08lX\n", dma->regs.int_msk_a );
PRINTF( "INT_MSK_B : 0x%08lX\n", dma->regs.int_msk_b );
PRINTF( "INT_SRC_A : 0x%08lX\n", dma->regs.int_src_a );
PRINTF( "INT_SRC_B : 0x%08lX\n", dma->regs.int_src_b );
PRINTF( "\nDMA controller %u at 0x%"PRIxADDR":\n", i, dma->baseaddr );
PRINTF( "CSR : 0x%08lX\n", dma->regs.csr );
PRINTF( "INT_MSK_A : 0x%08lX\n", dma->regs.int_msk_a );
PRINTF( "INT_MSK_B : 0x%08lX\n", dma->regs.int_msk_b );
PRINTF( "INT_SRC_A : 0x%08lX\n", dma->regs.int_src_a );
PRINTF( "INT_SRC_B : 0x%08lX\n", dma->regs.int_src_b );
 
for ( j = 0; j < DMA_NUM_CHANNELS; ++ j ) {
struct dma_channel *channel = &(dma->ch[j]);
if ( !channel->referenced )
continue;
PRINTF( "CH%u_CSR : 0x%08lX\n", j, channel->regs.csr );
PRINTF( "CH%u_SZ : 0x%08lX\n", j, channel->regs.sz );
PRINTF( "CH%u_A0 : 0x%08lX\n", j, channel->regs.a0 );
PRINTF( "CH%u_AM0 : 0x%08lX\n", j, channel->regs.am0 );
PRINTF( "CH%u_A1 : 0x%08lX\n", j, channel->regs.a1 );
PRINTF( "CH%u_AM1 : 0x%08lX\n", j, channel->regs.am1 );
PRINTF( "CH%u_DESC : 0x%08lX\n", j, channel->regs.desc );
PRINTF( "CH%u_SWPTR : 0x%08lX\n", j, channel->regs.swptr );
}
for ( j = 0; j < DMA_NUM_CHANNELS; ++ j ) {
struct dma_channel *channel = &(dma->ch[j]);
if ( !channel->referenced )
continue;
PRINTF( "CH%u_CSR : 0x%08lX\n", j, channel->regs.csr );
PRINTF( "CH%u_SZ : 0x%08lX\n", j, channel->regs.sz );
PRINTF( "CH%u_A0 : 0x%08lX\n", j, channel->regs.a0 );
PRINTF( "CH%u_AM0 : 0x%08lX\n", j, channel->regs.am0 );
PRINTF( "CH%u_A1 : 0x%08lX\n", j, channel->regs.a1 );
PRINTF( "CH%u_AM1 : 0x%08lX\n", j, channel->regs.am1 );
PRINTF( "CH%u_DESC : 0x%08lX\n", j, channel->regs.desc );
PRINTF( "CH%u_SWPTR : 0x%08lX\n", j, channel->regs.swptr );
}
}
 
 
/* Read a register */
uint32_t dma_read32( oraddr_t addr, void *dat )
{
unsigned i;
struct dma_controller *dma = NULL;
struct dma_controller *dma = dat;
 
for ( i = 0; i < MAX_DMAS && dma == NULL; ++ i ) {
if ( addr >= dmas[i].baseaddr && addr < dmas[i].baseaddr + DMA_ADDR_SPACE )
dma = &(dmas[i]);
}
/* verify we found a controller */
if ( dma == NULL ) {
fprintf( stderr, "dma_read32( 0x%"PRIxADDR" ): Out of range\n", addr );
runtime.sim.cont_run = 0;
return 0;
}
 
addr -= dma->baseaddr;
 
if ( addr % 4 != 0 ) {
174,6 → 160,7
case DMA_CH_SWPTR: return dma->ch[chno].regs.swptr;
}
}
return 0;
}
 
 
196,22 → 183,8
/* Write a register */
void dma_write32( oraddr_t addr, uint32_t value, void *dat )
{
unsigned i;
struct dma_controller *dma = NULL;
struct dma_controller *dma = dat;
 
/* Find which controller this is */
for ( i = 0; i < MAX_DMAS && dma == NULL; ++ i ) {
if ( (addr >= dmas[i].baseaddr) && (addr < dmas[i].baseaddr + DMA_ADDR_SPACE) )
dma = &(dmas[i]);
}
/* verify we found a controller */
if ( dma == NULL ) {
fprintf( stderr, "dma_write32( 0x%"PRIxADDR" ): Out of range\n", addr );
runtime.sim.cont_run = 0;
return;
}
 
addr -= dma->baseaddr;
 
if ( addr % 4 != 0 ) {
263,6 → 236,15
*/
void dma_write_ch_csr( struct dma_channel *channel, unsigned long value )
{
/* Check if we should *start* a transfer */
if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) &&
TEST_FLAG( value, DMA_CH_CSR, CH_EN ))
SCHED_ADD( dma_channel_clock, channel, runtime.sim.cycles + 1 );
else if ( !TEST_FLAG( value, DMA_CH_CSR, CH_EN ) )
/* The CH_EN flag is clear, check if we have a transfer in progress and
* clear it */
SCHED_FIND_REMOVE( dma_channel_clock, channel );
 
/* Copy the writable bits to the channel CSR */
channel->regs.csr &= ~DMA_CH_CSR_WRITE_MASK;
channel->regs.csr |= value & DMA_CH_CSR_WRITE_MASK;
269,135 → 251,91
}
 
 
/*
* Simulation of control signals
* To be used by simulations for other devices, e.g. ethernet
*/
 
void set_dma_req_i( unsigned dma_controller, unsigned channel )
{
dmas[dma_controller].ch[channel].dma_req_i = 1;
}
 
void clear_dma_req_i( unsigned dma_controller, unsigned channel )
{
dmas[dma_controller].ch[channel].dma_req_i = 0;
}
 
void set_dma_nd_i( unsigned dma_controller, unsigned channel )
{
dmas[dma_controller].ch[channel].dma_nd_i = 1;
}
 
void clear_dma_nd_i( unsigned dma_controller, unsigned channel )
{
dmas[dma_controller].ch[channel].dma_nd_i = 0;
}
 
unsigned check_dma_ack_o( unsigned dma_controller, unsigned channel )
{
return dmas[dma_controller].ch[channel].dma_ack_o;
}
 
 
 
/* Simulation hook. Must be called every clock cycle to simulate DMA. */
void dma_clock()
{
unsigned i;
for ( i = 0; i < MAX_DMAS; ++ i ) {
if ( dmas[i].baseaddr != 0 )
dma_controller_clock( &(dmas[i]) );
}
}
 
 
/* Clock tick for one DMA controller.
/* Clock tick for one channel on one DMA controller.
* This does the actual "DMA" operation.
* One chunk is transferred per clock.
*/
void dma_controller_clock( struct dma_controller *dma )
void dma_channel_clock( void *dat )
{
unsigned chno;
int breakpoint = 0;
struct dma_channel *channel = dat;
for ( chno = 0; chno < DMA_NUM_CHANNELS; ++ chno ) {
struct dma_channel *channel = &(dma->ch[chno]);
/* Do we need to abort? */
if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) ) {
debug( 3, "DMA: STOP requested\n" );
CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
 
/* check if this channel is enabled */
if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) )
continue;
 
/* Do we need to abort? */
if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) ) {
debug( 3, "DMA: STOP requested\n" );
CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
(channel->controller->regs.int_msk_a & channel->channel_mask) ) {
SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
channel->controller->regs.int_src_a = channel->channel_mask;
report_interrupt( channel->controller->irq );
}
 
continue;
if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
(channel->controller->regs.int_msk_a & channel->channel_mask) ) {
SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
channel->controller->regs.int_src_a = channel->channel_mask;
report_interrupt( channel->controller->irq );
}
 
/* In HW Handshake mode, only work when dma_req_i asserted */
if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, MODE ) &&
!channel->dma_req_i ) {
continue;
}
return;
}
 
/* If this is the first cycle of the transfer, initialize our state */
if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) ) {
debug( 4, "DMA: Starting new transfer\n" );
CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
/* In HW Handshake mode, only work when dma_req_i asserted */
if ( TEST_FLAG(channel->regs.csr, DMA_CH_CSR, MODE) && !channel->dma_req_i ) {
/* Reschedule */
SCHED_ADD( dma_channel_clock, dat, runtime.sim.cycles + 1 );
return;
}
 
/* If using linked lists, copy the appropriate fields to our registers */
if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
dma_load_descriptor( channel );
else
channel->load_next_descriptor_when_done = 0;
/* Set our internal status */
dma_init_transfer( channel );
/* If this is the first cycle of the transfer, initialize our state */
if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) ) {
debug( 4, "DMA: Starting new transfer\n" );
 
/* Might need to skip descriptor */
if ( CHANNEL_ND_I( channel ) ) {
debug( 3, "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" );
dma_channel_terminate_transfer( channel, 0 );
continue;
}
}
CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
 
/* Transfer one word */
set_mem32( channel->destination, eval_mem32( channel->source, &breakpoint ), &breakpoint );
/* If using linked lists, copy the appropriate fields to our registers */
if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
dma_load_descriptor( channel );
else
channel->load_next_descriptor_when_done = 0;
 
/* Advance the source and destination pointers */
masked_increase( &(channel->source), channel->source_mask );
masked_increase( &(channel->destination), channel->destination_mask );
++ channel->words_transferred;
/* Set our internal status */
dma_init_transfer( channel );
 
/* Have we finished a whole chunk? */
channel->dma_ack_o = (channel->words_transferred % channel->chunk_size == 0);
 
/* When done with a chunk, check for dma_nd_i */
/* Might need to skip descriptor */
if ( CHANNEL_ND_I( channel ) ) {
debug( 3, "DMA: dma_nd_i asserted\n" );
debug( 3, "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" );
dma_channel_terminate_transfer( channel, 0 );
continue;
return;
}
}
 
/* Are we done? */
if ( channel->words_transferred >= channel->total_size )
dma_channel_terminate_transfer( channel, 1 );
/* Transfer one word */
set_mem32( channel->destination, eval_mem32( channel->source, &breakpoint ), &breakpoint );
 
/* Advance the source and destination pointers */
masked_increase( &(channel->source), channel->source_mask );
masked_increase( &(channel->destination), channel->destination_mask );
++ channel->words_transferred;
 
/* Have we finished a whole chunk? */
channel->dma_ack_o = (channel->words_transferred % channel->chunk_size == 0);
 
/* When done with a chunk, check for dma_nd_i */
if ( CHANNEL_ND_I( channel ) ) {
debug( 3, "DMA: dma_nd_i asserted\n" );
dma_channel_terminate_transfer( channel, 0 );
return;
}
 
/* Are we done? */
if ( channel->words_transferred >= channel->total_size ) {
dma_channel_terminate_transfer( channel, 1 );
return;
}
 
/* Reschedule to transfer the next chunk */
SCHED_ADD( dma_channel_clock, dat, runtime.sim.cycles + 1 );
}
 
 
448,6 → 386,8
if ( channel->load_next_descriptor_when_done ) {
dma_load_descriptor( channel );
dma_init_transfer( channel );
/* Reschedule */
SCHED_ADD( dma_channel_clock, channel, runtime.sim.cycles + 1 );
return;
}
 
460,7 → 400,7
/* If needed, write amount of data transferred back to memory */
if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, SZ_WB ) &&
TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) ) {
/* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
/* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
unsigned long remaining_words = channel->total_size - channel->words_transferred;
SET_FIELD( channel->regs.sz, DMA_DESC_CSR, TOT_SZ, remaining_words );
}
484,51 → 424,112
}
 
/* Utility function: Add 4 to a value with a mask */
void masked_increase( unsigned long *value, unsigned long mask )
static void masked_increase( oraddr_t *value, unsigned long mask )
{
*value = (*value & ~mask) | ((*value + 4) & mask);
}
 
/*----------------------------------------------------[ DMA configuration ]---*/
void dma_ndmas(union param_val val, void *dat)
/*-------------------------------------------[ DMA<->Peripheral interface ]---*/
/*
* Simulation of control signals
* To be used by simulations for other devices, e.g. ethernet
*/
 
void set_dma_req_i( struct dma_channel *channel )
{
if (val.int_val >= 0 && val.int_val < MAX_DMAS)
config.ndmas = val.int_val;
else
CONFIG_ERROR("invalid number of devices.");
channel->dma_req_i = 1;
}
 
void clear_dma_req_i( struct dma_channel *channel )
{
channel->dma_req_i = 0;
}
 
void set_dma_nd_i( struct dma_channel *channel )
{
channel->dma_nd_i = 1;
}
 
void clear_dma_nd_i( struct dma_channel *channel )
{
channel->dma_nd_i = 0;
}
 
unsigned check_dma_ack_o( struct dma_channel *channel )
{
return channel->dma_ack_o;
}
 
struct dma_channel *find_dma_controller_ch( unsigned controller,
unsigned channel )
{
struct dma_controller *cur = dmas;
 
while( cur && controller ) {
cur = cur->next;
controller--;
}
 
if( !cur )
return NULL;
 
return &(cur->ch[channel]);
}
 
 
/*----------------------------------------------------[ DMA configuration ]---*/
void dma_baseaddr(union param_val val, void *dat)
{
if (current_device >= 0 && current_device < config.ndmas)
config.dmas[current_device].baseaddr = val.addr_val;
else
CONFIG_ERROR("invalid device number.");
struct dma_controller *dma = dat;
dma->baseaddr = val.addr_val;
}
 
void dma_irq(union param_val val, void *dat)
{
if (current_device >= 0 && current_device < config.ndmas)
config.dmas[current_device].irq = val.int_val;
else
CONFIG_ERROR("invalid device number.");
struct dma_controller *dma = dat;
dma->irq = val.int_val;
}
 
void dma_vapi_id(union param_val val, void *dat)
{
if (current_device >= 0 && current_device < config.ndmas)
config.dmas[current_device].vapi_id = val.int_val;
else
CONFIG_ERROR("invalid device number.");
struct dma_controller *dma = dat;
dma->vapi_id = val.int_val;
}
 
void *dma_sec_start(void)
{
struct dma_controller *new = malloc(sizeof(struct dma_controller));
 
if(!new) {
fprintf(stderr, "Peripheral DMA: Run out of memory\n");
exit(-1);
}
 
new->next = NULL;
 
return new;
}
 
void dma_sec_end(void *dat)
{
struct dma_controller *dma = dat;
struct dma_controller *cur;
 
register_memoryarea( dma->baseaddr, DMA_ADDR_SPACE, 4, 0, dma_read32, dma_write32, dat );
reg_sim_reset( dma_reset, dat );
reg_sim_stat( dma_status, dat );
 
if(dmas) {
for(cur = dmas; cur->next; cur = cur->next);
cur->next = dma;
} else
dmas = dma;
}
 
void reg_dma_sec(void)
{
struct config_section *sec = reg_config_sec("dma", NULL, NULL);
struct config_section *sec = reg_config_sec("dma", dma_sec_start, dma_sec_end);
 
reg_config_param(sec, "ndmas", paramt_int, dma_ndmas);
reg_config_param(sec, "device", paramt_int, change_device);
reg_config_param(sec, "enddevice", paramt_none, end_device);
reg_config_param(sec, "irq", paramt_int, dma_irq);
reg_config_param(sec, "baseaddr", paramt_addr, dma_baseaddr);
reg_config_param(sec, "vapi_id", paramt_addr, dma_vapi_id);
/trunk/or1ksim/peripheral/dma.h
1,4 → 1,4
/* dma.h -- Definition of types and structures for DMA
/* dma.h -- Definition of DMA<->peripheral interface
Copyright (C) 2001 by Erez Volk, erez@opencores.org
 
This file is part of OpenRISC 1000 Architectural Simulator.
20,17 → 20,6
 
#include "dma_defs.h"
 
/* Exported function prototypes */
void dma_reset( void );
void dma_clock( void );
void dma_status( void );
 
void set_dma_req_i( unsigned dma_controller, unsigned channel );
void clear_dma_req_i( unsigned dma_controller, unsigned channel );
void set_dma_nd_i( unsigned dma_controller, unsigned channel );
void clear_dma_nd_i( unsigned dma_controller, unsigned channel );
unsigned check_dma_ack_o( unsigned dma_controller, unsigned channel );
 
/* Implementation of DMA Channel Registers and State */
struct dma_channel
{
47,7 → 36,7
/* Inner state of transfer etc. */
unsigned load_next_descriptor_when_done;
unsigned long current_descriptor;
unsigned long source, destination, source_mask, destination_mask;
oraddr_t source, destination, source_mask, destination_mask;
unsigned long chunk_size, total_size, words_transferred;
 
/* The interface registers */
79,6 → 68,9
/* Which interrupt number we generate */
unsigned irq;
 
/* VAPI id */
int vapi_id;
 
/* Controller Registers */
struct
{
91,4 → 83,14
 
/* Channels */
struct dma_channel ch[DMA_NUM_CHANNELS];
 
struct dma_controller *next;
};
 
void set_dma_req_i( struct dma_channel *channel );
void clear_dma_req_i( struct dma_channel *channel );
void set_dma_nd_i( struct dma_channel *channel );
void clear_dma_nd_i( struct dma_channel *channel );
unsigned check_dma_ack_o( struct dma_channel *channel );
struct dma_channel *find_dma_controller_ch( unsigned controller,
unsigned channel );
/trunk/or1ksim/toplevel.c
47,7 → 47,6
#include "sim-config.h"
#include "spr_defs.h"
#include "sprs.h"
#include "dma.h"
#include "ps2kbd.h"
#include "vapi.h"
#include "gdbcomm.h"
73,7 → 72,7
#include "cuc.h"
 
/* CVS revision number. */
const char rcsrev[] = "$Revision: 1.110 $";
const char rcsrev[] = "$Revision: 1.111 $";
 
inline void debug(int level, const char *format, ...)
{
169,7 → 168,6
cur_reset = cur_reset->next;
}
 
dma_reset();
eth_reset();
gpio_reset();
kbd_reset ();
447,7 → 445,6
if (config.ic.enabled) ic_clock();
}
 
if (config.dmas) dma_clock();
if (config.ethernets) eth_clock();
if (config.ngpios) gpio_clock();
if (config.vapi.enabled && runtime.vapi.enabled) vapi_check();
/trunk/or1ksim/sim-config.c
124,9 → 124,6
/* Memory Controller */
config.mc.enabled = 0;
/* DMAs */
config.ndmas = 0;
/* CPU */
config.cpu.superscalar = 0;
config.sim.history = 0;
898,15 → 895,6
fprintf (f, " tick:{enabled:%i},\n", config.tick.enabled);
 
fprintf (f, " ndmas:%i, dmas:{", config.ndmas);
comma = 0;
for (i = 0; i < config.ndmas; i++) {
fprintf (f, "%s\n {baseaddr:0x%08lx, irq:%i, vapi_id:0x%08lx}",
comma ? "," :"", config.dmas[i].baseaddr, config.dmas[i].irq, config.dmas[i].vapi_id);
comma = 1;
}
fprintf (f, "},\n");
 
fprintf (f, " nethernets:%i, ethernets:{", config.nethernets);
comma = 0;
for (i = 0; i < config.nethernets; i++) {

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.