/* dma.c -- Simulation of DMA
   Copyright (C) 2001 by Erez Volk, erez@opencores.org

   This file is part of OpenRISC 1000 Architectural Simulator.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/*
 * This simulation of the DMA core is not meant to be full.
 * It is written only to allow simulating the Ethernet core.
 * Of course, if anyone feels like perfecting it, feel free...
 */

#include <string.h>

#include "config.h"

#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif

#include "port.h"
#include "arch.h"
#include "dma.h"
#include "sim-config.h"
#include "pic.h"
#include "abstract.h"
#include "fields.h"
#include "sched.h"
#include "debug.h"

DEFAULT_DEBUG_CHANNEL(dma);

/* We keep a copy of all our controllers because we have to export an interface
 * to other peripherals, e.g. ethernet */
static struct dma_controller *dmas = NULL;

static uint32_t dma_read32( oraddr_t addr, void *dat );
static void dma_write32( oraddr_t addr, uint32_t value, void *dat );

static unsigned long dma_read_ch_csr( struct dma_channel *channel );
static void dma_write_ch_csr( struct dma_channel *channel, unsigned long value );
void dma_controller_clock( struct dma_controller *dma );
static void dma_load_descriptor( struct dma_channel *channel );
static void dma_init_transfer( struct dma_channel *channel );
static void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt );

void dma_channel_clock( void *dat );

static void masked_increase( oraddr_t *value, unsigned long mask );

#define CHANNEL_ND_I(ch) (TEST_FLAG(ch->regs.csr,DMA_CH_CSR,MODE) && TEST_FLAG(ch->regs.csr,DMA_CH_CSR,USE_ED) && ch->dma_nd_i)


/* Reset. Initializes all registers to default and places devices in memory address space. */
void dma_reset(void *dat)
{
  struct dma_controller *dma = dat;
  unsigned channel_number;

  memset( dma->ch, 0, sizeof(dma->ch) );

  dma->regs.csr = 0;
  dma->regs.int_msk_a = 0;
  dma->regs.int_msk_b = 0;
  dma->regs.int_src_a = 0;
  dma->regs.int_src_b = 0;

  for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number ) {
    dma->ch[channel_number].controller = dma;
    dma->ch[channel_number].channel_number = channel_number;
    dma->ch[channel_number].channel_mask = 1LU << channel_number;
    dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
  }
}

/* Print register values on stdout */
void dma_status( void *dat )
{
  unsigned j;
  struct dma_controller *dma = dat;

  if ( dma->baseaddr == 0 )
    return;

  PRINTF( "\nDMA controller at 0x%"PRIxADDR":\n", dma->baseaddr );
  PRINTF( "CSR       : 0x%08lX\n", dma->regs.csr );
  PRINTF( "INT_MSK_A : 0x%08lX\n", dma->regs.int_msk_a );
  PRINTF( "INT_MSK_B : 0x%08lX\n", dma->regs.int_msk_b );
  PRINTF( "INT_SRC_A : 0x%08lX\n", dma->regs.int_src_a );
  PRINTF( "INT_SRC_B : 0x%08lX\n", dma->regs.int_src_b );

  for ( j = 0; j < DMA_NUM_CHANNELS; ++ j ) {
    struct dma_channel *channel = &(dma->ch[j]);
    if ( !channel->referenced )
      continue;
    PRINTF( "CH%u_CSR   : 0x%08lX\n", j, channel->regs.csr );
    PRINTF( "CH%u_SZ    : 0x%08lX\n", j, channel->regs.sz );
    PRINTF( "CH%u_A0    : 0x%08lX\n", j, channel->regs.a0 );
    PRINTF( "CH%u_AM0   : 0x%08lX\n", j, channel->regs.am0 );
    PRINTF( "CH%u_A1    : 0x%08lX\n", j, channel->regs.a1 );
    PRINTF( "CH%u_AM1   : 0x%08lX\n", j, channel->regs.am1 );
    PRINTF( "CH%u_DESC  : 0x%08lX\n", j, channel->regs.desc );
    PRINTF( "CH%u_SWPTR : 0x%08lX\n", j, channel->regs.swptr );
  }
}


/* Read a register */
uint32_t dma_read32( oraddr_t addr, void *dat )
{
  struct dma_controller *dma = dat;

  if ( addr < DMA_CH_BASE ) {
    /* case of global (not per-channel) registers */
    switch( addr ) {
    case DMA_CSR: return dma->regs.csr;
    case DMA_INT_MSK_A: return dma->regs.int_msk_a;
    case DMA_INT_MSK_B: return dma->regs.int_msk_b;
    case DMA_INT_SRC_A: return dma->regs.int_src_a;
    case DMA_INT_SRC_B: return dma->regs.int_src_b;
    default:
      fprintf( stderr, "dma_read32( 0x%"PRIxADDR" ): Illegal register\n",
               addr + dma->baseaddr );
      return 0;
    }
  } else {
    /* case of per-channel registers */
    unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
    addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
    switch( addr ) {
    case DMA_CH_CSR: return dma_read_ch_csr( &(dma->ch[chno]) );
    case DMA_CH_SZ: return dma->ch[chno].regs.sz;
    case DMA_CH_A0: return dma->ch[chno].regs.a0;
    case DMA_CH_AM0: return dma->ch[chno].regs.am0;
    case DMA_CH_A1: return dma->ch[chno].regs.a1;
    case DMA_CH_AM1: return dma->ch[chno].regs.am1;
    case DMA_CH_DESC: return dma->ch[chno].regs.desc;
    case DMA_CH_SWPTR: return dma->ch[chno].regs.swptr;
    }
  }
  return 0;
}


/* Handle read from a channel CSR */
unsigned long dma_read_ch_csr( struct dma_channel *channel )
{
  unsigned long result = channel->regs.csr;

  /* before returning, clear all relevant bits */
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_CHUNK_DONE );
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );

  return result;
}



/* Write a register */
void dma_write32( oraddr_t addr, uint32_t value, void *dat )
{
  struct dma_controller *dma = dat;

  /* case of global (not per-channel) registers */
  if ( addr < DMA_CH_BASE ) {
    switch( addr ) {
    case DMA_CSR:
      if ( TEST_FLAG( value, DMA_CSR, PAUSE ) )
        fprintf( stderr, "dma: PAUSE not implemented\n" );
      break;

    case DMA_INT_MSK_A: dma->regs.int_msk_a = value; break;
    case DMA_INT_MSK_B: dma->regs.int_msk_b = value; break;
    case DMA_INT_SRC_A: dma->regs.int_src_a = value; break;
    case DMA_INT_SRC_B: dma->regs.int_src_b = value; break;
    default:
      fprintf( stderr, "dma_write32( 0x%"PRIxADDR" ): Illegal register\n",
               addr + dma->baseaddr );
      return;
    }
  } else {
    /* case of per-channel registers */
    unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
    struct dma_channel *channel = &(dma->ch[chno]);
    channel->referenced = 1;
    addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
    switch( addr ) {
    case DMA_CH_CSR: dma_write_ch_csr( &(dma->ch[chno]), value ); break;
    case DMA_CH_SZ: channel->regs.sz = value; break;
    case DMA_CH_A0: channel->regs.a0 = value; break;
    case DMA_CH_AM0: channel->regs.am0 = value; break;
    case DMA_CH_A1: channel->regs.a1 = value; break;
    case DMA_CH_AM1: channel->regs.am1 = value; break;
    case DMA_CH_DESC: channel->regs.desc = value; break;
    case DMA_CH_SWPTR: channel->regs.swptr = value; break;
    }
  }
}


/* Write a channel CSR
 * This ensures only the writable bits are modified.
 */
void dma_write_ch_csr( struct dma_channel *channel, unsigned long value )
{
  /* Check if we should *start* a transfer */
  if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) &&
       TEST_FLAG( value, DMA_CH_CSR, CH_EN ))
    SCHED_ADD( dma_channel_clock, channel, 1 );
  else if ( !TEST_FLAG( value, DMA_CH_CSR, CH_EN ) )
    /* The CH_EN flag is clear, check if we have a transfer in progress and
     * clear it */
    SCHED_FIND_REMOVE( dma_channel_clock, channel );

  /* Copy the writable bits to the channel CSR */
  channel->regs.csr &= ~DMA_CH_CSR_WRITE_MASK;
  channel->regs.csr |= value & DMA_CH_CSR_WRITE_MASK;
}



/* Clock tick for one channel on one DMA controller.
 * This does the actual "DMA" operation.
 * One chunk is transferred per clock.
 */
void dma_channel_clock( void *dat )
{
  struct dma_channel *channel = dat;

  /* Do we need to abort? */
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) ) {
    TRACE( "DMA: STOP requested\n" );
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
    SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );

    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
         (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
      channel->controller->regs.int_src_a = channel->channel_mask;
      report_interrupt( channel->controller->irq );
    }

    return;
  }

  /* In HW Handshake mode, only work when dma_req_i asserted */
  if ( TEST_FLAG(channel->regs.csr, DMA_CH_CSR, MODE) && !channel->dma_req_i ) {
    /* Reschedule */
    SCHED_ADD( dma_channel_clock, dat, 1 );
    return;
  }

  /* If this is the first cycle of the transfer, initialize our state */
  if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) ) {
    TRACE( "DMA: Starting new transfer\n" );

    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
    SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );

    /* If using linked lists, copy the appropriate fields to our registers */
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
      dma_load_descriptor( channel );
    else
      channel->load_next_descriptor_when_done = 0;

    /* Set our internal status */
    dma_init_transfer( channel );

    /* Might need to skip descriptor */
    if ( CHANNEL_ND_I( channel ) ) {
      TRACE( "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" );
      dma_channel_terminate_transfer( channel, 0 );
      return;
    }
  }

  /* Transfer one word */
  set_direct32( channel->destination, eval_direct32( channel->source, 0, 0 ),
                0, 0 );

  /* Advance the source and destination pointers */
  masked_increase( &(channel->source), channel->source_mask );
  masked_increase( &(channel->destination), channel->destination_mask );
  ++ channel->words_transferred;

  /* Have we finished a whole chunk? */
  channel->dma_ack_o = (channel->words_transferred % channel->chunk_size == 0);

  /* When done with a chunk, check for dma_nd_i */
  if ( CHANNEL_ND_I( channel ) ) {
    TRACE( "DMA: dma_nd_i asserted\n" );
    dma_channel_terminate_transfer( channel, 0 );
    return;
  }

  /* Are we done? */
  if ( channel->words_transferred >= channel->total_size ) {
    dma_channel_terminate_transfer( channel, 1 );
    return;
  }

  /* Reschedule to transfer the next chunk */
  SCHED_ADD( dma_channel_clock, dat, 1 );
}


/* Copy relevant values from linked list descriptor to channel registers */
void dma_load_descriptor( struct dma_channel *channel )
{
  unsigned long desc_csr = eval_direct32( channel->regs.desc + DMA_DESC_CSR, 0, 0 );

  channel->load_next_descriptor_when_done = !TEST_FLAG( desc_csr, DMA_DESC_CSR, EOL );

  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_SRC ) );
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_DST ) );
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, SRC_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, SRC_SEL ) );
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, DST_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, DST_SEL ) );

  SET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ,        GET_FIELD( desc_csr, DMA_DESC_CSR, TOT_SZ ) );

  channel->regs.a0 = eval_direct32( channel->regs.desc + DMA_DESC_ADR0, 0, 0 );
  channel->regs.a1 = eval_direct32( channel->regs.desc + DMA_DESC_ADR1, 0, 0 );

  channel->current_descriptor = channel->regs.desc;
  channel->regs.desc = eval_direct32( channel->regs.desc + DMA_DESC_NEXT, 0, 0 );
}
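
/* Editor's note (a sketch, not part of the original source): the external
 * descriptor walked by dma_load_descriptor() is a small in-memory record
 * whose field offsets (DMA_DESC_CSR, DMA_DESC_ADR0, DMA_DESC_ADR1,
 * DMA_DESC_NEXT) come from dma.h. Conceptually, using a hypothetical struct
 * for illustration only:
 *
 *   struct dma_descriptor_layout {
 *     uint32_t csr;    at DMA_DESC_CSR:  EOL, INC_SRC/INC_DST,
 *                                        SRC_SEL/DST_SEL and the TOT_SZ field
 *     uint32_t adr0;   at DMA_DESC_ADR0: source address, copied to a0
 *     uint32_t adr1;   at DMA_DESC_ADR1: destination address, copied to a1
 *     uint32_t next;   at DMA_DESC_NEXT: address of the next descriptor
 *   };
 *
 * While EOL is clear, load_next_descriptor_when_done stays set and
 * dma_channel_terminate_transfer() chains to the descriptor at "next".
 */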


/* Initialize internal parameters used to implement transfers */
void dma_init_transfer( struct dma_channel *channel )
{
  channel->source = channel->regs.a0;
  channel->destination = channel->regs.a1;
  channel->source_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC ) ? channel->regs.am0 : 0;
  channel->destination_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST ) ? channel->regs.am1 : 0;
  channel->total_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ );
  channel->chunk_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, CHK_SZ );
  if ( !channel->chunk_size || (channel->chunk_size > channel->total_size) )
    channel->chunk_size = channel->total_size;
  channel->words_transferred = 0;
}


/* Take care of transfer termination */
void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt )
{
  TRACE( "DMA: Terminating transfer\n" );

  /* Might be working in a linked list */
  if ( channel->load_next_descriptor_when_done ) {
    dma_load_descriptor( channel );
    dma_init_transfer( channel );
    /* Reschedule */
    SCHED_ADD( dma_channel_clock, channel, 1 );
    return;
  }

  /* Might be in auto-restart mode */
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, ARS ) ) {
    dma_init_transfer( channel );
    return;
  }

  /* If needed, write amount of data transferred back to memory */
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, SZ_WB ) &&
       TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) ) {
    /* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
    unsigned long remaining_words = channel->total_size - channel->words_transferred;
    SET_FIELD( channel->regs.sz, DMA_DESC_CSR, TOT_SZ, remaining_words );
  }

  /* Mark end of transfer */
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
  SET_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );

  /* If needed, generate interrupt */
  if ( generate_interrupt ) {
    /* TODO: Which channel should we interrupt? */
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_DONE ) &&
         (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
      channel->controller->regs.int_src_a = channel->channel_mask;
      report_interrupt( channel->controller->irq );
    }
  }
}

/* Utility function: Add 4 to a value with a mask */
static void masked_increase( oraddr_t *value, unsigned long mask )
{
  *value = (*value & ~mask) | ((*value + 4) & mask);
}
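
/* Editor's worked example (not in the original source): with the reset-time
 * default mask of 0xFFFFFFFC (see dma_reset), masked_increase() is simply
 * "advance by one 32-bit word":
 *
 *   value = 0x00001000, mask = 0xFFFFFFFC
 *     value & ~mask       = 0x00000000
 *     (value + 4) & mask  = 0x00001004
 *     result              = 0x00001004
 *
 * With a narrower mask only the masked bits advance, so the pointer wraps
 * inside a window while the bits outside the mask stay fixed; e.g. with
 * mask = 0x0000000C the value cycles 0x1000, 0x1004, 0x1008, 0x100C, 0x1000.
 */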

/*-------------------------------------------[ DMA<->Peripheral interface ]---*/
/*
 * Simulation of control signals
 * To be used by simulations for other devices, e.g. ethernet
 */

void set_dma_req_i( struct dma_channel *channel )
{
  channel->dma_req_i = 1;
}

void clear_dma_req_i( struct dma_channel *channel )
{
  channel->dma_req_i = 0;
}

void set_dma_nd_i( struct dma_channel *channel )
{
  channel->dma_nd_i = 1;
}

void clear_dma_nd_i( struct dma_channel *channel )
{
  channel->dma_nd_i = 0;
}

unsigned check_dma_ack_o( struct dma_channel *channel )
{
  return channel->dma_ack_o;
}

struct dma_channel *find_dma_controller_ch( unsigned controller,
                                            unsigned channel )
{
  struct dma_controller *cur = dmas;

  while( cur && controller ) {
    cur = cur->next;
    controller--;
  }

  if( !cur )
    return NULL;

  return &(cur->ch[channel]);
}
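
/* Editor's sketch (not part of the original file): a peripheral model such as
 * the Ethernet core would typically look up its channel once and then drive
 * the handshake lines around each request. The controller/channel numbers
 * below are placeholders:
 *
 *   struct dma_channel *ch = find_dma_controller_ch( 0, 0 );
 *   if ( ch != NULL ) {
 *     set_dma_req_i( ch );            request a chunk in HW handshake mode
 *     ...                             let dma_channel_clock() run
 *     if ( check_dma_ack_o( ch ) )    chunk completed by the DMA model
 *       clear_dma_req_i( ch );
 *   }
 *
 * set_dma_nd_i() and clear_dma_nd_i() drive the "next descriptor" input that
 * CHANNEL_ND_I() tests to cut the current descriptor short.
 */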


/*----------------------------------------------------[ DMA configuration ]---*/
static void dma_baseaddr(union param_val val, void *dat)
{
  struct dma_controller *dma = dat;
  dma->baseaddr = val.addr_val;
}

static void dma_irq(union param_val val, void *dat)
{
  struct dma_controller *dma = dat;
  dma->irq = val.int_val;
}

static void dma_vapi_id(union param_val val, void *dat)
{
  struct dma_controller *dma = dat;
  dma->vapi_id = val.int_val;
}

static void dma_enabled(union param_val val, void *dat)
{
  struct dma_controller *dma = dat;
  dma->enabled = val.int_val;
}

static void *dma_sec_start(void)
{
  struct dma_controller *new = malloc(sizeof(struct dma_controller));

  if(!new) {
    fprintf(stderr, "Peripheral DMA: Ran out of memory\n");
    exit(-1);
  }

  new->next = NULL;
  new->enabled = 1;

  return new;
}

static void dma_sec_end(void *dat)
{
  struct dma_controller *dma = dat;
  struct dma_controller *cur;
  struct mem_ops ops;

  if(!dma->enabled) {
    free(dat);
    return;
  }

  memset(&ops, 0, sizeof(struct mem_ops));

  ops.readfunc32 = dma_read32;
  ops.writefunc32 = dma_write32;
  ops.read_dat32 = dat;
  ops.write_dat32 = dat;

  /* FIXME: Correct delay?? */
  ops.delayr = 2;
  ops.delayw = 2;

  reg_mem_area( dma->baseaddr, DMA_ADDR_SPACE, 0, &ops );
  reg_sim_reset( dma_reset, dat );
  reg_sim_stat( dma_status, dat );

  if(dmas) {
    for(cur = dmas; cur->next; cur = cur->next);
    cur->next = dma;
  } else
    dmas = dma;
}

void reg_dma_sec(void)
{
  struct config_section *sec = reg_config_sec("dma", dma_sec_start, dma_sec_end);

  reg_config_param(sec, "irq", paramt_int, dma_irq);
  reg_config_param(sec, "enabled", paramt_int, dma_enabled);
  reg_config_param(sec, "baseaddr", paramt_addr, dma_baseaddr);
  reg_config_param(sec, "vapi_id", paramt_addr, dma_vapi_id);
}
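
/* Editor's sketch (not in the original source): reg_dma_sec() makes the
 * controller configurable from the simulator configuration file. Assuming
 * the usual or1ksim "section ... end" syntax, a DMA instance might be
 * declared like this (all values below are placeholders):
 *
 *   section dma
 *     enabled  = 1
 *     baseaddr = 0x9a000000
 *     irq      = 11
 *     vapi_id  = 0x64
 *   end
 */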
