OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [tags/] [nog_patch_34/] [or1ksim/] [peripheral/] [dma.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 212 erez
/* dma.c -- Simulation of DMA
2 503 erez
   Copyright (C) 2001 by Erez Volk, erez@opencores.org
3 212 erez
 
4 503 erez
   This file is part of OpenRISC 1000 Architectural Simulator.
5 235 erez
 
6 503 erez
   This program is free software; you can redistribute it and/or modify
7
   it under the terms of the GNU General Public License as published by
8
   the Free Software Foundation; either version 2 of the License, or
9
   (at your option) any later version.
10 235 erez
 
11 503 erez
   This program is distributed in the hope that it will be useful,
12
   but WITHOUT ANY WARRANTY; without even the implied warranty of
13
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14
   GNU General Public License for more details.
15 212 erez
 
16 503 erez
   You should have received a copy of the GNU General Public License
17
   along with this program; if not, write to the Free Software
18
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 235 erez
*/
20 212 erez
 
21
/*
22
 * This simulation of the DMA core is not meant to be full.
23
 * It is written only to allow simulating the Ethernet core.
24
 * Of course, if anyone feels like perfecting it, feel free...
25
 */
26
 
27 1308 phoenix
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "config.h"

#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif

#include "port.h"
#include "arch.h"
#include "dma.h"
#include "sim-config.h"
#include "pic.h"
#include "abstract.h"
#include "fields.h"
#include "sched.h"
#include "debug.h"
44 212 erez
 
45 1370 nogj
/* We keep a copy of all our controllers because we have to export an interface
 * to other peripherals eg. ethernet */
static struct dma_controller *dmas = NULL;

/* Memory-mapped register access callbacks (registered in dma_sec_end) */
static uint32_t dma_read32( oraddr_t addr, void *dat );
static void dma_write32( oraddr_t addr, uint32_t value, void *dat );

/* The channel CSR has clear-on-read status bits and a restricted write mask,
 * so all accesses go through these helpers */
static unsigned long dma_read_ch_csr( struct dma_channel *channel );
static void dma_write_ch_csr( struct dma_channel *channel, unsigned long value );
/* NOTE(review): declared here but no definition is visible in this file --
 * confirm dma_controller_clock still exists before relying on it */
void dma_controller_clock( struct dma_controller *dma );
static void dma_load_descriptor( struct dma_channel *channel );
static void dma_init_transfer( struct dma_channel *channel );
static void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt );

/* Scheduler callback: moves one word per tick (see dma_write_ch_csr) */
void dma_channel_clock( void *dat );

static void masked_increase( oraddr_t *value, unsigned long mask );

/* True when a channel in HW-handshake mode with external descriptors sees
 * dma_nd_i asserted, i.e. the current descriptor should be skipped */
#define CHANNEL_ND_I(ch) (TEST_FLAG(ch->regs.csr,DMA_CH_CSR,MODE) && TEST_FLAG(ch->regs.csr,DMA_CH_CSR,USE_ED) && ch->dma_nd_i)
64
 
65
 
66
/* Reset. Initializes all registers to default and places devices in memory address space. */
67 1370 nogj
void dma_reset(void *dat)
68 212 erez
{
69 1370 nogj
  struct dma_controller *dma = dat;
70
  unsigned channel_number;
71 212 erez
 
72 1370 nogj
  memset( dma->ch, 0, sizeof(dma->ch) );
73
 
74
  dma->regs.csr = 0;
75
  dma->regs.int_msk_a = 0;
76
  dma->regs.int_msk_b = 0;
77
  dma->regs.int_src_a = 0;
78
  dma->regs.int_src_b = 0;
79 235 erez
 
80 1370 nogj
  for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number ) {
81
    dma->ch[channel_number].controller = dma;
82
    dma->ch[channel_number].channel_number = channel_number;
83
    dma->ch[channel_number].channel_mask = 1LU << channel_number;
84
    dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
85 503 erez
  }
86 212 erez
}
87
 
88
/* Print register values on stdout */
89 1370 nogj
void dma_status( void *dat )
90 212 erez
{
91 503 erez
  unsigned i, j;
92 1370 nogj
  struct dma_controller *dma = dat;
93 212 erez
 
94 1370 nogj
  if ( dma->baseaddr == 0 )
95
    return;
96 212 erez
 
97 1370 nogj
  PRINTF( "\nDMA controller %u at 0x%"PRIxADDR":\n", i, dma->baseaddr );
98
  PRINTF( "CSR       : 0x%08lX\n", dma->regs.csr );
99
  PRINTF( "INT_MSK_A : 0x%08lX\n", dma->regs.int_msk_a );
100
  PRINTF( "INT_MSK_B : 0x%08lX\n", dma->regs.int_msk_b );
101
  PRINTF( "INT_SRC_A : 0x%08lX\n", dma->regs.int_src_a );
102
  PRINTF( "INT_SRC_B : 0x%08lX\n", dma->regs.int_src_b );
103 212 erez
 
104 1370 nogj
  for ( j = 0; j < DMA_NUM_CHANNELS; ++ j ) {
105
    struct dma_channel *channel = &(dma->ch[j]);
106
    if ( !channel->referenced )
107
      continue;
108
    PRINTF( "CH%u_CSR   : 0x%08lX\n", j, channel->regs.csr );
109
    PRINTF( "CH%u_SZ    : 0x%08lX\n", j, channel->regs.sz );
110
    PRINTF( "CH%u_A0    : 0x%08lX\n", j, channel->regs.a0 );
111
    PRINTF( "CH%u_AM0   : 0x%08lX\n", j, channel->regs.am0 );
112
    PRINTF( "CH%u_A1    : 0x%08lX\n", j, channel->regs.a1 );
113
    PRINTF( "CH%u_AM1   : 0x%08lX\n", j, channel->regs.am1 );
114
    PRINTF( "CH%u_DESC  : 0x%08lX\n", j, channel->regs.desc );
115
    PRINTF( "CH%u_SWPTR : 0x%08lX\n", j, channel->regs.swptr );
116 503 erez
  }
117 212 erez
}
118
 
119 1370 nogj
 
120 212 erez
/* Read a register */
121 1359 nogj
uint32_t dma_read32( oraddr_t addr, void *dat )
122 212 erez
{
123 1370 nogj
  struct dma_controller *dma = dat;
124 212 erez
 
125 503 erez
  addr -= dma->baseaddr;
126 212 erez
 
127 503 erez
  if ( addr % 4 != 0 ) {
128 1350 nogj
    fprintf( stderr, "dma_read32( 0x%"PRIxADDR" ): Not register-aligned\n",
129
             addr + dma->baseaddr );
130 884 markom
    runtime.sim.cont_run = 0;
131 503 erez
    return 0;
132
  }
133 212 erez
 
134 503 erez
  if ( addr < DMA_CH_BASE ) {
135
    /* case of global (not per-channel) registers */
136
    switch( addr ) {
137
    case DMA_CSR: return dma->regs.csr;
138
    case DMA_INT_MSK_A: return dma->regs.int_msk_a;
139
    case DMA_INT_MSK_B: return dma->regs.int_msk_b;
140
    case DMA_INT_SRC_A: return dma->regs.int_src_a;
141
    case DMA_INT_SRC_B: return dma->regs.int_src_b;
142
    default:
143 1350 nogj
      fprintf( stderr, "dma_read32( 0x%"PRIxADDR" ): Illegal register\n",
144
               addr + dma->baseaddr );
145 884 markom
      runtime.sim.cont_run = 0;
146 503 erez
      return 0;
147
    }
148
  } else {
149
    /* case of per-channel registers */
150
    unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
151
    addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
152
    switch( addr ) {
153
    case DMA_CH_CSR: return dma_read_ch_csr( &(dma->ch[chno]) );
154
    case DMA_CH_SZ: return dma->ch[chno].regs.sz;
155
    case DMA_CH_A0: return dma->ch[chno].regs.a0;
156
    case DMA_CH_AM0: return dma->ch[chno].regs.am0;
157
    case DMA_CH_A1: return dma->ch[chno].regs.a1;
158
    case DMA_CH_AM1: return dma->ch[chno].regs.am1;
159
    case DMA_CH_DESC: return dma->ch[chno].regs.desc;
160
    case DMA_CH_SWPTR: return dma->ch[chno].regs.swptr;
161
    }
162
  }
163 1370 nogj
  return 0;
164 212 erez
}
165
 
166
 
167
/* Handle read from a channel CSR */
168
unsigned long dma_read_ch_csr( struct dma_channel *channel )
169
{
170 503 erez
  unsigned long result = channel->regs.csr;
171 212 erez
 
172 503 erez
  /* before returning, clear all relevant bits */
173
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_CHUNK_DONE );
174
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
175
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
176
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
177 212 erez
 
178 503 erez
  return result;
179 212 erez
}
180
 
181
 
182
 
183
/* Write a register */
184 1359 nogj
void dma_write32( oraddr_t addr, uint32_t value, void *dat )
185 212 erez
{
186 1370 nogj
  struct dma_controller *dma = dat;
187 212 erez
 
188 503 erez
  addr -= dma->baseaddr;
189 212 erez
 
190 503 erez
  if ( addr % 4 != 0 ) {
191 1350 nogj
    fprintf( stderr, "dma_write32( 0x%"PRIxADDR", 0x%08"PRIx32" ): Not register-aligned\n", addr + dma->baseaddr, value );
192 884 markom
    runtime.sim.cont_run = 0;
193 503 erez
    return;
194
  }
195 212 erez
 
196 503 erez
  /* case of global (not per-channel) registers */
197
  if ( addr < DMA_CH_BASE ) {
198
    switch( addr ) {
199
    case DMA_CSR:
200
      if ( TEST_FLAG( value, DMA_CSR, PAUSE ) )
201
        fprintf( stderr, "dma: PAUSE not implemented\n" );
202
      break;
203 212 erez
 
204 503 erez
    case DMA_INT_MSK_A: dma->regs.int_msk_a = value; break;
205
    case DMA_INT_MSK_B: dma->regs.int_msk_b = value; break;
206
    case DMA_INT_SRC_A: dma->regs.int_src_a = value; break;
207
    case DMA_INT_SRC_B: dma->regs.int_src_b = value; break;
208
    default:
209 1350 nogj
      fprintf( stderr, "dma_write32( 0x%"PRIxADDR" ): Illegal register\n",
210
               addr + dma->baseaddr );
211 884 markom
      runtime.sim.cont_run = 0;
212 503 erez
      return;
213
    }
214
  } else {
215
    /* case of per-channel registers */
216
    unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
217
    struct dma_channel *channel = &(dma->ch[chno]);
218
    channel->referenced = 1;
219
    addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
220
    switch( addr ) {
221
    case DMA_CSR: dma_write_ch_csr( &(dma->ch[chno]), value ); break;
222
    case DMA_CH_SZ: channel->regs.sz = value; break;
223
    case DMA_CH_A0: channel->regs.a0 = value; break;
224
    case DMA_CH_AM0: channel->regs.am0 = value; break;
225
    case DMA_CH_A1: channel->regs.a1 = value; break;
226
    case DMA_CH_AM1: channel->regs.am1 = value; break;
227
    case DMA_CH_DESC: channel->regs.desc = value; break;
228
    case DMA_CH_SWPTR: channel->regs.swptr = value; break;
229
    }
230
  }
231 212 erez
}
232
 
233
 
234
/* Write a channel CSR
235
 * This ensures only the writable bits are modified.
236
 */
237
void dma_write_ch_csr( struct dma_channel *channel, unsigned long value )
238
{
239 1370 nogj
  /* Check if we should *start* a transfer */
240
  if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) &&
241
       TEST_FLAG( value, DMA_CH_CSR, CH_EN ))
242 1390 nogj
    SCHED_ADD( dma_channel_clock, channel, 1 );
243 1370 nogj
  else if ( !TEST_FLAG( value, DMA_CH_CSR, CH_EN ) )
244
    /* The CH_EN flag is clear, check if we have a transfer in progress and
245
     * clear it */
246
    SCHED_FIND_REMOVE( dma_channel_clock, channel );
247
 
248 503 erez
  /* Copy the writable bits to the channel CSR */
249
  channel->regs.csr &= ~DMA_CH_CSR_WRITE_MASK;
250
  channel->regs.csr |= value & DMA_CH_CSR_WRITE_MASK;
251 212 erez
}
252
 
253
 
254
 
255 1370 nogj
/* Clock tick for one channel on one DMA controller.
 * This does the actual "DMA" operation.
 * One *word* is transferred per scheduler tick (not one chunk); the function
 * reschedules itself via SCHED_ADD until the transfer completes, is stopped,
 * or dma_nd_i terminates the current descriptor.
 * 'dat' is the struct dma_channel being clocked. */
void dma_channel_clock( void *dat )
{
  int breakpoint = 0;
  struct dma_channel *channel = dat;

  /* Do we need to abort? STOP forces the channel into the error state and,
   * if enabled and unmasked, raises the error interrupt. */
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) ) {
    debug( 3,  "DMA: STOP requested\n" );
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
    SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );

    /* Interrupt only if INE_ERR is set AND this channel is unmasked in A */
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
         (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
      channel->controller->regs.int_src_a = channel->channel_mask;
      report_interrupt( channel->controller->irq );
    }

    return;
  }

  /* In HW Handshake mode, only work when dma_req_i asserted */
  if ( TEST_FLAG(channel->regs.csr, DMA_CH_CSR, MODE) && !channel->dma_req_i ) {
    /* Reschedule and poll again next tick */
    SCHED_ADD( dma_channel_clock, dat, 1 );
    return;
  }

  /* If this is the first cycle of the transfer, initialize our state
   * (BUSY doubles as the "transfer in progress" marker) */
  if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) ) {
    debug( 4,  "DMA: Starting new transfer\n" );

    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
    SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );

    /* If using linked lists, copy the appropriate fields to our registers */
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
      dma_load_descriptor( channel );
    else
      channel->load_next_descriptor_when_done = 0;

    /* Set our internal status (source/dest pointers, sizes, counters) */
    dma_init_transfer( channel );

    /* Might need to skip descriptor (dma_nd_i already asserted) */
    if ( CHANNEL_ND_I( channel ) ) {
      debug( 3,  "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" );
      dma_channel_terminate_transfer( channel, 0 );
      return;
    }
  }

  /* Transfer one word through simulated memory */
  set_mem32( channel->destination, eval_mem32( channel->source, &breakpoint ), &breakpoint );

  /* Advance the source and destination pointers (mask 0 = fixed address) */
  masked_increase( &(channel->source), channel->source_mask );
  masked_increase( &(channel->destination), channel->destination_mask );
  ++ channel->words_transferred;

  /* Have we finished a whole chunk? dma_ack_o signals the peripheral */
  channel->dma_ack_o = (channel->words_transferred % channel->chunk_size == 0);

  /* When done with a chunk, check for dma_nd_i (skip rest of descriptor) */
  if ( CHANNEL_ND_I( channel ) ) {
    debug( 3,  "DMA: dma_nd_i asserted\n" );
    dma_channel_terminate_transfer( channel, 0 );
    return;
  }

  /* Are we done with the whole transfer? (interrupt on normal completion) */
  if ( channel->words_transferred >= channel->total_size ) {
    dma_channel_terminate_transfer( channel, 1 );
    return;
  }

  /* Reschedule to transfer the next word */
  SCHED_ADD( dma_channel_clock, dat, 1 );
}
340
 
341
 
342
/* Copy relevant valued from linked list descriptor to channel registers */
343
void dma_load_descriptor( struct dma_channel *channel )
344
{
345 503 erez
  int breakpoint = 0;
346
  unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );
347 212 erez
 
348 503 erez
  channel->load_next_descriptor_when_done = !TEST_FLAG( desc_csr, DMA_DESC_CSR, EOL );
349 212 erez
 
350 503 erez
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_SRC ) );
351
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_DST ) );
352
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, SRC_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, SRC_SEL ) );
353
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, DST_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, DST_SEL ) );
354 212 erez
 
355 503 erez
  SET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ,        GET_FIELD( desc_csr, DMA_DESC_CSR, TOT_SZ ) );
356 212 erez
 
357 503 erez
  channel->regs.a0 = eval_mem32( channel->regs.desc + DMA_DESC_ADR0, &breakpoint );
358
  channel->regs.a1 = eval_mem32( channel->regs.desc + DMA_DESC_ADR1, &breakpoint );
359 212 erez
 
360 503 erez
  channel->current_descriptor = channel->regs.desc;
361
  channel->regs.desc = eval_mem32( channel->regs.desc + DMA_DESC_NEXT, &breakpoint );
362 212 erez
}
363
 
364
 
365
/* Initialize internal parameters used to implement transfers */
366
void dma_init_transfer( struct dma_channel *channel )
367
{
368 503 erez
  channel->source = channel->regs.a0;
369
  channel->destination = channel->regs.a1;
370
  channel->source_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC ) ? channel->regs.am0 : 0;
371
  channel->destination_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST ) ? channel->regs.am1 : 0;
372
  channel->total_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ );
373
  channel->chunk_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, CHK_SZ );
374
  if ( !channel->chunk_size || (channel->chunk_size > channel->total_size) )
375
    channel->chunk_size = channel->total_size;
376
  channel->words_transferred = 0;
377 212 erez
}
378
 
379
 
380
/* Take care of transfer termination */
381
void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt )
382
{
383 503 erez
  debug( 4,  "DMA: Terminating transfer\n" );
384 256 erez
 
385 503 erez
  /* Might be working in a linked list */
386
  if ( channel->load_next_descriptor_when_done ) {
387
    dma_load_descriptor( channel );
388
    dma_init_transfer( channel );
389 1370 nogj
    /* Reschedule */
390 1390 nogj
    SCHED_ADD( dma_channel_clock, channel, 1 );
391 503 erez
    return;
392
  }
393 212 erez
 
394 503 erez
  /* Might be in auto-restart mode */
395
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, ARS ) ) {
396
    dma_init_transfer( channel );
397
    return;
398
  }
399 212 erez
 
400 503 erez
  /* If needed, write amount of data transferred back to memory */
401
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, SZ_WB ) &&
402
       TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) ) {
403 1370 nogj
   /* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
404 503 erez
    unsigned long remaining_words = channel->total_size - channel->words_transferred;
405
    SET_FIELD( channel->regs.sz, DMA_DESC_CSR, TOT_SZ, remaining_words );
406
  }
407 212 erez
 
408 503 erez
  /* Mark end of transfer */
409
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
410
  SET_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
411
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
412
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
413 235 erez
 
414 503 erez
  /* If needed, generate interrupt */
415
  if ( generate_interrupt ) {
416
    /* TODO: Which channel should we interrupt? */
417
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_DONE ) &&
418
         (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
419
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
420
      channel->controller->regs.int_src_a = channel->channel_mask;
421
      report_interrupt( channel->controller->irq );
422
    }
423
  }
424 212 erez
}
425
 
426
/* Utility function: Add 4 to a value with a mask */
427 1370 nogj
static void masked_increase( oraddr_t *value, unsigned long mask )
428 212 erez
{
429 503 erez
  *value = (*value & ~mask) | ((*value + 4) & mask);
430 212 erez
}
431 1358 nogj
 
432 1370 nogj
/*-------------------------------------------[ DMA<->Peripheral interface ]---*/
433
/*
434
 * Simulation of control signals
435
 * To be used by simulations for other devices, e.g. ethernet
436
 */
437
 
438
/* Assert the dma_req_i control line: the peripheral requests a transfer.
 * Exported to other peripheral models (e.g. Ethernet); in HW-handshake mode
 * dma_channel_clock() only moves data while this line is asserted. */
void set_dma_req_i( struct dma_channel *channel )
{
  channel->dma_req_i = 1;
}
442
 
443 1370 nogj
/* Deassert the dma_req_i control line: the peripheral withdraws its
 * transfer request (pauses a HW-handshake transfer). */
void clear_dma_req_i( struct dma_channel *channel )
{
  channel->dma_req_i = 0;
}
447
 
448
/* Assert the dma_nd_i ("next descriptor") control line: in external-descriptor
 * mode this makes the channel skip/terminate the current descriptor
 * (see CHANNEL_ND_I in dma_channel_clock()). */
void set_dma_nd_i( struct dma_channel *channel )
{
  channel->dma_nd_i = 1;
}
452
 
453
/* Deassert the dma_nd_i ("next descriptor") control line. */
void clear_dma_nd_i( struct dma_channel *channel )
{
  channel->dma_nd_i = 0;
}
457
 
458
/* Sample the dma_ack_o output: nonzero when the channel has just completed
 * a whole chunk (set each tick by dma_channel_clock()). */
unsigned check_dma_ack_o( struct dma_channel *channel )
{
  return channel->dma_ack_o;
}
462
 
463
struct dma_channel *find_dma_controller_ch( unsigned controller,
464
                                            unsigned channel )
465
{
466
  struct dma_controller *cur = dmas;
467
 
468
  while( cur && controller ) {
469
    cur = cur->next;
470
    controller--;
471
  }
472
 
473
  if( !cur )
474
    return NULL;
475
 
476
  return &(cur->ch[channel]);
477
}
478
 
479
 
480
/*----------------------------------------------------[ DMA configuration ]---*/
481 1358 nogj
/* Config hook for the "baseaddr" parameter: set this controller's base
 * address in the simulated address space. 'dat' is the controller created
 * by dma_sec_start(). */
void dma_baseaddr(union param_val val, void *dat)
{
  struct dma_controller *dma = dat;
  dma->baseaddr = val.addr_val;
}
486
 
487
/* Config hook for the "irq" parameter: set the interrupt line this
 * controller reports on via report_interrupt(). */
void dma_irq(union param_val val, void *dat)
{
  struct dma_controller *dma = dat;
  dma->irq = val.int_val;
}
492
 
493
/* Config hook for the "vapi_id" parameter.
 * NOTE(review): this reads val.int_val, but reg_dma_sec() registers the
 * parameter as paramt_addr -- confirm which union member the config parser
 * fills for that type. */
void dma_vapi_id(union param_val val, void *dat)
{
  struct dma_controller *dma = dat;
  dma->vapi_id = val.int_val;
}
498
 
499 1370 nogj
void *dma_sec_start(void)
500
{
501
  struct dma_controller *new = malloc(sizeof(struct dma_controller));
502
 
503
  if(!new) {
504
    fprintf(stderr, "Peripheral DMA: Run out of memory\n");
505
    exit(-1);
506
  }
507
 
508
  new->next = NULL;
509
 
510
  return new;
511
}
512
 
513
void dma_sec_end(void *dat)
514
{
515
  struct dma_controller *dma = dat;
516
  struct dma_controller *cur;
517
 
518
  register_memoryarea( dma->baseaddr, DMA_ADDR_SPACE, 4, 0, dma_read32, dma_write32, dat );
519
  reg_sim_reset( dma_reset, dat );
520
  reg_sim_stat( dma_status, dat );
521
 
522
  if(dmas) {
523
    for(cur = dmas; cur->next; cur = cur->next);
524
    cur->next = dma;
525
  } else
526
    dmas = dma;
527
}
528
 
529 1358 nogj
void reg_dma_sec(void)
530
{
531 1370 nogj
  struct config_section *sec = reg_config_sec("dma", dma_sec_start, dma_sec_end);
532 1358 nogj
 
533
  reg_config_param(sec, "irq", paramt_int, dma_irq);
534
  reg_config_param(sec, "baseaddr", paramt_addr, dma_baseaddr);
535
  reg_config_param(sec, "vapi_id", paramt_addr, dma_vapi_id);
536
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.