OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [tags/] [stable_0_1_0/] [or1ksim/] [peripheral/] [dma.c] - Blame information for rev 1780

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 212 erez
/* dma.c -- Simulation of DMA
2 503 erez
   Copyright (C) 2001 by Erez Volk, erez@opencores.org
3 212 erez
 
4 503 erez
   This file is part of OpenRISC 1000 Architectural Simulator.
5 235 erez
 
6 503 erez
   This program is free software; you can redistribute it and/or modify
7
   it under the terms of the GNU General Public License as published by
8
   the Free Software Foundation; either version 2 of the License, or
9
   (at your option) any later version.
10 235 erez
 
11 503 erez
   This program is distributed in the hope that it will be useful,
12
   but WITHOUT ANY WARRANTY; without even the implied warranty of
13
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14
   GNU General Public License for more details.
15 212 erez
 
16 503 erez
   You should have received a copy of the GNU General Public License
17
   along with this program; if not, write to the Free Software
18
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 235 erez
*/
20 212 erez
 
21
/*
22
 * This simulation of the DMA core is not meant to be full.
23
 * It is written only to allow simulating the Ethernet core.
24
 * Of course, if anyone feels like perfecting it, feel free...
25
 */
26
 
27 1308 phoenix
#include <string.h>
28
 
29 1350 nogj
#include "config.h"
30
 
31
#ifdef HAVE_INTTYPES_H
32
#include <inttypes.h>
33
#endif
34
 
35
#include "port.h"
36
#include "arch.h"
37 212 erez
#include "dma.h"
38
#include "sim-config.h"
39
#include "pic.h"
40 235 erez
#include "abstract.h"
41 212 erez
#include "fields.h"
42 1308 phoenix
#include "debug.h"
43 212 erez
 
44
/* The representation of the DMA controllers */
45 424 markom
static struct dma_controller dmas[MAX_DMAS];
46 212 erez
 
47 1350 nogj
static uint32_t dma_read32( oraddr_t addr );
48
static void dma_write32( oraddr_t addr, uint32_t value );
49 235 erez
 
50 212 erez
static unsigned long dma_read_ch_csr( struct dma_channel *channel );
51
static void dma_write_ch_csr( struct dma_channel *channel, unsigned long value );
52
static void dma_controller_clock( struct dma_controller *dma );
53
static void dma_load_descriptor( struct dma_channel *channel );
54
static void dma_init_transfer( struct dma_channel *channel );
55
static void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt );
56
 
57
static void masked_increase( unsigned long *value, unsigned long mask );
58
 
59
#define CHANNEL_ND_I(ch) (TEST_FLAG(ch->regs.csr,DMA_CH_CSR,MODE) && TEST_FLAG(ch->regs.csr,DMA_CH_CSR,USE_ED) && ch->dma_nd_i)
60
 
61
 
62
/* Reset. Initializes all registers to default and places devices in memory address space. */
63
void dma_reset()
64
{
65 503 erez
  unsigned i;
66 212 erez
 
67 503 erez
  memset( dmas, 0, sizeof(dmas) );
68 235 erez
 
69 503 erez
  for ( i = 0; i < config.ndmas; ++ i ) {
70
    struct dma_controller *dma = &(dmas[i]);
71
    unsigned channel_number;
72 212 erez
 
73 503 erez
    dma->baseaddr = config.dmas[i].baseaddr;
74
    dma->irq = config.dmas[i].irq;
75
    for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number ) {
76
      dma->ch[channel_number].controller = &(dmas[i]);
77
      dma->ch[channel_number].channel_number = channel_number;
78
      dma->ch[channel_number].channel_mask = 1LU << channel_number;
79
      dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
80
    }
81
    if ( dma->baseaddr != 0 )
82 970 simons
      register_memoryarea( dma->baseaddr, DMA_ADDR_SPACE, 4, 0, dma_read32, dma_write32);
83 503 erez
  }
84 212 erez
}
85
 
86
/* Print register values on stdout */
87
void dma_status( void )
88
{
89 503 erez
  unsigned i, j;
90 212 erez
 
91 503 erez
  for ( i = 0; i < config.ndmas; ++ i ) {
92
    struct dma_controller *dma = &(dmas[i]);
93 212 erez
 
94 503 erez
    if ( dma->baseaddr == 0 )
95
      continue;
96 212 erez
 
97 1350 nogj
    PRINTF( "\nDMA controller %u at 0x%"PRIxADDR":\n", i, dma->baseaddr );
98 997 markom
    PRINTF( "CSR       : 0x%08lX\n", dma->regs.csr );
99
    PRINTF( "INT_MSK_A : 0x%08lX\n", dma->regs.int_msk_a );
100
    PRINTF( "INT_MSK_B : 0x%08lX\n", dma->regs.int_msk_b );
101
    PRINTF( "INT_SRC_A : 0x%08lX\n", dma->regs.int_src_a );
102
    PRINTF( "INT_SRC_B : 0x%08lX\n", dma->regs.int_src_b );
103 212 erez
 
104 503 erez
    for ( j = 0; j < DMA_NUM_CHANNELS; ++ j ) {
105
      struct dma_channel *channel = &(dma->ch[j]);
106
      if ( !channel->referenced )
107
        continue;
108 997 markom
      PRINTF( "CH%u_CSR   : 0x%08lX\n", j, channel->regs.csr );
109
      PRINTF( "CH%u_SZ    : 0x%08lX\n", j, channel->regs.sz );
110
      PRINTF( "CH%u_A0    : 0x%08lX\n", j, channel->regs.a0 );
111
      PRINTF( "CH%u_AM0   : 0x%08lX\n", j, channel->regs.am0 );
112
      PRINTF( "CH%u_A1    : 0x%08lX\n", j, channel->regs.a1 );
113
      PRINTF( "CH%u_AM1   : 0x%08lX\n", j, channel->regs.am1 );
114
      PRINTF( "CH%u_DESC  : 0x%08lX\n", j, channel->regs.desc );
115
      PRINTF( "CH%u_SWPTR : 0x%08lX\n", j, channel->regs.swptr );
116 503 erez
    }
117
  }
118 212 erez
}
119
 
120
 
121
/* Read a register */
122 1350 nogj
uint32_t dma_read32( oraddr_t addr )
123 212 erez
{
124 503 erez
  unsigned i;
125
  struct dma_controller *dma = NULL;
126 212 erez
 
127 503 erez
  for ( i = 0; i < MAX_DMAS && dma == NULL; ++ i ) {
128
    if ( addr >= dmas[i].baseaddr && addr < dmas[i].baseaddr + DMA_ADDR_SPACE )
129
      dma = &(dmas[i]);
130
  }
131 235 erez
 
132 503 erez
  /* verify we found a controller */
133
  if ( dma == NULL ) {
134 1350 nogj
    fprintf( stderr, "dma_read32( 0x%"PRIxADDR" ): Out of range\n", addr );
135 884 markom
    runtime.sim.cont_run = 0;
136 503 erez
    return 0;
137
  }
138 212 erez
 
139 503 erez
  addr -= dma->baseaddr;
140 212 erez
 
141 503 erez
  if ( addr % 4 != 0 ) {
142 1350 nogj
    fprintf( stderr, "dma_read32( 0x%"PRIxADDR" ): Not register-aligned\n",
143
             addr + dma->baseaddr );
144 884 markom
    runtime.sim.cont_run = 0;
145 503 erez
    return 0;
146
  }
147 212 erez
 
148 503 erez
  if ( addr < DMA_CH_BASE ) {
149
    /* case of global (not per-channel) registers */
150
    switch( addr ) {
151
    case DMA_CSR: return dma->regs.csr;
152
    case DMA_INT_MSK_A: return dma->regs.int_msk_a;
153
    case DMA_INT_MSK_B: return dma->regs.int_msk_b;
154
    case DMA_INT_SRC_A: return dma->regs.int_src_a;
155
    case DMA_INT_SRC_B: return dma->regs.int_src_b;
156
    default:
157 1350 nogj
      fprintf( stderr, "dma_read32( 0x%"PRIxADDR" ): Illegal register\n",
158
               addr + dma->baseaddr );
159 884 markom
      runtime.sim.cont_run = 0;
160 503 erez
      return 0;
161
    }
162
  } else {
163
    /* case of per-channel registers */
164
    unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
165
    addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
166
    switch( addr ) {
167
    case DMA_CH_CSR: return dma_read_ch_csr( &(dma->ch[chno]) );
168
    case DMA_CH_SZ: return dma->ch[chno].regs.sz;
169
    case DMA_CH_A0: return dma->ch[chno].regs.a0;
170
    case DMA_CH_AM0: return dma->ch[chno].regs.am0;
171
    case DMA_CH_A1: return dma->ch[chno].regs.a1;
172
    case DMA_CH_AM1: return dma->ch[chno].regs.am1;
173
    case DMA_CH_DESC: return dma->ch[chno].regs.desc;
174
    case DMA_CH_SWPTR: return dma->ch[chno].regs.swptr;
175
    }
176
  }
177 212 erez
}
178
 
179
 
180
/* Handle read from a channel CSR */
181
unsigned long dma_read_ch_csr( struct dma_channel *channel )
182
{
183 503 erez
  unsigned long result = channel->regs.csr;
184 212 erez
 
185 503 erez
  /* before returning, clear all relevant bits */
186
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_CHUNK_DONE );
187
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
188
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
189
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
190 212 erez
 
191 503 erez
  return result;
192 212 erez
}
193
 
194
 
195
 
196
/* Write a register */
197 1350 nogj
void dma_write32( oraddr_t addr, uint32_t value )
198 212 erez
{
199 503 erez
  unsigned i;
200
  struct dma_controller *dma = NULL;
201 212 erez
 
202 503 erez
  /* Find which controller this is */
203
  for ( i = 0; i < MAX_DMAS && dma == NULL; ++ i ) {
204
    if ( (addr >= dmas[i].baseaddr) && (addr < dmas[i].baseaddr + DMA_ADDR_SPACE) )
205
      dma = &(dmas[i]);
206
  }
207 235 erez
 
208 503 erez
  /* verify we found a controller */
209
  if ( dma == NULL ) {
210 1350 nogj
    fprintf( stderr, "dma_write32( 0x%"PRIxADDR" ): Out of range\n", addr );
211 884 markom
    runtime.sim.cont_run = 0;
212 503 erez
    return;
213
  }
214 212 erez
 
215 503 erez
  addr -= dma->baseaddr;
216 212 erez
 
217 503 erez
  if ( addr % 4 != 0 ) {
218 1350 nogj
    fprintf( stderr, "dma_write32( 0x%"PRIxADDR", 0x%08"PRIx32" ): Not register-aligned\n", addr + dma->baseaddr, value );
219 884 markom
    runtime.sim.cont_run = 0;
220 503 erez
    return;
221
  }
222 212 erez
 
223 503 erez
  /* case of global (not per-channel) registers */
224
  if ( addr < DMA_CH_BASE ) {
225
    switch( addr ) {
226
    case DMA_CSR:
227
      if ( TEST_FLAG( value, DMA_CSR, PAUSE ) )
228
        fprintf( stderr, "dma: PAUSE not implemented\n" );
229
      break;
230 212 erez
 
231 503 erez
    case DMA_INT_MSK_A: dma->regs.int_msk_a = value; break;
232
    case DMA_INT_MSK_B: dma->regs.int_msk_b = value; break;
233
    case DMA_INT_SRC_A: dma->regs.int_src_a = value; break;
234
    case DMA_INT_SRC_B: dma->regs.int_src_b = value; break;
235
    default:
236 1350 nogj
      fprintf( stderr, "dma_write32( 0x%"PRIxADDR" ): Illegal register\n",
237
               addr + dma->baseaddr );
238 884 markom
      runtime.sim.cont_run = 0;
239 503 erez
      return;
240
    }
241
  } else {
242
    /* case of per-channel registers */
243
    unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
244
    struct dma_channel *channel = &(dma->ch[chno]);
245
    channel->referenced = 1;
246
    addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
247
    switch( addr ) {
248
    case DMA_CSR: dma_write_ch_csr( &(dma->ch[chno]), value ); break;
249
    case DMA_CH_SZ: channel->regs.sz = value; break;
250
    case DMA_CH_A0: channel->regs.a0 = value; break;
251
    case DMA_CH_AM0: channel->regs.am0 = value; break;
252
    case DMA_CH_A1: channel->regs.a1 = value; break;
253
    case DMA_CH_AM1: channel->regs.am1 = value; break;
254
    case DMA_CH_DESC: channel->regs.desc = value; break;
255
    case DMA_CH_SWPTR: channel->regs.swptr = value; break;
256
    }
257
  }
258 212 erez
}
259
 
260
 
261
/* Write a channel CSR
262
 * This ensures only the writable bits are modified.
263
 */
264
void dma_write_ch_csr( struct dma_channel *channel, unsigned long value )
265
{
266 503 erez
  /* Copy the writable bits to the channel CSR */
267
  channel->regs.csr &= ~DMA_CH_CSR_WRITE_MASK;
268
  channel->regs.csr |= value & DMA_CH_CSR_WRITE_MASK;
269 212 erez
}
270
 
271
 
272
/*
273
 * Simulation of control signals
274
 * To be used by simulations for other devices, e.g. ethernet
275
 */
276
 
277
void set_dma_req_i( unsigned dma_controller, unsigned channel )
278
{
279 503 erez
  dmas[dma_controller].ch[channel].dma_req_i = 1;
280 212 erez
}
281
 
282
void clear_dma_req_i( unsigned dma_controller, unsigned channel )
283
{
284 503 erez
  dmas[dma_controller].ch[channel].dma_req_i = 0;
285 212 erez
}
286
 
287
void set_dma_nd_i( unsigned dma_controller, unsigned channel )
288
{
289 503 erez
  dmas[dma_controller].ch[channel].dma_nd_i = 1;
290 212 erez
}
291
 
292
void clear_dma_nd_i( unsigned dma_controller, unsigned channel )
293
{
294 503 erez
  dmas[dma_controller].ch[channel].dma_nd_i = 0;
295 212 erez
}
296
 
297 235 erez
unsigned check_dma_ack_o( unsigned dma_controller, unsigned channel )
298 212 erez
{
299 503 erez
  return dmas[dma_controller].ch[channel].dma_ack_o;
300 212 erez
}
301
 
302
 
303
 
304
/* Simulation hook. Must be called every clock cycle to simulate DMA. */
305
void dma_clock()
306
{
307 503 erez
  unsigned i;
308
  for ( i = 0; i < MAX_DMAS; ++ i ) {
309
    if ( dmas[i].baseaddr != 0 )
310
      dma_controller_clock( &(dmas[i]) );
311
  }
312 212 erez
}
313
 
314
 
315
/* Clock tick for one DMA controller.
316
 * This does the actual "DMA" operation.
317
 * One chunk is transferred per clock.
318
 */
319
void dma_controller_clock( struct dma_controller *dma )
320
{
321 1308 phoenix
  unsigned chno;
322 503 erez
  int breakpoint = 0;
323 235 erez
 
324 503 erez
  for ( chno = 0; chno < DMA_NUM_CHANNELS; ++ chno ) {
325
    struct dma_channel *channel = &(dma->ch[chno]);
326 256 erez
 
327 503 erez
    /* check if this channel is enabled */
328
    if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) )
329
      continue;
330 212 erez
 
331 503 erez
    /* Do we need to abort? */
332
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) ) {
333
      debug( 3,  "DMA: STOP requested\n" );
334
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
335
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
336
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
337 235 erez
 
338 503 erez
      if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
339
           (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
340
        SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
341
        channel->controller->regs.int_src_a = channel->channel_mask;
342
        report_interrupt( channel->controller->irq );
343
      }
344 212 erez
 
345 503 erez
      continue;
346
    }
347 212 erez
 
348 503 erez
    /* In HW Handshake mode, only work when dma_req_i asserted */
349
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, MODE ) &&
350
         !channel->dma_req_i ) {
351
      continue;
352
    }
353 212 erez
 
354 503 erez
    /* If this is the first cycle of the transfer, initialize our state */
355
    if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) ) {
356
      debug( 4,  "DMA: Starting new transfer\n" );
357 256 erez
 
358 503 erez
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
359
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
360
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
361 212 erez
 
362 503 erez
      /* If using linked lists, copy the appropriate fields to our registers */
363
      if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
364
        dma_load_descriptor( channel );
365
      else
366
        channel->load_next_descriptor_when_done = 0;
367 235 erez
 
368 503 erez
      /* Set our internal status */
369
      dma_init_transfer( channel );
370 212 erez
 
371 503 erez
      /* Might need to skip descriptor */
372
      if ( CHANNEL_ND_I( channel ) ) {
373
        debug( 3,  "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" );
374
        dma_channel_terminate_transfer( channel, 0 );
375
        continue;
376
      }
377
    }
378 212 erez
 
379 503 erez
    /* Transfer one word */
380
    set_mem32( channel->destination, eval_mem32( channel->source, &breakpoint ), &breakpoint );
381 212 erez
 
382 503 erez
    /* Advance the source and destionation pointers */
383
    masked_increase( &(channel->source), channel->source_mask );
384
    masked_increase( &(channel->destination), channel->destination_mask );
385
    ++ channel->words_transferred;
386 212 erez
 
387 503 erez
    /* Have we finished a whole chunk? */
388
    channel->dma_ack_o = (channel->words_transferred % channel->chunk_size == 0);
389 212 erez
 
390 503 erez
    /* When done with a chunk, check for dma_nd_i */
391
    if ( CHANNEL_ND_I( channel ) ) {
392
      debug( 3,  "DMA: dma_nd_i asserted\n" );
393
      dma_channel_terminate_transfer( channel, 0 );
394
      continue;
395
    }
396 235 erez
 
397 503 erez
    /* Are we done? */
398
    if ( channel->words_transferred >= channel->total_size )
399
      dma_channel_terminate_transfer( channel, 1 );
400
  }
401 212 erez
}
402
 
403
 
404
/* Copy relevant valued from linked list descriptor to channel registers */
405
void dma_load_descriptor( struct dma_channel *channel )
406
{
407 503 erez
  int breakpoint = 0;
408
  unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );
409 212 erez
 
410 503 erez
  channel->load_next_descriptor_when_done = !TEST_FLAG( desc_csr, DMA_DESC_CSR, EOL );
411 212 erez
 
412 503 erez
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_SRC ) );
413
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_DST ) );
414
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, SRC_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, SRC_SEL ) );
415
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, DST_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, DST_SEL ) );
416 212 erez
 
417 503 erez
  SET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ,        GET_FIELD( desc_csr, DMA_DESC_CSR, TOT_SZ ) );
418 212 erez
 
419 503 erez
  channel->regs.a0 = eval_mem32( channel->regs.desc + DMA_DESC_ADR0, &breakpoint );
420
  channel->regs.a1 = eval_mem32( channel->regs.desc + DMA_DESC_ADR1, &breakpoint );
421 212 erez
 
422 503 erez
  channel->current_descriptor = channel->regs.desc;
423
  channel->regs.desc = eval_mem32( channel->regs.desc + DMA_DESC_NEXT, &breakpoint );
424 212 erez
}
425
 
426
 
427
/* Initialize internal parameters used to implement transfers */
428
void dma_init_transfer( struct dma_channel *channel )
429
{
430 503 erez
  channel->source = channel->regs.a0;
431
  channel->destination = channel->regs.a1;
432
  channel->source_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC ) ? channel->regs.am0 : 0;
433
  channel->destination_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST ) ? channel->regs.am1 : 0;
434
  channel->total_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ );
435
  channel->chunk_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, CHK_SZ );
436
  if ( !channel->chunk_size || (channel->chunk_size > channel->total_size) )
437
    channel->chunk_size = channel->total_size;
438
  channel->words_transferred = 0;
439 212 erez
}
440
 
441
 
442
/* Take care of transfer termination */
443
void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt )
444
{
445 503 erez
  debug( 4,  "DMA: Terminating transfer\n" );
446 256 erez
 
447 503 erez
  /* Might be working in a linked list */
448
  if ( channel->load_next_descriptor_when_done ) {
449
    dma_load_descriptor( channel );
450
    dma_init_transfer( channel );
451
    return;
452
  }
453 212 erez
 
454 503 erez
  /* Might be in auto-restart mode */
455
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, ARS ) ) {
456
    dma_init_transfer( channel );
457
    return;
458
  }
459 212 erez
 
460 503 erez
  /* If needed, write amount of data transferred back to memory */
461
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, SZ_WB ) &&
462
       TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) ) {
463
    /* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
464
    unsigned long remaining_words = channel->total_size - channel->words_transferred;
465
    SET_FIELD( channel->regs.sz, DMA_DESC_CSR, TOT_SZ, remaining_words );
466
  }
467 212 erez
 
468 503 erez
  /* Mark end of transfer */
469
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
470
  SET_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
471
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
472
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
473 235 erez
 
474 503 erez
  /* If needed, generate interrupt */
475
  if ( generate_interrupt ) {
476
    /* TODO: Which channel should we interrupt? */
477
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_DONE ) &&
478
         (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
479
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
480
      channel->controller->regs.int_src_a = channel->channel_mask;
481
      report_interrupt( channel->controller->irq );
482
    }
483
  }
484 212 erez
}
485
 
486
/* Utility function: add 4 (one word) to *value, letting only the bits
 * selected by mask participate in the addition; bits outside the mask are
 * left untouched.  A zero mask therefore leaves *value unchanged. */
void masked_increase( unsigned long *value, unsigned long mask )
{
  unsigned long fixed_bits  = *value & ~mask;
  unsigned long moving_bits = (*value + 4) & mask;

  *value = fixed_bits | moving_bits;
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.