OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [tags/] [stable_0_2_0_rc2/] [or1ksim/] [peripheral/] [dma.c] - Blame information for rev 1308

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 212 erez
/* dma.c -- Simulation of DMA
2 503 erez
   Copyright (C) 2001 by Erez Volk, erez@opencores.org
3 212 erez
 
4 503 erez
   This file is part of OpenRISC 1000 Architectural Simulator.
5 235 erez
 
6 503 erez
   This program is free software; you can redistribute it and/or modify
7
   it under the terms of the GNU General Public License as published by
8
   the Free Software Foundation; either version 2 of the License, or
9
   (at your option) any later version.
10 235 erez
 
11 503 erez
   This program is distributed in the hope that it will be useful,
12
   but WITHOUT ANY WARRANTY; without even the implied warranty of
13
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14
   GNU General Public License for more details.
15 212 erez
 
16 503 erez
   You should have received a copy of the GNU General Public License
17
   along with this program; if not, write to the Free Software
18
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 235 erez
*/
20 212 erez
 
21
/*
22
 * This simulation of the DMA core is not meant to be full.
23
 * It is written only to allow simulating the Ethernet core.
24
 * Of course, if anyone feels like perfecting it, feel free...
25
 */
26
 
27 1308 phoenix
#include <string.h>
28
 
29 212 erez
#include "dma.h"
30
#include "sim-config.h"
31
#include "pic.h"
32 235 erez
#include "abstract.h"
33 212 erez
#include "fields.h"
34 1308 phoenix
#include "debug.h"
35 212 erez
 
36
/* The representation of the DMA controllers */
37 424 markom
static struct dma_controller dmas[MAX_DMAS];
38 212 erez
 
39 235 erez
static unsigned long dma_read32( unsigned long addr );
40
static void dma_write32( unsigned long addr, unsigned long value );
41
 
42 212 erez
static unsigned long dma_read_ch_csr( struct dma_channel *channel );
43
static void dma_write_ch_csr( struct dma_channel *channel, unsigned long value );
44
static void dma_controller_clock( struct dma_controller *dma );
45
static void dma_load_descriptor( struct dma_channel *channel );
46
static void dma_init_transfer( struct dma_channel *channel );
47
static void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt );
48
 
49
static void masked_increase( unsigned long *value, unsigned long mask );
50
 
51
#define CHANNEL_ND_I(ch) (TEST_FLAG(ch->regs.csr,DMA_CH_CSR,MODE) && TEST_FLAG(ch->regs.csr,DMA_CH_CSR,USE_ED) && ch->dma_nd_i)
52
 
53
 
54
/* Reset. Initializes all registers to default and places devices in memory address space. */
55
void dma_reset()
56
{
57 503 erez
  unsigned i;
58 212 erez
 
59 503 erez
  memset( dmas, 0, sizeof(dmas) );
60 235 erez
 
61 503 erez
  for ( i = 0; i < config.ndmas; ++ i ) {
62
    struct dma_controller *dma = &(dmas[i]);
63
    unsigned channel_number;
64 212 erez
 
65 503 erez
    dma->baseaddr = config.dmas[i].baseaddr;
66
    dma->irq = config.dmas[i].irq;
67
    for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number ) {
68
      dma->ch[channel_number].controller = &(dmas[i]);
69
      dma->ch[channel_number].channel_number = channel_number;
70
      dma->ch[channel_number].channel_mask = 1LU << channel_number;
71
      dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
72
    }
73
    if ( dma->baseaddr != 0 )
74 970 simons
      register_memoryarea( dma->baseaddr, DMA_ADDR_SPACE, 4, 0, dma_read32, dma_write32);
75 503 erez
  }
76 212 erez
}
77
 
78
/* Print register values on stdout */
79
void dma_status( void )
80
{
81 503 erez
  unsigned i, j;
82 212 erez
 
83 503 erez
  for ( i = 0; i < config.ndmas; ++ i ) {
84
    struct dma_controller *dma = &(dmas[i]);
85 212 erez
 
86 503 erez
    if ( dma->baseaddr == 0 )
87
      continue;
88 212 erez
 
89 1308 phoenix
    PRINTF( "\nDMA controller %u at 0x%08lX:\n", i, dma->baseaddr );
90 997 markom
    PRINTF( "CSR       : 0x%08lX\n", dma->regs.csr );
91
    PRINTF( "INT_MSK_A : 0x%08lX\n", dma->regs.int_msk_a );
92
    PRINTF( "INT_MSK_B : 0x%08lX\n", dma->regs.int_msk_b );
93
    PRINTF( "INT_SRC_A : 0x%08lX\n", dma->regs.int_src_a );
94
    PRINTF( "INT_SRC_B : 0x%08lX\n", dma->regs.int_src_b );
95 212 erez
 
96 503 erez
    for ( j = 0; j < DMA_NUM_CHANNELS; ++ j ) {
97
      struct dma_channel *channel = &(dma->ch[j]);
98
      if ( !channel->referenced )
99
        continue;
100 997 markom
      PRINTF( "CH%u_CSR   : 0x%08lX\n", j, channel->regs.csr );
101
      PRINTF( "CH%u_SZ    : 0x%08lX\n", j, channel->regs.sz );
102
      PRINTF( "CH%u_A0    : 0x%08lX\n", j, channel->regs.a0 );
103
      PRINTF( "CH%u_AM0   : 0x%08lX\n", j, channel->regs.am0 );
104
      PRINTF( "CH%u_A1    : 0x%08lX\n", j, channel->regs.a1 );
105
      PRINTF( "CH%u_AM1   : 0x%08lX\n", j, channel->regs.am1 );
106
      PRINTF( "CH%u_DESC  : 0x%08lX\n", j, channel->regs.desc );
107
      PRINTF( "CH%u_SWPTR : 0x%08lX\n", j, channel->regs.swptr );
108 503 erez
    }
109
  }
110 212 erez
}
111
 
112
 
113
/* Read a register */
114 235 erez
unsigned long dma_read32( unsigned long addr )
115 212 erez
{
116 503 erez
  unsigned i;
117
  struct dma_controller *dma = NULL;
118 212 erez
 
119 503 erez
  for ( i = 0; i < MAX_DMAS && dma == NULL; ++ i ) {
120
    if ( addr >= dmas[i].baseaddr && addr < dmas[i].baseaddr + DMA_ADDR_SPACE )
121
      dma = &(dmas[i]);
122
  }
123 235 erez
 
124 503 erez
  /* verify we found a controller */
125
  if ( dma == NULL ) {
126
    fprintf( stderr, "dma_read32( 0x%08lX ): Out of range\n", addr );
127 884 markom
    runtime.sim.cont_run = 0;
128 503 erez
    return 0;
129
  }
130 212 erez
 
131 503 erez
  addr -= dma->baseaddr;
132 212 erez
 
133 503 erez
  if ( addr % 4 != 0 ) {
134
    fprintf( stderr, "dma_read32( 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr );
135 884 markom
    runtime.sim.cont_run = 0;
136 503 erez
    return 0;
137
  }
138 212 erez
 
139 503 erez
  if ( addr < DMA_CH_BASE ) {
140
    /* case of global (not per-channel) registers */
141
    switch( addr ) {
142
    case DMA_CSR: return dma->regs.csr;
143
    case DMA_INT_MSK_A: return dma->regs.int_msk_a;
144
    case DMA_INT_MSK_B: return dma->regs.int_msk_b;
145
    case DMA_INT_SRC_A: return dma->regs.int_src_a;
146
    case DMA_INT_SRC_B: return dma->regs.int_src_b;
147
    default:
148
      fprintf( stderr, "dma_read32( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
149 884 markom
      runtime.sim.cont_run = 0;
150 503 erez
      return 0;
151
    }
152
  } else {
153
    /* case of per-channel registers */
154
    unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
155
    addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
156
    switch( addr ) {
157
    case DMA_CH_CSR: return dma_read_ch_csr( &(dma->ch[chno]) );
158
    case DMA_CH_SZ: return dma->ch[chno].regs.sz;
159
    case DMA_CH_A0: return dma->ch[chno].regs.a0;
160
    case DMA_CH_AM0: return dma->ch[chno].regs.am0;
161
    case DMA_CH_A1: return dma->ch[chno].regs.a1;
162
    case DMA_CH_AM1: return dma->ch[chno].regs.am1;
163
    case DMA_CH_DESC: return dma->ch[chno].regs.desc;
164
    case DMA_CH_SWPTR: return dma->ch[chno].regs.swptr;
165
    }
166
  }
167 212 erez
}
168
 
169
 
170
/* Handle read from a channel CSR */
171
unsigned long dma_read_ch_csr( struct dma_channel *channel )
172
{
173 503 erez
  unsigned long result = channel->regs.csr;
174 212 erez
 
175 503 erez
  /* before returning, clear all relevant bits */
176
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_CHUNK_DONE );
177
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
178
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
179
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
180 212 erez
 
181 503 erez
  return result;
182 212 erez
}
183
 
184
 
185
 
186
/* Write a register */
187 235 erez
void dma_write32( unsigned long addr, unsigned long value )
188 212 erez
{
189 503 erez
  unsigned i;
190
  struct dma_controller *dma = NULL;
191 212 erez
 
192 503 erez
  /* Find which controller this is */
193
  for ( i = 0; i < MAX_DMAS && dma == NULL; ++ i ) {
194
    if ( (addr >= dmas[i].baseaddr) && (addr < dmas[i].baseaddr + DMA_ADDR_SPACE) )
195
      dma = &(dmas[i]);
196
  }
197 235 erez
 
198 503 erez
  /* verify we found a controller */
199
  if ( dma == NULL ) {
200
    fprintf( stderr, "dma_write32( 0x%08lX ): Out of range\n", addr );
201 884 markom
    runtime.sim.cont_run = 0;
202 503 erez
    return;
203
  }
204 212 erez
 
205 503 erez
  addr -= dma->baseaddr;
206 212 erez
 
207 503 erez
  if ( addr % 4 != 0 ) {
208
    fprintf( stderr, "dma_write32( 0x%08lX, 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr, value );
209 884 markom
    runtime.sim.cont_run = 0;
210 503 erez
    return;
211
  }
212 212 erez
 
213 503 erez
  /* case of global (not per-channel) registers */
214
  if ( addr < DMA_CH_BASE ) {
215
    switch( addr ) {
216
    case DMA_CSR:
217
      if ( TEST_FLAG( value, DMA_CSR, PAUSE ) )
218
        fprintf( stderr, "dma: PAUSE not implemented\n" );
219
      break;
220 212 erez
 
221 503 erez
    case DMA_INT_MSK_A: dma->regs.int_msk_a = value; break;
222
    case DMA_INT_MSK_B: dma->regs.int_msk_b = value; break;
223
    case DMA_INT_SRC_A: dma->regs.int_src_a = value; break;
224
    case DMA_INT_SRC_B: dma->regs.int_src_b = value; break;
225
    default:
226
      fprintf( stderr, "dma_write32( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
227 884 markom
      runtime.sim.cont_run = 0;
228 503 erez
      return;
229
    }
230
  } else {
231
    /* case of per-channel registers */
232
    unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
233
    struct dma_channel *channel = &(dma->ch[chno]);
234
    channel->referenced = 1;
235
    addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
236
    switch( addr ) {
237
    case DMA_CSR: dma_write_ch_csr( &(dma->ch[chno]), value ); break;
238
    case DMA_CH_SZ: channel->regs.sz = value; break;
239
    case DMA_CH_A0: channel->regs.a0 = value; break;
240
    case DMA_CH_AM0: channel->regs.am0 = value; break;
241
    case DMA_CH_A1: channel->regs.a1 = value; break;
242
    case DMA_CH_AM1: channel->regs.am1 = value; break;
243
    case DMA_CH_DESC: channel->regs.desc = value; break;
244
    case DMA_CH_SWPTR: channel->regs.swptr = value; break;
245
    }
246
  }
247 212 erez
}
248
 
249
 
250
/* Write a channel CSR
251
 * This ensures only the writable bits are modified.
252
 */
253
void dma_write_ch_csr( struct dma_channel *channel, unsigned long value )
254
{
255 503 erez
  /* Copy the writable bits to the channel CSR */
256
  channel->regs.csr &= ~DMA_CH_CSR_WRITE_MASK;
257
  channel->regs.csr |= value & DMA_CH_CSR_WRITE_MASK;
258 212 erez
}
259
 
260
 
261
/*
262
 * Simulation of control signals
263
 * To be used by simulations for other devices, e.g. ethernet
264
 */
265
 
266
void set_dma_req_i( unsigned dma_controller, unsigned channel )
267
{
268 503 erez
  dmas[dma_controller].ch[channel].dma_req_i = 1;
269 212 erez
}
270
 
271
void clear_dma_req_i( unsigned dma_controller, unsigned channel )
272
{
273 503 erez
  dmas[dma_controller].ch[channel].dma_req_i = 0;
274 212 erez
}
275
 
276
void set_dma_nd_i( unsigned dma_controller, unsigned channel )
277
{
278 503 erez
  dmas[dma_controller].ch[channel].dma_nd_i = 1;
279 212 erez
}
280
 
281
void clear_dma_nd_i( unsigned dma_controller, unsigned channel )
282
{
283 503 erez
  dmas[dma_controller].ch[channel].dma_nd_i = 0;
284 212 erez
}
285
 
286 235 erez
unsigned check_dma_ack_o( unsigned dma_controller, unsigned channel )
287 212 erez
{
288 503 erez
  return dmas[dma_controller].ch[channel].dma_ack_o;
289 212 erez
}
290
 
291
 
292
 
293
/* Simulation hook. Must be called every clock cycle to simulate DMA. */
294
void dma_clock()
295
{
296 503 erez
  unsigned i;
297
  for ( i = 0; i < MAX_DMAS; ++ i ) {
298
    if ( dmas[i].baseaddr != 0 )
299
      dma_controller_clock( &(dmas[i]) );
300
  }
301 212 erez
}
302
 
303
 
304
/* Clock tick for one DMA controller.
 * This does the actual "DMA" operation.
 * One chunk is transferred per clock.
 *
 * For each enabled channel this: honors STOP (abort + optional error
 * interrupt), waits for dma_req_i in HW-handshake mode, sets up internal
 * transfer state on the first busy cycle, then copies one 32-bit word
 * from source to destination per call.
 */
void dma_controller_clock( struct dma_controller *dma )
{
  unsigned chno;
  int breakpoint = 0;

  for ( chno = 0; chno < DMA_NUM_CHANNELS; ++ chno ) {
    struct dma_channel *channel = &(dma->ch[chno]);

    /* check if this channel is enabled */
    if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) )
      continue;

    /* Do we need to abort? */
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) ) {
      debug( 3,  "DMA: STOP requested\n" );
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );

      /* Raise the error interrupt only if enabled on the channel (INE_ERR)
         AND the channel is unmasked in the controller's INT_MSK_A */
      if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
           (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
        SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
        channel->controller->regs.int_src_a = channel->channel_mask;
        report_interrupt( channel->controller->irq );
      }

      continue;
    }

    /* In HW Handshake mode, only work when dma_req_i asserted */
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, MODE ) &&
         !channel->dma_req_i ) {
      continue;
    }

    /* If this is the first cycle of the transfer, initialize our state */
    if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) ) {
      debug( 4,  "DMA: Starting new transfer\n" );

      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );

      /* If using linked lists, copy the appropriate fields to our registers */
      if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
        dma_load_descriptor( channel );
      else
        channel->load_next_descriptor_when_done = 0;

      /* Set our internal status */
      dma_init_transfer( channel );

      /* Might need to skip descriptor */
      if ( CHANNEL_ND_I( channel ) ) {
        debug( 3,  "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" );
        dma_channel_terminate_transfer( channel, 0 );
        continue;
      }
    }

    /* Transfer one word */
    set_mem32( channel->destination, eval_mem32( channel->source, &breakpoint ), &breakpoint );

    /* Advance the source and destination pointers */
    masked_increase( &(channel->source), channel->source_mask );
    masked_increase( &(channel->destination), channel->destination_mask );
    ++ channel->words_transferred;

    /* Have we finished a whole chunk?
       NOTE(review): if a transfer is started with total_size == 0,
       dma_init_transfer leaves chunk_size == 0 and this modulo divides
       by zero — presumably software never starts a zero-length transfer;
       confirm against callers. */
    channel->dma_ack_o = (channel->words_transferred % channel->chunk_size == 0);

    /* When done with a chunk, check for dma_nd_i */
    if ( CHANNEL_ND_I( channel ) ) {
      debug( 3,  "DMA: dma_nd_i asserted\n" );
      dma_channel_terminate_transfer( channel, 0 );
      continue;
    }

    /* Are we done? */
    if ( channel->words_transferred >= channel->total_size )
      dma_channel_terminate_transfer( channel, 1 );
  }
}
391
 
392
 
393
/* Copy relevant values from linked list descriptor to channel registers.
 * Reads the descriptor at regs.desc from simulated memory, mirrors its
 * control bits and total size into the channel registers, then advances
 * regs.desc to the next descriptor in the chain. */
void dma_load_descriptor( struct dma_channel *channel )
{
  int breakpoint = 0;
  /* First word of the descriptor is its CSR */
  unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );

  /* EOL (end of list) set means this is the last descriptor */
  channel->load_next_descriptor_when_done = !TEST_FLAG( desc_csr, DMA_DESC_CSR, EOL );

  /* Mirror the descriptor's control bits into the channel CSR */
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_SRC ) );
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_DST ) );
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, SRC_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, SRC_SEL ) );
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, DST_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, DST_SEL ) );

  SET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ,        GET_FIELD( desc_csr, DMA_DESC_CSR, TOT_SZ ) );

  /* Source and destination addresses come from the descriptor too */
  channel->regs.a0 = eval_mem32( channel->regs.desc + DMA_DESC_ADR0, &breakpoint );
  channel->regs.a1 = eval_mem32( channel->regs.desc + DMA_DESC_ADR1, &breakpoint );

  /* Remember which descriptor we are executing, then chain to the next */
  channel->current_descriptor = channel->regs.desc;
  channel->regs.desc = eval_mem32( channel->regs.desc + DMA_DESC_NEXT, &breakpoint );
}
414
 
415
 
416
/* Initialize internal parameters used to implement transfers */
417
void dma_init_transfer( struct dma_channel *channel )
418
{
419 503 erez
  channel->source = channel->regs.a0;
420
  channel->destination = channel->regs.a1;
421
  channel->source_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC ) ? channel->regs.am0 : 0;
422
  channel->destination_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST ) ? channel->regs.am1 : 0;
423
  channel->total_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ );
424
  channel->chunk_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, CHK_SZ );
425
  if ( !channel->chunk_size || (channel->chunk_size > channel->total_size) )
426
    channel->chunk_size = channel->total_size;
427
  channel->words_transferred = 0;
428 212 erez
}
429
 
430
 
431
/* Take care of transfer termination */
432
void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt )
433
{
434 503 erez
  debug( 4,  "DMA: Terminating transfer\n" );
435 256 erez
 
436 503 erez
  /* Might be working in a linked list */
437
  if ( channel->load_next_descriptor_when_done ) {
438
    dma_load_descriptor( channel );
439
    dma_init_transfer( channel );
440
    return;
441
  }
442 212 erez
 
443 503 erez
  /* Might be in auto-restart mode */
444
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, ARS ) ) {
445
    dma_init_transfer( channel );
446
    return;
447
  }
448 212 erez
 
449 503 erez
  /* If needed, write amount of data transferred back to memory */
450
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, SZ_WB ) &&
451
       TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) ) {
452
    /* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
453
    unsigned long remaining_words = channel->total_size - channel->words_transferred;
454
    SET_FIELD( channel->regs.sz, DMA_DESC_CSR, TOT_SZ, remaining_words );
455
  }
456 212 erez
 
457 503 erez
  /* Mark end of transfer */
458
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
459
  SET_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
460
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
461
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
462 235 erez
 
463 503 erez
  /* If needed, generate interrupt */
464
  if ( generate_interrupt ) {
465
    /* TODO: Which channel should we interrupt? */
466
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_DONE ) &&
467
         (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
468
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
469
      channel->controller->regs.int_src_a = channel->channel_mask;
470
      report_interrupt( channel->controller->irq );
471
    }
472
  }
473 212 erez
}
474
 
475
/* Utility function: Add 4 to a value with a mask.
 * Only the bits selected by mask take part in the increment; the bits
 * outside the mask are preserved, so the counter wraps within the
 * masked address window. */
void masked_increase( unsigned long *value, unsigned long mask )
{
  unsigned long fixed_bits = *value & ~mask;
  unsigned long counted_bits = (*value + 4) & mask;
  *value = fixed_bits | counted_bits;
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.