OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [tags/] [nog_patch_34/] [or1ksim/] [peripheral/] [dma.c] - Blame information for rev 997

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 212 erez
/* dma.c -- Simulation of DMA
2 503 erez
   Copyright (C) 2001 by Erez Volk, erez@opencores.org
3 212 erez
 
4 503 erez
   This file is part of OpenRISC 1000 Architectural Simulator.
5 235 erez
 
6 503 erez
   This program is free software; you can redistribute it and/or modify
7
   it under the terms of the GNU General Public License as published by
8
   the Free Software Foundation; either version 2 of the License, or
9
   (at your option) any later version.
10 235 erez
 
11 503 erez
   This program is distributed in the hope that it will be useful,
12
   but WITHOUT ANY WARRANTY; without even the implied warranty of
13
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14
   GNU General Public License for more details.
15 212 erez
 
16 503 erez
   You should have received a copy of the GNU General Public License
17
   along with this program; if not, write to the Free Software
18
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 235 erez
*/
20 212 erez
 
21
/*
22
 * This simulation of the DMA core is not meant to be full.
23
 * It is written only to allow simulating the Ethernet core.
24
 * Of course, if anyone feels like perfecting it, feel free...
25
 */
26
 
27
#include <stdio.h>
#include <string.h>

#include "dma.h"
#include "sim-config.h"
#include "pic.h"
#include "abstract.h"
#include "fields.h"
32
 
33
/* The representation of the DMA controllers */
static struct dma_controller dmas[MAX_DMAS];

/* Bus-access callbacks handed to register_memoryarea() in dma_reset() */
static unsigned long dma_read32( unsigned long addr );
static void dma_write32( unsigned long addr, unsigned long value );

/* Channel CSR access (reads have clear-on-read side effects) */
static unsigned long dma_read_ch_csr( struct dma_channel *channel );
static void dma_write_ch_csr( struct dma_channel *channel, unsigned long value );
/* One simulated clock tick for a single controller */
static void dma_controller_clock( struct dma_controller *dma );
/* External (linked-list) descriptor handling and transfer lifecycle */
static void dma_load_descriptor( struct dma_channel *channel );
static void dma_init_transfer( struct dma_channel *channel );
static void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt );

/* Add 4 to *value, but only within the address bits selected by mask */
static void masked_increase( unsigned long *value, unsigned long mask );

/* True when the channel should skip to the next descriptor: HW handshake
   mode (MODE) with external descriptors (USE_ED) and dma_nd_i asserted */
#define CHANNEL_ND_I(ch) (TEST_FLAG(ch->regs.csr,DMA_CH_CSR,MODE) && TEST_FLAG(ch->regs.csr,DMA_CH_CSR,USE_ED) && ch->dma_nd_i)
49
 
50
 
51
/* Reset. Initializes all registers to default and places devices in memory address space. */
52
void dma_reset()
53
{
54 503 erez
  unsigned i;
55 212 erez
 
56 503 erez
  memset( dmas, 0, sizeof(dmas) );
57 235 erez
 
58 503 erez
  for ( i = 0; i < config.ndmas; ++ i ) {
59
    struct dma_controller *dma = &(dmas[i]);
60
    unsigned channel_number;
61 212 erez
 
62 503 erez
    dma->baseaddr = config.dmas[i].baseaddr;
63
    dma->irq = config.dmas[i].irq;
64
    for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number ) {
65
      dma->ch[channel_number].controller = &(dmas[i]);
66
      dma->ch[channel_number].channel_number = channel_number;
67
      dma->ch[channel_number].channel_mask = 1LU << channel_number;
68
      dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
69
    }
70
    if ( dma->baseaddr != 0 )
71 970 simons
      register_memoryarea( dma->baseaddr, DMA_ADDR_SPACE, 4, 0, dma_read32, dma_write32);
72 503 erez
  }
73 212 erez
}
74
 
75
/* Print register values on stdout */
76
void dma_status( void )
77
{
78 503 erez
  unsigned i, j;
79 212 erez
 
80 503 erez
  for ( i = 0; i < config.ndmas; ++ i ) {
81
    struct dma_controller *dma = &(dmas[i]);
82 212 erez
 
83 503 erez
    if ( dma->baseaddr == 0 )
84
      continue;
85 212 erez
 
86 997 markom
    PRINTF( "\nDMA controller %u at 0x%08X:\n", i, dma->baseaddr );
87
    PRINTF( "CSR       : 0x%08lX\n", dma->regs.csr );
88
    PRINTF( "INT_MSK_A : 0x%08lX\n", dma->regs.int_msk_a );
89
    PRINTF( "INT_MSK_B : 0x%08lX\n", dma->regs.int_msk_b );
90
    PRINTF( "INT_SRC_A : 0x%08lX\n", dma->regs.int_src_a );
91
    PRINTF( "INT_SRC_B : 0x%08lX\n", dma->regs.int_src_b );
92 212 erez
 
93 503 erez
    for ( j = 0; j < DMA_NUM_CHANNELS; ++ j ) {
94
      struct dma_channel *channel = &(dma->ch[j]);
95
      if ( !channel->referenced )
96
        continue;
97 997 markom
      PRINTF( "CH%u_CSR   : 0x%08lX\n", j, channel->regs.csr );
98
      PRINTF( "CH%u_SZ    : 0x%08lX\n", j, channel->regs.sz );
99
      PRINTF( "CH%u_A0    : 0x%08lX\n", j, channel->regs.a0 );
100
      PRINTF( "CH%u_AM0   : 0x%08lX\n", j, channel->regs.am0 );
101
      PRINTF( "CH%u_A1    : 0x%08lX\n", j, channel->regs.a1 );
102
      PRINTF( "CH%u_AM1   : 0x%08lX\n", j, channel->regs.am1 );
103
      PRINTF( "CH%u_DESC  : 0x%08lX\n", j, channel->regs.desc );
104
      PRINTF( "CH%u_SWPTR : 0x%08lX\n", j, channel->regs.swptr );
105 503 erez
    }
106
  }
107 212 erez
}
108
 
109
 
110
/* Read a register */
111 235 erez
unsigned long dma_read32( unsigned long addr )
112 212 erez
{
113 503 erez
  unsigned i;
114
  struct dma_controller *dma = NULL;
115 212 erez
 
116 503 erez
  for ( i = 0; i < MAX_DMAS && dma == NULL; ++ i ) {
117
    if ( addr >= dmas[i].baseaddr && addr < dmas[i].baseaddr + DMA_ADDR_SPACE )
118
      dma = &(dmas[i]);
119
  }
120 235 erez
 
121 503 erez
  /* verify we found a controller */
122
  if ( dma == NULL ) {
123
    fprintf( stderr, "dma_read32( 0x%08lX ): Out of range\n", addr );
124 884 markom
    runtime.sim.cont_run = 0;
125 503 erez
    return 0;
126
  }
127 212 erez
 
128 503 erez
  addr -= dma->baseaddr;
129 212 erez
 
130 503 erez
  if ( addr % 4 != 0 ) {
131
    fprintf( stderr, "dma_read32( 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr );
132 884 markom
    runtime.sim.cont_run = 0;
133 503 erez
    return 0;
134
  }
135 212 erez
 
136 503 erez
  if ( addr < DMA_CH_BASE ) {
137
    /* case of global (not per-channel) registers */
138
    switch( addr ) {
139
    case DMA_CSR: return dma->regs.csr;
140
    case DMA_INT_MSK_A: return dma->regs.int_msk_a;
141
    case DMA_INT_MSK_B: return dma->regs.int_msk_b;
142
    case DMA_INT_SRC_A: return dma->regs.int_src_a;
143
    case DMA_INT_SRC_B: return dma->regs.int_src_b;
144
    default:
145
      fprintf( stderr, "dma_read32( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
146 884 markom
      runtime.sim.cont_run = 0;
147 503 erez
      return 0;
148
    }
149
  } else {
150
    /* case of per-channel registers */
151
    unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
152
    addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
153
    switch( addr ) {
154
    case DMA_CH_CSR: return dma_read_ch_csr( &(dma->ch[chno]) );
155
    case DMA_CH_SZ: return dma->ch[chno].regs.sz;
156
    case DMA_CH_A0: return dma->ch[chno].regs.a0;
157
    case DMA_CH_AM0: return dma->ch[chno].regs.am0;
158
    case DMA_CH_A1: return dma->ch[chno].regs.a1;
159
    case DMA_CH_AM1: return dma->ch[chno].regs.am1;
160
    case DMA_CH_DESC: return dma->ch[chno].regs.desc;
161
    case DMA_CH_SWPTR: return dma->ch[chno].regs.swptr;
162
    }
163
  }
164 212 erez
}
165
 
166
 
167
/* Handle read from a channel CSR */
168
unsigned long dma_read_ch_csr( struct dma_channel *channel )
169
{
170 503 erez
  unsigned long result = channel->regs.csr;
171 212 erez
 
172 503 erez
  /* before returning, clear all relevant bits */
173
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_CHUNK_DONE );
174
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
175
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
176
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
177 212 erez
 
178 503 erez
  return result;
179 212 erez
}
180
 
181
 
182
 
183
/* Write a register */
184 235 erez
void dma_write32( unsigned long addr, unsigned long value )
185 212 erez
{
186 503 erez
  unsigned i;
187
  struct dma_controller *dma = NULL;
188 212 erez
 
189 503 erez
  /* Find which controller this is */
190
  for ( i = 0; i < MAX_DMAS && dma == NULL; ++ i ) {
191
    if ( (addr >= dmas[i].baseaddr) && (addr < dmas[i].baseaddr + DMA_ADDR_SPACE) )
192
      dma = &(dmas[i]);
193
  }
194 235 erez
 
195 503 erez
  /* verify we found a controller */
196
  if ( dma == NULL ) {
197
    fprintf( stderr, "dma_write32( 0x%08lX ): Out of range\n", addr );
198 884 markom
    runtime.sim.cont_run = 0;
199 503 erez
    return;
200
  }
201 212 erez
 
202 503 erez
  addr -= dma->baseaddr;
203 212 erez
 
204 503 erez
  if ( addr % 4 != 0 ) {
205
    fprintf( stderr, "dma_write32( 0x%08lX, 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr, value );
206 884 markom
    runtime.sim.cont_run = 0;
207 503 erez
    return;
208
  }
209 212 erez
 
210 503 erez
  /* case of global (not per-channel) registers */
211
  if ( addr < DMA_CH_BASE ) {
212
    switch( addr ) {
213
    case DMA_CSR:
214
      if ( TEST_FLAG( value, DMA_CSR, PAUSE ) )
215
        fprintf( stderr, "dma: PAUSE not implemented\n" );
216
      break;
217 212 erez
 
218 503 erez
    case DMA_INT_MSK_A: dma->regs.int_msk_a = value; break;
219
    case DMA_INT_MSK_B: dma->regs.int_msk_b = value; break;
220
    case DMA_INT_SRC_A: dma->regs.int_src_a = value; break;
221
    case DMA_INT_SRC_B: dma->regs.int_src_b = value; break;
222
    default:
223
      fprintf( stderr, "dma_write32( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
224 884 markom
      runtime.sim.cont_run = 0;
225 503 erez
      return;
226
    }
227
  } else {
228
    /* case of per-channel registers */
229
    unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
230
    struct dma_channel *channel = &(dma->ch[chno]);
231
    channel->referenced = 1;
232
    addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
233
    switch( addr ) {
234
    case DMA_CSR: dma_write_ch_csr( &(dma->ch[chno]), value ); break;
235
    case DMA_CH_SZ: channel->regs.sz = value; break;
236
    case DMA_CH_A0: channel->regs.a0 = value; break;
237
    case DMA_CH_AM0: channel->regs.am0 = value; break;
238
    case DMA_CH_A1: channel->regs.a1 = value; break;
239
    case DMA_CH_AM1: channel->regs.am1 = value; break;
240
    case DMA_CH_DESC: channel->regs.desc = value; break;
241
    case DMA_CH_SWPTR: channel->regs.swptr = value; break;
242
    }
243
  }
244 212 erez
}
245
 
246
 
247
/* Write a channel CSR
248
 * This ensures only the writable bits are modified.
249
 */
250
void dma_write_ch_csr( struct dma_channel *channel, unsigned long value )
251
{
252 503 erez
  /* Copy the writable bits to the channel CSR */
253
  channel->regs.csr &= ~DMA_CH_CSR_WRITE_MASK;
254
  channel->regs.csr |= value & DMA_CH_CSR_WRITE_MASK;
255 212 erez
}
256
 
257
 
258
/*
259
 * Simulation of control signals
260
 * To be used by simulations for other devices, e.g. ethernet
261
 */
262
 
263
void set_dma_req_i( unsigned dma_controller, unsigned channel )
264
{
265 503 erez
  dmas[dma_controller].ch[channel].dma_req_i = 1;
266 212 erez
}
267
 
268
void clear_dma_req_i( unsigned dma_controller, unsigned channel )
269
{
270 503 erez
  dmas[dma_controller].ch[channel].dma_req_i = 0;
271 212 erez
}
272
 
273
void set_dma_nd_i( unsigned dma_controller, unsigned channel )
274
{
275 503 erez
  dmas[dma_controller].ch[channel].dma_nd_i = 1;
276 212 erez
}
277
 
278
void clear_dma_nd_i( unsigned dma_controller, unsigned channel )
279
{
280 503 erez
  dmas[dma_controller].ch[channel].dma_nd_i = 0;
281 212 erez
}
282
 
283 235 erez
unsigned check_dma_ack_o( unsigned dma_controller, unsigned channel )
284 212 erez
{
285 503 erez
  return dmas[dma_controller].ch[channel].dma_ack_o;
286 212 erez
}
287
 
288
 
289
 
290
/* Simulation hook. Must be called every clock cycle to simulate DMA. */
291
void dma_clock()
292
{
293 503 erez
  unsigned i;
294
  for ( i = 0; i < MAX_DMAS; ++ i ) {
295
    if ( dmas[i].baseaddr != 0 )
296
      dma_controller_clock( &(dmas[i]) );
297
  }
298 212 erez
}
299
 
300
 
301
/* Clock tick for one DMA controller.
302
 * This does the actual "DMA" operation.
303
 * One chunk is transferred per clock.
304
 */
305
void dma_controller_clock( struct dma_controller *dma )
306
{
307 503 erez
  unsigned chno, i;
308
  int breakpoint = 0;
309 235 erez
 
310 503 erez
  for ( chno = 0; chno < DMA_NUM_CHANNELS; ++ chno ) {
311
    struct dma_channel *channel = &(dma->ch[chno]);
312 256 erez
 
313 503 erez
    /* check if this channel is enabled */
314
    if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) )
315
      continue;
316 212 erez
 
317 503 erez
    /* Do we need to abort? */
318
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) ) {
319
      debug( 3,  "DMA: STOP requested\n" );
320
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
321
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
322
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
323 235 erez
 
324 503 erez
      if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
325
           (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
326
        SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
327
        channel->controller->regs.int_src_a = channel->channel_mask;
328
        report_interrupt( channel->controller->irq );
329
      }
330 212 erez
 
331 503 erez
      continue;
332
    }
333 212 erez
 
334 503 erez
    /* In HW Handshake mode, only work when dma_req_i asserted */
335
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, MODE ) &&
336
         !channel->dma_req_i ) {
337
      continue;
338
    }
339 212 erez
 
340 503 erez
    /* If this is the first cycle of the transfer, initialize our state */
341
    if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) ) {
342
      debug( 4,  "DMA: Starting new transfer\n" );
343 256 erez
 
344 503 erez
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
345
      CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
346
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
347 212 erez
 
348 503 erez
      /* If using linked lists, copy the appropriate fields to our registers */
349
      if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
350
        dma_load_descriptor( channel );
351
      else
352
        channel->load_next_descriptor_when_done = 0;
353 235 erez
 
354 503 erez
      /* Set our internal status */
355
      dma_init_transfer( channel );
356 212 erez
 
357 503 erez
      /* Might need to skip descriptor */
358
      if ( CHANNEL_ND_I( channel ) ) {
359
        debug( 3,  "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" );
360
        dma_channel_terminate_transfer( channel, 0 );
361
        continue;
362
      }
363
    }
364 212 erez
 
365 503 erez
    /* Transfer one word */
366
    set_mem32( channel->destination, eval_mem32( channel->source, &breakpoint ), &breakpoint );
367 212 erez
 
368 503 erez
    /* Advance the source and destionation pointers */
369
    masked_increase( &(channel->source), channel->source_mask );
370
    masked_increase( &(channel->destination), channel->destination_mask );
371
    ++ channel->words_transferred;
372 212 erez
 
373 503 erez
    /* Have we finished a whole chunk? */
374
    channel->dma_ack_o = (channel->words_transferred % channel->chunk_size == 0);
375 212 erez
 
376 503 erez
    /* When done with a chunk, check for dma_nd_i */
377
    if ( CHANNEL_ND_I( channel ) ) {
378
      debug( 3,  "DMA: dma_nd_i asserted\n" );
379
      dma_channel_terminate_transfer( channel, 0 );
380
      continue;
381
    }
382 235 erez
 
383 503 erez
    /* Are we done? */
384
    if ( channel->words_transferred >= channel->total_size )
385
      dma_channel_terminate_transfer( channel, 1 );
386
  }
387 212 erez
}
388
 
389
 
390
/* Copy relevant valued from linked list descriptor to channel registers */
391
void dma_load_descriptor( struct dma_channel *channel )
392
{
393 503 erez
  int breakpoint = 0;
394
  unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );
395 212 erez
 
396 503 erez
  channel->load_next_descriptor_when_done = !TEST_FLAG( desc_csr, DMA_DESC_CSR, EOL );
397 212 erez
 
398 503 erez
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_SRC ) );
399
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_DST ) );
400
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, SRC_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, SRC_SEL ) );
401
  ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, DST_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, DST_SEL ) );
402 212 erez
 
403 503 erez
  SET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ,        GET_FIELD( desc_csr, DMA_DESC_CSR, TOT_SZ ) );
404 212 erez
 
405 503 erez
  channel->regs.a0 = eval_mem32( channel->regs.desc + DMA_DESC_ADR0, &breakpoint );
406
  channel->regs.a1 = eval_mem32( channel->regs.desc + DMA_DESC_ADR1, &breakpoint );
407 212 erez
 
408 503 erez
  channel->current_descriptor = channel->regs.desc;
409
  channel->regs.desc = eval_mem32( channel->regs.desc + DMA_DESC_NEXT, &breakpoint );
410 212 erez
}
411
 
412
 
413
/* Initialize internal parameters used to implement transfers */
414
void dma_init_transfer( struct dma_channel *channel )
415
{
416 503 erez
  channel->source = channel->regs.a0;
417
  channel->destination = channel->regs.a1;
418
  channel->source_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC ) ? channel->regs.am0 : 0;
419
  channel->destination_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST ) ? channel->regs.am1 : 0;
420
  channel->total_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ );
421
  channel->chunk_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, CHK_SZ );
422
  if ( !channel->chunk_size || (channel->chunk_size > channel->total_size) )
423
    channel->chunk_size = channel->total_size;
424
  channel->words_transferred = 0;
425 212 erez
}
426
 
427
 
428
/* Take care of transfer termination */
429
void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt )
430
{
431 503 erez
  debug( 4,  "DMA: Terminating transfer\n" );
432 256 erez
 
433 503 erez
  /* Might be working in a linked list */
434
  if ( channel->load_next_descriptor_when_done ) {
435
    dma_load_descriptor( channel );
436
    dma_init_transfer( channel );
437
    return;
438
  }
439 212 erez
 
440 503 erez
  /* Might be in auto-restart mode */
441
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, ARS ) ) {
442
    dma_init_transfer( channel );
443
    return;
444
  }
445 212 erez
 
446 503 erez
  /* If needed, write amount of data transferred back to memory */
447
  if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, SZ_WB ) &&
448
       TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) ) {
449
    int breakpoint = 0;
450
    unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );
451
    /* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
452
    unsigned long remaining_words = channel->total_size - channel->words_transferred;
453
    SET_FIELD( channel->regs.sz, DMA_DESC_CSR, TOT_SZ, remaining_words );
454
  }
455 212 erez
 
456 503 erez
  /* Mark end of transfer */
457
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
458
  SET_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
459
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
460
  CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
461 235 erez
 
462 503 erez
  /* If needed, generate interrupt */
463
  if ( generate_interrupt ) {
464
    /* TODO: Which channel should we interrupt? */
465
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_DONE ) &&
466
         (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
467
      SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
468
      channel->controller->regs.int_src_a = channel->channel_mask;
469
      report_interrupt( channel->controller->irq );
470
    }
471
  }
472 212 erez
}
473
 
474
/* Utility function: add 4 (one word) to *value, letting only the bits
 * selected by mask change; bits outside the mask are held fixed. A zero
 * mask therefore leaves *value untouched. */
void masked_increase( unsigned long *value, unsigned long mask )
{
  unsigned long fixed_bits = *value & ~mask;
  unsigned long stepped    = (*value + 4) & mask;

  *value = fixed_bits | stepped;
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.