OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

or1k/tags/nog_patch_39/or1ksim/peripheral/dma.c (rev 261)
/* dma.c -- Simulation of DMA
         Copyright (C) 2001 by Erez Volk, erez@opencores.org

         This file is part of OpenRISC 1000 Architectural Simulator.

         This program is free software; you can redistribute it and/or modify
         it under the terms of the GNU General Public License as published by
         the Free Software Foundation; either version 2 of the License, or
         (at your option) any later version.

         This program is distributed in the hope that it will be useful,
         but WITHOUT ANY WARRANTY; without even the implied warranty of
         MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
         GNU General Public License for more details.

         You should have received a copy of the GNU General Public License
         along with this program; if not, write to the Free Software
         Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/*
 * This simulation of the DMA core is not meant to be full.
 * It is written only to allow simulating the Ethernet core.
 * Of course, if anyone feels like perfecting it, feel free...
 */

#include <stdio.h>      /* printf, fprintf */
#include <string.h>     /* memset */

#include "dma.h"
#include "sim-config.h"
#include "trace.h"
#include "pic.h"
#include "abstract.h"
#include "fields.h"

#define dprintf(x) printf x

/* The representation of the DMA controllers */
static struct dma_controller dmas[NR_DMAS];

static unsigned long dma_read32( unsigned long addr );
static void dma_write32( unsigned long addr, unsigned long value );

static unsigned long dma_read_ch_csr( struct dma_channel *channel );
static void dma_write_ch_csr( struct dma_channel *channel, unsigned long value );
static void dma_controller_clock( struct dma_controller *dma );
static void dma_load_descriptor( struct dma_channel *channel );
static void dma_init_transfer( struct dma_channel *channel );
static void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt );

static void masked_increase( unsigned long *value, unsigned long mask );

#define CHANNEL_ND_I(ch) (TEST_FLAG(ch->regs.csr,DMA_CH_CSR,MODE) && TEST_FLAG(ch->regs.csr,DMA_CH_CSR,USE_ED) && ch->dma_nd_i)
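
/* Note on CHANNEL_ND_I above: a channel reacts to the dma_nd_i ("next
   descriptor") input only when it is in hardware-handshake mode (MODE) with
   external descriptors enabled (USE_ED); dma_controller_clock() below then
   finishes the current descriptor early, without raising the DONE interrupt. */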


/* Reset. Initializes all registers to default and places devices in memory address space. */
void dma_reset()
{
        unsigned i;

        memset( dmas, 0, sizeof(dmas) );

        if ( !config.dmas_enabled )
                config.ndmas = 0;

        for ( i = 0; i < config.ndmas; ++ i ) {
                struct dma_controller *dma = &(dmas[i]);
                unsigned channel_number;

                dma->baseaddr = config.dmas[i].baseaddr;
                dma->irq = config.dmas[i].irq;
                for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number ) {
                        dma->ch[channel_number].controller = &(dmas[i]);
                        dma->ch[channel_number].channel_number = channel_number;
                        dma->ch[channel_number].channel_mask = 1LU << channel_number;
                        dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
                }
                if ( dma->baseaddr != 0 )
                        register_memoryarea( dma->baseaddr, DMA_ADDR_SPACE, 4, dma_read32, dma_write32);
        }
}

/* Print register values on stdout */
void dma_status( void )
{
        unsigned i, j;

        for ( i = 0; i < config.ndmas; ++ i ) {
                struct dma_controller *dma = &(dmas[i]);

                if ( dma->baseaddr == 0 )
                        continue;

                printf( "\nDMA controller %u at 0x%08X:\n", i, dma->baseaddr );
                printf( "CSR       : 0x%08lX\n", dma->regs.csr );
                printf( "INT_MSK_A : 0x%08lX\n", dma->regs.int_msk_a );
                printf( "INT_MSK_B : 0x%08lX\n", dma->regs.int_msk_b );
                printf( "INT_SRC_A : 0x%08lX\n", dma->regs.int_src_a );
                printf( "INT_SRC_B : 0x%08lX\n", dma->regs.int_src_b );

                for ( j = 0; j < DMA_NUM_CHANNELS; ++ j ) {
                        struct dma_channel *channel = &(dma->ch[j]);
                        if ( !channel->referenced )
                                continue;
                        printf( "CH%u_CSR   : 0x%08lX\n", j, channel->regs.csr );
                        printf( "CH%u_SZ    : 0x%08lX\n", j, channel->regs.sz );
                        printf( "CH%u_A0    : 0x%08lX\n", j, channel->regs.a0 );
                        printf( "CH%u_AM0   : 0x%08lX\n", j, channel->regs.am0 );
                        printf( "CH%u_A1    : 0x%08lX\n", j, channel->regs.a1 );
                        printf( "CH%u_AM1   : 0x%08lX\n", j, channel->regs.am1 );
                        printf( "CH%u_DESC  : 0x%08lX\n", j, channel->regs.desc );
                        printf( "CH%u_SWPTR : 0x%08lX\n", j, channel->regs.swptr );
                }
        }
}


/* Read a register */
unsigned long dma_read32( unsigned long addr )
{
        unsigned i;
        struct dma_controller *dma = NULL;

        for ( i = 0; i < NR_DMAS && dma == NULL; ++ i ) {
                if ( addr >= dmas[i].baseaddr && addr < dmas[i].baseaddr + DMA_ADDR_SPACE )
                        dma = &(dmas[i]);
        }

        /* verify we found a controller */
        if ( dma == NULL ) {
                fprintf( stderr, "dma_read32( 0x%08lX ): Out of range\n", addr );
                cont_run = 0;
                return 0;
        }

        addr -= dma->baseaddr;

        if ( addr % 4 != 0 ) {
                fprintf( stderr, "dma_read32( 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr );
                cont_run = 0;
                return 0;
        }

        if ( addr < DMA_CH_BASE ) {
                /* case of global (not per-channel) registers */
                switch( addr ) {
                case DMA_CSR: return dma->regs.csr;
                case DMA_INT_MSK_A: return dma->regs.int_msk_a;
                case DMA_INT_MSK_B: return dma->regs.int_msk_b;
                case DMA_INT_SRC_A: return dma->regs.int_src_a;
                case DMA_INT_SRC_B: return dma->regs.int_src_b;
                default:
                        fprintf( stderr, "dma_read32( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
                        cont_run = 0;
                        return 0;
                }
        } else {
                /* case of per-channel registers */
                unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
                addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
                switch( addr ) {
                case DMA_CH_CSR: return dma_read_ch_csr( &(dma->ch[chno]) );
                case DMA_CH_SZ: return dma->ch[chno].regs.sz;
                case DMA_CH_A0: return dma->ch[chno].regs.a0;
                case DMA_CH_AM0: return dma->ch[chno].regs.am0;
                case DMA_CH_A1: return dma->ch[chno].regs.a1;
                case DMA_CH_AM1: return dma->ch[chno].regs.am1;
                case DMA_CH_DESC: return dma->ch[chno].regs.desc;
                case DMA_CH_SWPTR: return dma->ch[chno].regs.swptr;
                }
        }

        /* Not reached as long as DMA_CH_SIZE covers exactly the eight channel
           registers above; return 0 so the function cannot fall off the end. */
        return 0;
}
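
/* A worked example of the decode above, assuming the register map of the
 * OpenCores DMA/Bridge core as defined in dma.h (DMA_CH_BASE = 0x20 and
 * DMA_CH_SIZE = 0x20; those values are not repeated in this file, so treat
 * them as an assumption): a read at baseaddr + 0x44 becomes addr = 0x44,
 * which is >= DMA_CH_BASE, so chno = (0x44 - 0x20) / 0x20 = 1 and the
 * per-channel offset is (0x44 - 0x20) % 0x20 = 0x04, i.e. CH1_SZ. */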


/* Handle read from a channel CSR */
unsigned long dma_read_ch_csr( struct dma_channel *channel )
{
        unsigned long result = channel->regs.csr;

        /* before returning, clear all relevant bits */
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_CHUNK_DONE );
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );

        return result;
}


/* Write a register */
void dma_write32( unsigned long addr, unsigned long value )
{
        unsigned i;
        struct dma_controller *dma = NULL;

        /* Find which controller this is */
        for ( i = 0; i < NR_DMAS && dma == NULL; ++ i ) {
                if ( (addr >= dmas[i].baseaddr) && (addr < dmas[i].baseaddr + DMA_ADDR_SPACE) )
                        dma = &(dmas[i]);
        }

        /* verify we found a controller */
        if ( dma == NULL ) {
                fprintf( stderr, "dma_write32( 0x%08lX ): Out of range\n", addr );
                cont_run = 0;
                return;
        }

        addr -= dma->baseaddr;

        if ( addr % 4 != 0 ) {
                fprintf( stderr, "dma_write32( 0x%08lX, 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr, value );
                cont_run = 0;
                return;
        }

        /* case of global (not per-channel) registers */
        if ( addr < DMA_CH_BASE ) {
                switch( addr ) {
                case DMA_CSR:
                        if ( TEST_FLAG( value, DMA_CSR, PAUSE ) )
                                fprintf( stderr, "dma: PAUSE not implemented\n" );
                        break;

                case DMA_INT_MSK_A: dma->regs.int_msk_a = value; break;
                case DMA_INT_MSK_B: dma->regs.int_msk_b = value; break;
                case DMA_INT_SRC_A: dma->regs.int_src_a = value; break;
                case DMA_INT_SRC_B: dma->regs.int_src_b = value; break;
                default:
                        fprintf( stderr, "dma_write32( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
                        cont_run = 0;
                        return;
                }
        } else {
                /* case of per-channel registers */
                unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
                struct dma_channel *channel = &(dma->ch[chno]);
                channel->referenced = 1;
                addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
                switch( addr ) {
                case DMA_CH_CSR: dma_write_ch_csr( &(dma->ch[chno]), value ); break;
                case DMA_CH_SZ: channel->regs.sz = value; break;
                case DMA_CH_A0: channel->regs.a0 = value; break;
                case DMA_CH_AM0: channel->regs.am0 = value; break;
                case DMA_CH_A1: channel->regs.a1 = value; break;
                case DMA_CH_AM1: channel->regs.am1 = value; break;
                case DMA_CH_DESC: channel->regs.desc = value; break;
                case DMA_CH_SWPTR: channel->regs.swptr = value; break;
                }
        }
}


/* Write a channel CSR
 * This ensures only the writable bits are modified.
 */
void dma_write_ch_csr( struct dma_channel *channel, unsigned long value )
{
        /* Copy the writable bits to the channel CSR */
        channel->regs.csr &= ~DMA_CH_CSR_WRITE_MASK;
        channel->regs.csr |= value & DMA_CH_CSR_WRITE_MASK;
}


/*
 * Simulation of control signals
 * To be used by simulations for other devices, e.g. ethernet
 */

void set_dma_req_i( unsigned dma_controller, unsigned channel )
{
        dmas[dma_controller].ch[channel].dma_req_i = 1;
}

void clear_dma_req_i( unsigned dma_controller, unsigned channel )
{
        dmas[dma_controller].ch[channel].dma_req_i = 0;
}

void set_dma_nd_i( unsigned dma_controller, unsigned channel )
{
        dmas[dma_controller].ch[channel].dma_nd_i = 1;
}

void clear_dma_nd_i( unsigned dma_controller, unsigned channel )
{
        dmas[dma_controller].ch[channel].dma_nd_i = 0;
}

unsigned check_dma_ack_o( unsigned dma_controller, unsigned channel )
{
        return dmas[dma_controller].ch[channel].dma_ack_o;
}
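
/* Illustrative sketch only (not part of or1ksim): how a device model such as
 * the Ethernet core might drive the handshake above.  The controller and
 * channel indices (0, 0) and the function name are made up for the example;
 * the channel must have MODE (hardware handshake) set in its CSR for
 * dma_req_i to matter.  Kept under #if 0 so it is never compiled. */
#if 0
static void example_device_clock( void )
{
        /* Ask DMA controller 0, channel 0 to start moving data */
        set_dma_req_i( 0, 0 );

        /* The simulator's main loop calls dma_clock(); once a whole chunk has
           been transferred, dma_ack_o is reported high for that channel */
        if ( check_dma_ack_o( 0, 0 ) ) {
                /* Chunk done: drop the request (or keep it asserted to go on
                   transferring the next chunk) */
                clear_dma_req_i( 0, 0 );
        }
}
#endif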



/* Simulation hook. Must be called every clock cycle to simulate DMA. */
void dma_clock()
{
        unsigned i;
        for ( i = 0; i < NR_DMAS; ++ i ) {
                if ( dmas[i].baseaddr != 0 )
                        dma_controller_clock( &(dmas[i]) );
        }
}


/* Clock tick for one DMA controller.
 * This does the actual "DMA" operation.
 * One chunk is transferred per clock.
 */
void dma_controller_clock( struct dma_controller *dma )
{
        unsigned chno, i;
        int breakpoint = 0;

        for ( chno = 0; chno < DMA_NUM_CHANNELS; ++ chno ) {
                struct dma_channel *channel = &(dma->ch[chno]);

                /* check if this channel is enabled */
                if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) )
                        continue;

                /* Do we need to abort? */
                if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) ) {
                        dprintf(( "DMA: STOP requested\n" ));
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
                        SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );

                        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
                                         (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
                                SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
                                channel->controller->regs.int_src_a = channel->channel_mask;
                                report_interrupt( channel->controller->irq );
                        }

                        continue;
                }

                /* In HW Handshake mode, only work when dma_req_i asserted */
                if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, MODE ) &&
                                 !channel->dma_req_i ) {
                        continue;
                }

                /* If this is the first cycle of the transfer, initialize our state */
                if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) ) {
                        dprintf(( "Starting new transfer\n" ));

                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
                        SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );

                        /* If using linked lists, copy the appropriate fields to our registers */
                        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
                                dma_load_descriptor( channel );
                        else
                                channel->load_next_descriptor_when_done = 0;

                        /* Set our internal status */
                        dma_init_transfer( channel );

                        /* Might need to skip descriptor */
                        if ( CHANNEL_ND_I( channel ) ) {
                                dprintf(( "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" ));
                                dma_channel_terminate_transfer( channel, 0 );
                                continue;
                        }
                }

                /* Transfer one word */
                set_mem32( channel->destination, eval_mem32( channel->source, &breakpoint ), &breakpoint );

                /* Advance the source and destination pointers */
                masked_increase( &(channel->source), channel->source_mask );
                masked_increase( &(channel->destination), channel->destination_mask );
                ++ channel->words_transferred;

                /* Have we finished a whole chunk? */
                channel->dma_ack_o = (channel->words_transferred % channel->chunk_size == 0);

                /* When done with a chunk, check for dma_nd_i */
                if ( CHANNEL_ND_I( channel ) ) {
                        dprintf(( "DMA: dma_nd_i asserted\n" ));
                        dma_channel_terminate_transfer( channel, 0 );
                        continue;
                }

                /* Are we done? */
                if ( channel->words_transferred >= channel->total_size )
                        dma_channel_terminate_transfer( channel, 1 );
        }
}


/* Copy relevant values from linked list descriptor to channel registers */
void dma_load_descriptor( struct dma_channel *channel )
{
        int breakpoint = 0;
        unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );

        channel->load_next_descriptor_when_done = !TEST_FLAG( desc_csr, DMA_DESC_CSR, EOL );

        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_SRC ) );
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_DST ) );
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, SRC_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, SRC_SEL ) );
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, DST_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, DST_SEL ) );

        SET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ, GET_FIELD( desc_csr, DMA_DESC_CSR, TOT_SZ ) );

        channel->regs.a0 = eval_mem32( channel->regs.desc + DMA_DESC_ADR0, &breakpoint );
        channel->regs.a1 = eval_mem32( channel->regs.desc + DMA_DESC_ADR1, &breakpoint );

        channel->current_descriptor = channel->regs.desc;
        channel->regs.desc = eval_mem32( channel->regs.desc + DMA_DESC_NEXT, &breakpoint );
}
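
/* For reference, the descriptor words read above (at the DMA_DESC_* offsets
 * defined in dma.h) are, as used by this simulation:
 *   desc + DMA_DESC_CSR  : EOL, INC_SRC, INC_DST, SRC_SEL, DST_SEL flags and
 *                          the TOT_SZ field
 *   desc + DMA_DESC_ADR0 : source address (copied into CHx_A0)
 *   desc + DMA_DESC_ADR1 : destination address (copied into CHx_A1)
 *   desc + DMA_DESC_NEXT : address of the next descriptor in the list
 * While EOL is clear, dma_channel_terminate_transfer() loads the next
 * descriptor and the transfer continues. */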


/* Initialize internal parameters used to implement transfers */
void dma_init_transfer( struct dma_channel *channel )
{
        channel->source = channel->regs.a0;
        channel->destination = channel->regs.a1;
        channel->source_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC ) ? channel->regs.am0 : 0;
        channel->destination_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST ) ? channel->regs.am1 : 0;
        channel->total_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ );
        channel->chunk_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, CHK_SZ );
        if ( !channel->chunk_size || (channel->chunk_size > channel->total_size) )
                channel->chunk_size = channel->total_size;
        channel->words_transferred = 0;
}


/* Take care of transfer termination */
void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt )
{
        dprintf(( "DMA: Terminating transfer\n" ));

        /* Might be working in a linked list */
        if ( channel->load_next_descriptor_when_done ) {
                dma_load_descriptor( channel );
                dma_init_transfer( channel );
                return;
        }

        /* Might be in auto-restart mode */
        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, ARS ) ) {
                dma_init_transfer( channel );
                return;
        }

        /* If needed, write amount of data transferred back to memory */
        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, SZ_WB ) &&
                         TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) ) {
                int breakpoint = 0;
                unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );
                /* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
                unsigned long remaining_words = channel->total_size - channel->words_transferred;
                SET_FIELD( channel->regs.sz, DMA_DESC_CSR, TOT_SZ, remaining_words );
        }

        /* Mark end of transfer */
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
        SET_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );

        /* If needed, generate interrupt */
        if ( generate_interrupt ) {
                /* TODO: Which channel should we interrupt? */
                if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_DONE ) &&
                                 (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
                        SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
                        channel->controller->regs.int_src_a = channel->channel_mask;
                        report_interrupt( channel->controller->irq );
                }
        }
}

/* Utility function: Add 4 to a value with a mask */
void masked_increase( unsigned long *value, unsigned long mask )
{
        *value = (*value & ~mask) | ((*value + 4) & mask);
}
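
/* Worked example: after dma_reset() the address masks are 0xFFFFFFFC, so only
 * the word-address bits are incremented and the pointer simply advances by 4
 * each call (0x1000 -> 0x1004 -> 0x1008 ...).  With a narrower mask the
 * pointer wraps inside a window while the bits outside the mask are kept:
 * with mask = 0x0000000C, a value of 0x100C becomes
 * (0x100C & ~0x0C) | ((0x100C + 4) & 0x0C) = 0x1000, i.e. the address cycles
 * through 0x1000..0x100C. */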
