OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [tags/] [nog_patch_39/] [or1ksim/] [peripheral/] [dma.c] - Blame information for rev 418

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 212 erez
/* dma.c -- Simulation of DMA
2 235 erez
         Copyright (C) 2001 by Erez Volk, erez@opencores.org
3 212 erez
 
4 235 erez
         This file is part of OpenRISC 1000 Architectural Simulator.
5
 
6
         This program is free software; you can redistribute it and/or modify
7
         it under the terms of the GNU General Public License as published by
8
         the Free Software Foundation; either version 2 of the License, or
9
         (at your option) any later version.
10
 
11
         This program is distributed in the hope that it will be useful,
12
         but WITHOUT ANY WARRANTY; without even the implied warranty of
13
         MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.    See the
14
         GNU General Public License for more details.
15 212 erez
 
16 235 erez
         You should have received a copy of the GNU General Public License
17
         along with this program; if not, write to the Free Software
18
         Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19
*/
20 212 erez
 
21
/*
22
 * This simulation of the DMA core is not meant to be full.
23
 * It is written only to allow simulating the Ethernet core.
24
 * Of course, if anyone feels like perfecting it, feel free...
25
 */
26
 
27
#include "dma.h"
28
#include "sim-config.h"
29
#include "trace.h"
30
#include "pic.h"
31 235 erez
#include "abstract.h"
32 212 erez
#include "fields.h"
33
 
34
/* The representation of the DMA controllers */
35
static struct dma_controller dmas[NR_DMAS];
36
 
37 235 erez
static unsigned long dma_read32( unsigned long addr );
38
static void dma_write32( unsigned long addr, unsigned long value );
39
 
40 212 erez
static unsigned long dma_read_ch_csr( struct dma_channel *channel );
41
static void dma_write_ch_csr( struct dma_channel *channel, unsigned long value );
42
static void dma_controller_clock( struct dma_controller *dma );
43
static void dma_load_descriptor( struct dma_channel *channel );
44
static void dma_init_transfer( struct dma_channel *channel );
45
static void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt );
46
 
47
static void masked_increase( unsigned long *value, unsigned long mask );
48
 
49
#define CHANNEL_ND_I(ch) (TEST_FLAG(ch->regs.csr,DMA_CH_CSR,MODE) && TEST_FLAG(ch->regs.csr,DMA_CH_CSR,USE_ED) && ch->dma_nd_i)
50
 
51
 
52
/* Reset. Initializes all registers to default and places devices in memory address space. */
53
void dma_reset()
54
{
55 235 erez
        unsigned i;
56 212 erez
 
57 235 erez
        memset( dmas, 0, sizeof(dmas) );
58
 
59 261 markom
  if (!config.dmas_enabled)
60
    config.ndmas = 0;
61
 
62
        for ( i = 0; i < config.ndmas; ++ i ) {
63 235 erez
                struct dma_controller *dma = &(dmas[i]);
64
                unsigned channel_number;
65 212 erez
 
66 235 erez
                dma->baseaddr = config.dmas[i].baseaddr;
67
                dma->irq = config.dmas[i].irq;
68
                for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number ) {
69
                        dma->ch[channel_number].controller = &(dmas[i]);
70
                        dma->ch[channel_number].channel_number = channel_number;
71
                        dma->ch[channel_number].channel_mask = 1LU << channel_number;
72
                        dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
73
                }
74
                if ( dma->baseaddr != 0 )
75 261 markom
                        register_memoryarea( dma->baseaddr, DMA_ADDR_SPACE, 4, dma_read32, dma_write32);
76 212 erez
        }
77
}
78
 
79
/* Print register values on stdout */
80
void dma_status( void )
81
{
82 235 erez
        unsigned i, j;
83 212 erez
 
84 261 markom
        for ( i = 0; i < config.ndmas; ++ i ) {
85 235 erez
                struct dma_controller *dma = &(dmas[i]);
86 212 erez
 
87 235 erez
                if ( dma->baseaddr == 0 )
88
                        continue;
89 212 erez
 
90 235 erez
                printf( "\nDMA controller %u at 0x%08X:\n", i, dma->baseaddr );
91
                printf( "CSR                     : 0x%08lX\n", dma->regs.csr );
92
                printf( "INT_MSK_A : 0x%08lX\n", dma->regs.int_msk_a );
93
                printf( "INT_MSK_B : 0x%08lX\n", dma->regs.int_msk_b );
94
                printf( "INT_SRC_A : 0x%08lX\n", dma->regs.int_src_a );
95
                printf( "INT_SRC_B : 0x%08lX\n", dma->regs.int_src_b );
96 212 erez
 
97 235 erez
                for ( j = 0; j < DMA_NUM_CHANNELS; ++ j ) {
98
                        struct dma_channel *channel = &(dma->ch[j]);
99
                        if ( !channel->referenced )
100
                                continue;
101
                        printf( "CH%u_CSR               : 0x%08lX\n", j, channel->regs.csr );
102
                        printf( "CH%u_SZ                : 0x%08lX\n", j, channel->regs.sz );
103
                        printf( "CH%u_A0                : 0x%08lX\n", j, channel->regs.a0 );
104
                        printf( "CH%u_AM0               : 0x%08lX\n", j, channel->regs.am0 );
105
                        printf( "CH%u_A1                : 0x%08lX\n", j, channel->regs.a1 );
106
                        printf( "CH%u_AM1               : 0x%08lX\n", j, channel->regs.am1 );
107
                        printf( "CH%u_DESC      : 0x%08lX\n", j, channel->regs.desc );
108
                        printf( "CH%u_SWPTR : 0x%08lX\n", j, channel->regs.swptr );
109
                }
110 212 erez
        }
111
}
112
 
113
 
114
/* Read a register */
115 235 erez
unsigned long dma_read32( unsigned long addr )
116 212 erez
{
117 235 erez
        unsigned i;
118
        struct dma_controller *dma = NULL;
119 212 erez
 
120 235 erez
        for ( i = 0; i < NR_DMAS && dma == NULL; ++ i ) {
121
                if ( addr >= dmas[i].baseaddr && addr < dmas[i].baseaddr + DMA_ADDR_SPACE )
122
                        dma = &(dmas[i]);
123
        }
124
 
125
        /* verify we found a controller */
126
        if ( dma == NULL ) {
127
                fprintf( stderr, "dma_read32( 0x%08lX ): Out of range\n", addr );
128
                cont_run = 0;
129
                return 0;
130
        }
131 212 erez
 
132 235 erez
        addr -= dma->baseaddr;
133 212 erez
 
134 235 erez
        if ( addr % 4 != 0 ) {
135
                fprintf( stderr, "dma_read32( 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr );
136
                cont_run = 0;
137
                return 0;
138
        }
139 212 erez
 
140 235 erez
        if ( addr < DMA_CH_BASE ) {
141 252 erez
                /* case of global (not per-channel) registers */
142 235 erez
                switch( addr ) {
143
                case DMA_CSR: return dma->regs.csr;
144
                case DMA_INT_MSK_A: return dma->regs.int_msk_a;
145
                case DMA_INT_MSK_B: return dma->regs.int_msk_b;
146
                case DMA_INT_SRC_A: return dma->regs.int_src_a;
147
                case DMA_INT_SRC_B: return dma->regs.int_src_b;
148
                default:
149
                        fprintf( stderr, "dma_read32( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
150
                        cont_run = 0;
151
                        return 0;
152
                }
153
        } else {
154
                /* case of per-channel registers */
155
                unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
156
                addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
157
                switch( addr ) {
158
                case DMA_CH_CSR: return dma_read_ch_csr( &(dma->ch[chno]) );
159
                case DMA_CH_SZ: return dma->ch[chno].regs.sz;
160
                case DMA_CH_A0: return dma->ch[chno].regs.a0;
161
                case DMA_CH_AM0: return dma->ch[chno].regs.am0;
162
                case DMA_CH_A1: return dma->ch[chno].regs.a1;
163
                case DMA_CH_AM1: return dma->ch[chno].regs.am1;
164
                case DMA_CH_DESC: return dma->ch[chno].regs.desc;
165
                case DMA_CH_SWPTR: return dma->ch[chno].regs.swptr;
166
                }
167
        }
168 212 erez
}
169
 
170
 
171
/* Handle read from a channel CSR */
172
unsigned long dma_read_ch_csr( struct dma_channel *channel )
173
{
174 235 erez
        unsigned long result = channel->regs.csr;
175 212 erez
 
176 235 erez
        /* before returning, clear all relevant bits */
177
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_CHUNK_DONE );
178
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
179
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
180
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
181 212 erez
 
182 235 erez
        return result;
183 212 erez
}
184
 
185
 
186
 
187
/* Write a register */
188 235 erez
void dma_write32( unsigned long addr, unsigned long value )
189 212 erez
{
190 235 erez
        unsigned i;
191
        struct dma_controller *dma = NULL;
192 212 erez
 
193 235 erez
        /* Find which controller this is */
194
        for ( i = 0; i < NR_DMAS && dma == NULL; ++ i ) {
195
                if ( (addr >= dmas[i].baseaddr) && (addr < dmas[i].baseaddr + DMA_ADDR_SPACE) )
196
                        dma = &(dmas[i]);
197
        }
198
 
199
        /* verify we found a controller */
200
        if ( dma == NULL ) {
201
                fprintf( stderr, "dma_write32( 0x%08lX ): Out of range\n", addr );
202
                cont_run = 0;
203
                return;
204
        }
205 212 erez
 
206 235 erez
        addr -= dma->baseaddr;
207 212 erez
 
208 235 erez
        if ( addr % 4 != 0 ) {
209
                fprintf( stderr, "dma_write32( 0x%08lX, 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr, value );
210
                cont_run = 0;
211
                return;
212
        }
213 212 erez
 
214 235 erez
        /* case of global (not per-channel) registers */
215
        if ( addr < DMA_CH_BASE ) {
216
                switch( addr ) {
217
                case DMA_CSR:
218
                        if ( TEST_FLAG( value, DMA_CSR, PAUSE ) )
219
                                fprintf( stderr, "dma: PAUSE not implemented\n" );
220
                        break;
221 212 erez
 
222 235 erez
                case DMA_INT_MSK_A: dma->regs.int_msk_a = value; break;
223
                case DMA_INT_MSK_B: dma->regs.int_msk_b = value; break;
224
                case DMA_INT_SRC_A: dma->regs.int_src_a = value; break;
225
                case DMA_INT_SRC_B: dma->regs.int_src_b = value; break;
226
                default:
227
                        fprintf( stderr, "dma_write32( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
228
                        cont_run = 0;
229
                        return;
230
                }
231
        } else {
232
                /* case of per-channel registers */
233
                unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
234
                struct dma_channel *channel = &(dma->ch[chno]);
235
                channel->referenced = 1;
236
                addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
237
                switch( addr ) {
238
                case DMA_CSR: dma_write_ch_csr( &(dma->ch[chno]), value ); break;
239
                case DMA_CH_SZ: channel->regs.sz = value; break;
240
                case DMA_CH_A0: channel->regs.a0 = value; break;
241
                case DMA_CH_AM0: channel->regs.am0 = value; break;
242
                case DMA_CH_A1: channel->regs.a1 = value; break;
243
                case DMA_CH_AM1: channel->regs.am1 = value; break;
244
                case DMA_CH_DESC: channel->regs.desc = value; break;
245
                case DMA_CH_SWPTR: channel->regs.swptr = value; break;
246
                }
247 212 erez
        }
248
}
249
 
250
 
251
/* Write a channel CSR
252
 * This ensures only the writable bits are modified.
253
 */
254
void dma_write_ch_csr( struct dma_channel *channel, unsigned long value )
255
{
256 235 erez
        /* Copy the writable bits to the channel CSR */
257
        channel->regs.csr &= ~DMA_CH_CSR_WRITE_MASK;
258
        channel->regs.csr |= value & DMA_CH_CSR_WRITE_MASK;
259 212 erez
}
260
 
261
 
262
/*
263
 * Simulation of control signals
264
 * To be used by simulations for other devices, e.g. ethernet
265
 */
266
 
267
void set_dma_req_i( unsigned dma_controller, unsigned channel )
268
{
269 235 erez
        dmas[dma_controller].ch[channel].dma_req_i = 1;
270 212 erez
}
271
 
272
void clear_dma_req_i( unsigned dma_controller, unsigned channel )
273
{
274 235 erez
        dmas[dma_controller].ch[channel].dma_req_i = 0;
275 212 erez
}
276
 
277
void set_dma_nd_i( unsigned dma_controller, unsigned channel )
278
{
279 235 erez
        dmas[dma_controller].ch[channel].dma_nd_i = 1;
280 212 erez
}
281
 
282
void clear_dma_nd_i( unsigned dma_controller, unsigned channel )
283
{
284 235 erez
        dmas[dma_controller].ch[channel].dma_nd_i = 0;
285 212 erez
}
286
 
287 235 erez
unsigned check_dma_ack_o( unsigned dma_controller, unsigned channel )
288 212 erez
{
289 235 erez
        return dmas[dma_controller].ch[channel].dma_ack_o;
290 212 erez
}
291
 
292
 
293
 
294
/* Simulation hook. Must be called every clock cycle to simulate DMA. */
295
void dma_clock()
296
{
297 235 erez
        unsigned i;
298
        for ( i = 0; i < NR_DMAS; ++ i ) {
299
                if ( dmas[i].baseaddr != 0 )
300
                        dma_controller_clock( &(dmas[i]) );
301
        }
302 212 erez
}
303
 
304
 
305
/* Clock tick for one DMA controller.
 * This does the actual "DMA" operation.
 * One chunk is transferred per clock.
 *
 * For each enabled channel this handles, in order: STOP abort (with an
 * optional error interrupt via int_msk_a), HW-handshake gating on dma_req_i,
 * first-cycle transfer setup (descriptor load + internal state), one 32-bit
 * word copy, chunk-boundary dma_ack_o signalling, and termination when
 * dma_nd_i fires or the whole transfer is done.
 */
void dma_controller_clock( struct dma_controller *dma )
{
        unsigned chno, i;  /* NOTE(review): 'i' is declared but never used */
        int breakpoint = 0;

        for ( chno = 0; chno < DMA_NUM_CHANNELS; ++ chno ) {
                struct dma_channel *channel = &(dma->ch[chno]);

                /* check if this channel is enabled */
                if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) )
                        continue;

                /* Do we need to abort? */
                if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) ) {
                        debug( 3,  "DMA: STOP requested\n" );
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
                        SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );

                        /* Raise an error interrupt only if enabled for this
                         * channel and unmasked in int_msk_a */
                        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
                                         (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
                                SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
                                channel->controller->regs.int_src_a = channel->channel_mask;
                                report_interrupt( channel->controller->irq );
                        }

                        continue;
                }

                /* In HW Handshake mode, only work when dma_req_i asserted */
                if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, MODE ) &&
                                 !channel->dma_req_i ) {
                        continue;
                }

                /* If this is the first cycle of the transfer, initialize our state */
                if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) ) {
                        debug( 4,  "DMA: Starting new transfer\n" );

                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
                        SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );

                        /* If using linked lists, copy the appropriate fields to our registers */
                        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
                                dma_load_descriptor( channel );
                        else
                                channel->load_next_descriptor_when_done = 0;

                        /* Set our internal status */
                        dma_init_transfer( channel );

                        /* Might need to skip descriptor */
                        if ( CHANNEL_ND_I( channel ) ) {
                                debug( 3,  "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" );
                                dma_channel_terminate_transfer( channel, 0 );
                                continue;
                        }
                }

                /* Transfer one word from simulated memory to simulated memory */
                set_mem32( channel->destination, eval_mem32( channel->source, &breakpoint ), &breakpoint );

                /* Advance the source and destination pointers (a zero mask
                 * set by dma_init_transfer leaves the pointer fixed) */
                masked_increase( &(channel->source), channel->source_mask );
                masked_increase( &(channel->destination), channel->destination_mask );
                ++ channel->words_transferred;

                /* Have we finished a whole chunk?
                 * NOTE(review): chunk_size is 0 when total_size is 0, which
                 * would make this a division by zero -- confirm a zero-length
                 * transfer can never reach this point */
                channel->dma_ack_o = (channel->words_transferred % channel->chunk_size == 0);

                /* When done with a chunk, check for dma_nd_i */
                if ( CHANNEL_ND_I( channel ) ) {
                        debug( 3,  "DMA: dma_nd_i asserted\n" );
                        dma_channel_terminate_transfer( channel, 0 );
                        continue;
                }

                /* Are we done? */
                if ( channel->words_transferred >= channel->total_size )
                        dma_channel_terminate_transfer( channel, 1 );
        }
}
392
 
393
 
394
/* Copy relevant values from the linked-list descriptor in memory to the
 * channel registers, and advance regs.desc to the next descriptor in the
 * chain. Called at the start of each descriptor's transfer.
 */
void dma_load_descriptor( struct dma_channel *channel )
{
        int breakpoint = 0;
        /* Fetch the descriptor's CSR word from simulated memory */
        unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );

        /* EOL clear means another descriptor follows this one */
        channel->load_next_descriptor_when_done = !TEST_FLAG( desc_csr, DMA_DESC_CSR, EOL );

        /* Mirror the descriptor's transfer-control flags into the channel CSR */
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_SRC ) );
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_DST ) );
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, SRC_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, SRC_SEL ) );
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, DST_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, DST_SEL ) );

        /* Transfer size comes from the descriptor CSR's TOT_SZ field */
        SET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ,  GET_FIELD( desc_csr, DMA_DESC_CSR, TOT_SZ ) );

        /* Source and destination addresses */
        channel->regs.a0 = eval_mem32( channel->regs.desc + DMA_DESC_ADR0, &breakpoint );
        channel->regs.a1 = eval_mem32( channel->regs.desc + DMA_DESC_ADR1, &breakpoint );

        /* Remember the current descriptor and step to the next one */
        channel->current_descriptor = channel->regs.desc;
        channel->regs.desc = eval_mem32( channel->regs.desc + DMA_DESC_NEXT, &breakpoint );
}
415
 
416
 
417
/* Initialize internal parameters used to implement transfers */
418
void dma_init_transfer( struct dma_channel *channel )
419
{
420 235 erez
        channel->source = channel->regs.a0;
421
        channel->destination = channel->regs.a1;
422
        channel->source_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC ) ? channel->regs.am0 : 0;
423
        channel->destination_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST ) ? channel->regs.am1 : 0;
424
        channel->total_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ );
425
        channel->chunk_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, CHK_SZ );
426
        if ( !channel->chunk_size || (channel->chunk_size > channel->total_size) )
427
                channel->chunk_size = channel->total_size;
428
        channel->words_transferred = 0;
429 212 erez
}
430
 
431
 
432
/* Take care of transfer termination.
 * In linked-list mode this chains to the next descriptor; in auto-restart
 * mode it restarts the same transfer. Otherwise it clears BUSY/CH_EN, sets
 * DONE, and (if requested and unmasked) raises a completion interrupt.
 */
void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt )
{
        debug( 4,  "DMA: Terminating transfer\n" );

        /* Might be working in a linked list */
        if ( channel->load_next_descriptor_when_done ) {
                dma_load_descriptor( channel );
                dma_init_transfer( channel );
                return;
        }

        /* Might be in auto-restart mode */
        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, ARS ) ) {
                dma_init_transfer( channel );
                return;
        }

        /* If needed, write amount of data transferred back to memory */
        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, SZ_WB ) &&
                         TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) ) {
                int breakpoint = 0;
                /* NOTE(review): desc_csr is read here but never used, and the
                 * SET_FIELD below updates the channel's own SZ register (using
                 * a DMA_DESC_CSR field macro) instead of writing back to the
                 * descriptor in memory -- this branch looks unfinished; confirm
                 * the intended write-back semantics before relying on it */
                unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );
                /* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
                unsigned long remaining_words = channel->total_size - channel->words_transferred;
                SET_FIELD( channel->regs.sz, DMA_DESC_CSR, TOT_SZ, remaining_words );
        }

        /* Mark end of transfer */
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
        SET_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );

        /* If needed, generate interrupt */
        if ( generate_interrupt ) {
                /* TODO: Which channel should we interrupt? */
                /* Interrupt only if completion interrupts are enabled for this
                 * channel and it is unmasked in int_msk_a */
                if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_DONE ) &&
                                 (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
                        SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
                        channel->controller->regs.int_src_a = channel->channel_mask;
                        report_interrupt( channel->controller->irq );
                }
        }
}
477
 
478
/* Utility function: Add 4 to a value with a mask.
 * Only the bits selected by mask take part in (and wrap within) the
 * increment; bits outside the mask are preserved unchanged. A mask of 0
 * therefore leaves the value untouched.
 */
void masked_increase( unsigned long *value, unsigned long mask )
{
        unsigned long fixed_bits = *value & ~mask;
        unsigned long counted_bits = (*value + 4) & mask;

        *value = fixed_bits | counted_bits;
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.