OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [tags/] [nog_patch_47/] [or1ksim/] [peripheral/] [dma.c] - Blame information for rev 256

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 212 erez
/* dma.c -- Simulation of DMA
2 235 erez
         Copyright (C) 2001 by Erez Volk, erez@opencores.org
3 212 erez
 
4 235 erez
         This file is part of OpenRISC 1000 Architectural Simulator.
5
 
6
         This program is free software; you can redistribute it and/or modify
7
         it under the terms of the GNU General Public License as published by
8
         the Free Software Foundation; either version 2 of the License, or
9
         (at your option) any later version.
10
 
11
         This program is distributed in the hope that it will be useful,
12
         but WITHOUT ANY WARRANTY; without even the implied warranty of
13
         MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.    See the
14
         GNU General Public License for more details.
15 212 erez
 
16 235 erez
         You should have received a copy of the GNU General Public License
17
         along with this program; if not, write to the Free Software
18
         Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19
*/
20 212 erez
 
21
/*
22
 * This simulation of the DMA core is not meant to be full.
23
 * It is written only to allow simulating the Ethernet core.
24
 * Of course, if anyone feels like perfecting it, feel free...
25
 */
26
 
27
#include "dma.h"
28
#include "sim-config.h"
29
#include "trace.h"
30
#include "pic.h"
31 235 erez
#include "abstract.h"
32 212 erez
#include "fields.h"
33
 
34 256 erez
#define dprintf(x) printf x
35
 
36 212 erez
/* The representation of the DMA controllers */
37
static struct dma_controller dmas[NR_DMAS];
38
 
39 235 erez
static unsigned long dma_read32( unsigned long addr );
40
static void dma_write32( unsigned long addr, unsigned long value );
41
 
42 212 erez
static unsigned long dma_read_ch_csr( struct dma_channel *channel );
43
static void dma_write_ch_csr( struct dma_channel *channel, unsigned long value );
44
static void dma_controller_clock( struct dma_controller *dma );
45
static void dma_load_descriptor( struct dma_channel *channel );
46
static void dma_init_transfer( struct dma_channel *channel );
47
static void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt );
48
 
49
static void masked_increase( unsigned long *value, unsigned long mask );
50
 
51
#define CHANNEL_ND_I(ch) (TEST_FLAG(ch->regs.csr,DMA_CH_CSR,MODE) && TEST_FLAG(ch->regs.csr,DMA_CH_CSR,USE_ED) && ch->dma_nd_i)
52
 
53
 
54
/* Reset. Initializes all registers to default and places devices in memory address space. */
55
void dma_reset()
56
{
57 235 erez
        unsigned i;
58 212 erez
 
59 235 erez
        memset( dmas, 0, sizeof(dmas) );
60
 
61
        for ( i = 0; i < NR_DMAS; ++ i ) {
62
                struct dma_controller *dma = &(dmas[i]);
63
                unsigned channel_number;
64 212 erez
 
65 235 erez
                dma->baseaddr = config.dmas[i].baseaddr;
66
                dma->irq = config.dmas[i].irq;
67
                for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number ) {
68
                        dma->ch[channel_number].controller = &(dmas[i]);
69
                        dma->ch[channel_number].channel_number = channel_number;
70
                        dma->ch[channel_number].channel_mask = 1LU << channel_number;
71
                        dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
72
                }
73
                if ( dma->baseaddr != 0 )
74
                        register_memoryarea( dma->baseaddr, DMA_ADDR_SPACE, 4, dma_read32, dma_write32, 0 );
75 212 erez
        }
76
}
77
 
78
/* Print register values on stdout */
79
void dma_status( void )
80
{
81 235 erez
        unsigned i, j;
82 212 erez
 
83 235 erez
        for ( i = 0; i < NR_DMAS; ++ i ) {
84
                struct dma_controller *dma = &(dmas[i]);
85 212 erez
 
86 235 erez
                if ( dma->baseaddr == 0 )
87
                        continue;
88 212 erez
 
89 235 erez
                printf( "\nDMA controller %u at 0x%08X:\n", i, dma->baseaddr );
90
                printf( "CSR                     : 0x%08lX\n", dma->regs.csr );
91
                printf( "INT_MSK_A : 0x%08lX\n", dma->regs.int_msk_a );
92
                printf( "INT_MSK_B : 0x%08lX\n", dma->regs.int_msk_b );
93
                printf( "INT_SRC_A : 0x%08lX\n", dma->regs.int_src_a );
94
                printf( "INT_SRC_B : 0x%08lX\n", dma->regs.int_src_b );
95 212 erez
 
96 235 erez
                for ( j = 0; j < DMA_NUM_CHANNELS; ++ j ) {
97
                        struct dma_channel *channel = &(dma->ch[j]);
98
                        if ( !channel->referenced )
99
                                continue;
100
                        printf( "CH%u_CSR               : 0x%08lX\n", j, channel->regs.csr );
101
                        printf( "CH%u_SZ                : 0x%08lX\n", j, channel->regs.sz );
102
                        printf( "CH%u_A0                : 0x%08lX\n", j, channel->regs.a0 );
103
                        printf( "CH%u_AM0               : 0x%08lX\n", j, channel->regs.am0 );
104
                        printf( "CH%u_A1                : 0x%08lX\n", j, channel->regs.a1 );
105
                        printf( "CH%u_AM1               : 0x%08lX\n", j, channel->regs.am1 );
106
                        printf( "CH%u_DESC      : 0x%08lX\n", j, channel->regs.desc );
107
                        printf( "CH%u_SWPTR : 0x%08lX\n", j, channel->regs.swptr );
108
                }
109 212 erez
        }
110
}
111
 
112
 
113
/* Read a register */
114 235 erez
unsigned long dma_read32( unsigned long addr )
115 212 erez
{
116 235 erez
        unsigned i;
117
        struct dma_controller *dma = NULL;
118 212 erez
 
119 235 erez
        for ( i = 0; i < NR_DMAS && dma == NULL; ++ i ) {
120
                if ( addr >= dmas[i].baseaddr && addr < dmas[i].baseaddr + DMA_ADDR_SPACE )
121
                        dma = &(dmas[i]);
122
        }
123
 
124
        /* verify we found a controller */
125
        if ( dma == NULL ) {
126
                fprintf( stderr, "dma_read32( 0x%08lX ): Out of range\n", addr );
127
                cont_run = 0;
128
                return 0;
129
        }
130 212 erez
 
131 235 erez
        addr -= dma->baseaddr;
132 212 erez
 
133 235 erez
        if ( addr % 4 != 0 ) {
134
                fprintf( stderr, "dma_read32( 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr );
135
                cont_run = 0;
136
                return 0;
137
        }
138 212 erez
 
139 235 erez
        if ( addr < DMA_CH_BASE ) {
140 252 erez
                /* case of global (not per-channel) registers */
141 235 erez
                switch( addr ) {
142
                case DMA_CSR: return dma->regs.csr;
143
                case DMA_INT_MSK_A: return dma->regs.int_msk_a;
144
                case DMA_INT_MSK_B: return dma->regs.int_msk_b;
145
                case DMA_INT_SRC_A: return dma->regs.int_src_a;
146
                case DMA_INT_SRC_B: return dma->regs.int_src_b;
147
                default:
148
                        fprintf( stderr, "dma_read32( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
149
                        cont_run = 0;
150
                        return 0;
151
                }
152
        } else {
153
                /* case of per-channel registers */
154
                unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
155
                addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
156
                switch( addr ) {
157
                case DMA_CH_CSR: return dma_read_ch_csr( &(dma->ch[chno]) );
158
                case DMA_CH_SZ: return dma->ch[chno].regs.sz;
159
                case DMA_CH_A0: return dma->ch[chno].regs.a0;
160
                case DMA_CH_AM0: return dma->ch[chno].regs.am0;
161
                case DMA_CH_A1: return dma->ch[chno].regs.a1;
162
                case DMA_CH_AM1: return dma->ch[chno].regs.am1;
163
                case DMA_CH_DESC: return dma->ch[chno].regs.desc;
164
                case DMA_CH_SWPTR: return dma->ch[chno].regs.swptr;
165
                }
166
        }
167 212 erez
}
168
 
169
 
170
/* Handle read from a channel CSR */
171
unsigned long dma_read_ch_csr( struct dma_channel *channel )
172
{
173 235 erez
        unsigned long result = channel->regs.csr;
174 212 erez
 
175 235 erez
        /* before returning, clear all relevant bits */
176
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_CHUNK_DONE );
177
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
178
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
179
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
180 212 erez
 
181 235 erez
        return result;
182 212 erez
}
183
 
184
 
185
 
186
/* Write a register */
187 235 erez
void dma_write32( unsigned long addr, unsigned long value )
188 212 erez
{
189 235 erez
        unsigned i;
190
        struct dma_controller *dma = NULL;
191 212 erez
 
192 235 erez
        /* Find which controller this is */
193
        for ( i = 0; i < NR_DMAS && dma == NULL; ++ i ) {
194
                if ( (addr >= dmas[i].baseaddr) && (addr < dmas[i].baseaddr + DMA_ADDR_SPACE) )
195
                        dma = &(dmas[i]);
196
        }
197
 
198
        /* verify we found a controller */
199
        if ( dma == NULL ) {
200
                fprintf( stderr, "dma_write32( 0x%08lX ): Out of range\n", addr );
201
                cont_run = 0;
202
                return;
203
        }
204 212 erez
 
205 235 erez
        addr -= dma->baseaddr;
206 212 erez
 
207 235 erez
        if ( addr % 4 != 0 ) {
208
                fprintf( stderr, "dma_write32( 0x%08lX, 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr, value );
209
                cont_run = 0;
210
                return;
211
        }
212 212 erez
 
213 235 erez
        /* case of global (not per-channel) registers */
214
        if ( addr < DMA_CH_BASE ) {
215
                switch( addr ) {
216
                case DMA_CSR:
217
                        if ( TEST_FLAG( value, DMA_CSR, PAUSE ) )
218
                                fprintf( stderr, "dma: PAUSE not implemented\n" );
219
                        break;
220 212 erez
 
221 235 erez
                case DMA_INT_MSK_A: dma->regs.int_msk_a = value; break;
222
                case DMA_INT_MSK_B: dma->regs.int_msk_b = value; break;
223
                case DMA_INT_SRC_A: dma->regs.int_src_a = value; break;
224
                case DMA_INT_SRC_B: dma->regs.int_src_b = value; break;
225
                default:
226
                        fprintf( stderr, "dma_write32( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
227
                        cont_run = 0;
228
                        return;
229
                }
230
        } else {
231
                /* case of per-channel registers */
232
                unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
233
                struct dma_channel *channel = &(dma->ch[chno]);
234
                channel->referenced = 1;
235
                addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
236
                switch( addr ) {
237
                case DMA_CSR: dma_write_ch_csr( &(dma->ch[chno]), value ); break;
238
                case DMA_CH_SZ: channel->regs.sz = value; break;
239
                case DMA_CH_A0: channel->regs.a0 = value; break;
240
                case DMA_CH_AM0: channel->regs.am0 = value; break;
241
                case DMA_CH_A1: channel->regs.a1 = value; break;
242
                case DMA_CH_AM1: channel->regs.am1 = value; break;
243
                case DMA_CH_DESC: channel->regs.desc = value; break;
244
                case DMA_CH_SWPTR: channel->regs.swptr = value; break;
245
                }
246 212 erez
        }
247
}
248
 
249
 
250
/* Write a channel CSR
251
 * This ensures only the writable bits are modified.
252
 */
253
void dma_write_ch_csr( struct dma_channel *channel, unsigned long value )
254
{
255 235 erez
        /* Copy the writable bits to the channel CSR */
256
        channel->regs.csr &= ~DMA_CH_CSR_WRITE_MASK;
257
        channel->regs.csr |= value & DMA_CH_CSR_WRITE_MASK;
258 212 erez
}
259
 
260
 
261
/*
262
 * Simulation of control signals
263
 * To be used by simulations for other devices, e.g. ethernet
264
 */
265
 
266
void set_dma_req_i( unsigned dma_controller, unsigned channel )
267
{
268 235 erez
        dmas[dma_controller].ch[channel].dma_req_i = 1;
269 212 erez
}
270
 
271
void clear_dma_req_i( unsigned dma_controller, unsigned channel )
272
{
273 235 erez
        dmas[dma_controller].ch[channel].dma_req_i = 0;
274 212 erez
}
275
 
276
void set_dma_nd_i( unsigned dma_controller, unsigned channel )
277
{
278 235 erez
        dmas[dma_controller].ch[channel].dma_nd_i = 1;
279 212 erez
}
280
 
281
void clear_dma_nd_i( unsigned dma_controller, unsigned channel )
282
{
283 235 erez
        dmas[dma_controller].ch[channel].dma_nd_i = 0;
284 212 erez
}
285
 
286 235 erez
unsigned check_dma_ack_o( unsigned dma_controller, unsigned channel )
287 212 erez
{
288 235 erez
        return dmas[dma_controller].ch[channel].dma_ack_o;
289 212 erez
}
290
 
291
 
292
 
293
/* Simulation hook. Must be called every clock cycle to simulate DMA. */
294
void dma_clock()
295
{
296 235 erez
        unsigned i;
297
        for ( i = 0; i < NR_DMAS; ++ i ) {
298
                if ( dmas[i].baseaddr != 0 )
299
                        dma_controller_clock( &(dmas[i]) );
300
        }
301 212 erez
}
302
 
303
 
304
/* Clock tick for one DMA controller.
 * This does the actual "DMA" operation.
 * One chunk is transferred per clock.
 *
 * Per channel, each tick either: handles a STOP abort, waits for
 * dma_req_i (hardware-handshake mode), starts a new transfer, or moves
 * one 32-bit word and checks for chunk/transfer completion.
 */
void dma_controller_clock( struct dma_controller *dma )
{
        unsigned chno, i;  /* NOTE(review): `i` is declared but never used here */
        int breakpoint = 0;

        for ( chno = 0; chno < DMA_NUM_CHANNELS; ++ chno ) {
                struct dma_channel *channel = &(dma->ch[chno]);

                /* check if this channel is enabled */
                if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) )
                        continue;

                /* Do we need to abort?  STOP disables the channel, flags an
                 * error, and (if enabled and unmasked) raises an interrupt. */
                if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) ) {
                        dprintf(( "DMA: STOP requested\n" ));
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
                        SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );

                        /* Interrupt only when INE_ERR is set on the channel and
                         * the channel's bit is unmasked in INT_MSK_A */
                        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
                                         (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
                                SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
                                channel->controller->regs.int_src_a = channel->channel_mask;
                                report_interrupt( channel->controller->irq );
                        }

                        continue;
                }

                /* In HW Handshake mode, only work when dma_req_i asserted */
                if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, MODE ) &&
                                 !channel->dma_req_i ) {
                        continue;
                }

                /* If this is the first cycle of the transfer, initialize our state.
                 * BUSY clear means no transfer is in flight yet. */
                if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) ) {
                        dprintf(( "Starting new transfer\n" ));

                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
                        SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );

                        /* If using linked lists, copy the appropriate fields to our registers */
                        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
                                dma_load_descriptor( channel );
                        else
                                channel->load_next_descriptor_when_done = 0;

                        /* Set our internal status (source/dest pointers, sizes) */
                        dma_init_transfer( channel );

                        /* Might need to skip descriptor: dma_nd_i asserted at
                         * start means this descriptor is abandoned immediately */
                        if ( CHANNEL_ND_I( channel ) ) {
                                dprintf(( "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" ));
                                dma_channel_terminate_transfer( channel, 0 );
                                continue;
                        }
                }

                /* Transfer one 32-bit word through simulated memory */
                set_mem32( channel->destination, eval_mem32( channel->source, &breakpoint ), &breakpoint );

                /* Advance the source and destination pointers (masked wrap) */
                masked_increase( &(channel->source), channel->source_mask );
                masked_increase( &(channel->destination), channel->destination_mask );
                ++ channel->words_transferred;

                /* Have we finished a whole chunk?  dma_ack_o is how the
                 * handshake partner observes chunk completion.
                 * NOTE(review): if total_size is 0, dma_init_transfer leaves
                 * chunk_size 0 and this modulo faults -- confirm SZ is always
                 * written non-zero before CH_EN. */
                channel->dma_ack_o = (channel->words_transferred % channel->chunk_size == 0);

                /* When done with a chunk, check for dma_nd_i */
                if ( CHANNEL_ND_I( channel ) ) {
                        dprintf(( "DMA: dma_nd_i asserted\n" ));
                        dma_channel_terminate_transfer( channel, 0 );
                        continue;
                }

                /* Are we done?  Full-transfer completion may raise an interrupt */
                if ( channel->words_transferred >= channel->total_size )
                        dma_channel_terminate_transfer( channel, 1 );
        }
}
391
 
392
 
393
/* Copy relevant valued from linked list descriptor to channel registers.
 * Reads the descriptor pointed to by regs.desc from simulated memory,
 * mirrors its control flags and size into the channel registers, and
 * advances regs.desc to the next descriptor in the list. */
void dma_load_descriptor( struct dma_channel *channel )
{
        int breakpoint = 0;
        /* Fetch the descriptor's control word from simulated memory */
        unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );

        /* EOL (end-of-list) clear means another descriptor follows this one */
        channel->load_next_descriptor_when_done = !TEST_FLAG( desc_csr, DMA_DESC_CSR, EOL );

        /* Mirror the descriptor's transfer-control flags into the channel CSR */
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_SRC ) );
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_DST ) );
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, SRC_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, SRC_SEL ) );
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, DST_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, DST_SEL ) );

        /* Total transfer size comes from the descriptor as well; note the
         * layout conversion DMA_DESC_CSR -> DMA_CH_SZ */
        SET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ,  GET_FIELD( desc_csr, DMA_DESC_CSR, TOT_SZ ) );

        /* Source and destination addresses */
        channel->regs.a0 = eval_mem32( channel->regs.desc + DMA_DESC_ADR0, &breakpoint );
        channel->regs.a1 = eval_mem32( channel->regs.desc + DMA_DESC_ADR1, &breakpoint );

        /* Remember which descriptor we loaded, then advance to the next one */
        channel->current_descriptor = channel->regs.desc;
        channel->regs.desc = eval_mem32( channel->regs.desc + DMA_DESC_NEXT, &breakpoint );
}
414
 
415
 
416
/* Initialize internal parameters used to implement transfers */
417
void dma_init_transfer( struct dma_channel *channel )
418
{
419 235 erez
        channel->source = channel->regs.a0;
420
        channel->destination = channel->regs.a1;
421
        channel->source_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC ) ? channel->regs.am0 : 0;
422
        channel->destination_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST ) ? channel->regs.am1 : 0;
423
        channel->total_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ );
424
        channel->chunk_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, CHK_SZ );
425
        if ( !channel->chunk_size || (channel->chunk_size > channel->total_size) )
426
                channel->chunk_size = channel->total_size;
427
        channel->words_transferred = 0;
428 212 erez
}
429
 
430
 
431
/* Take care of transfer termination */
432
void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt )
433
{
434 256 erez
        dprintf(( "DMA: Terminating transfer\n" ));
435
 
436 235 erez
        /* Might be working in a linked list */
437
        if ( channel->load_next_descriptor_when_done ) {
438
                dma_load_descriptor( channel );
439
                dma_init_transfer( channel );
440
                return;
441
        }
442 212 erez
 
443 235 erez
        /* Might be in auto-restart mode */
444
        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, ARS ) ) {
445
                dma_init_transfer( channel );
446
                return;
447
        }
448 212 erez
 
449 235 erez
        /* If needed, write amount of data transferred back to memory */
450
        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, SZ_WB ) &&
451
                         TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) ) {
452
                int breakpoint = 0;
453
                unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );
454
                /* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
455
                unsigned long remaining_words = channel->total_size - channel->words_transferred;
456
                SET_FIELD( channel->regs.sz, DMA_DESC_CSR, TOT_SZ, remaining_words );
457
        }
458 212 erez
 
459 235 erez
        /* Mark end of transfer */
460
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
461
        SET_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
462
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
463
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
464
 
465
        /* If needed, generate interrupt */
466
        if ( generate_interrupt ) {
467
                /* TODO: Which channel should we interrupt? */
468
                if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_DONE ) &&
469
                                 (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
470
                        SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
471
                        channel->controller->regs.int_src_a = channel->channel_mask;
472
                        report_interrupt( channel->controller->irq );
473
                }
474 212 erez
        }
475
}
476
 
477
/* Utility function: Add 4 to a value with a mask.
 * Only the bits selected by MASK participate in the addition (so the
 * value wraps around inside the masked window); bits outside MASK are
 * left untouched. */
void masked_increase( unsigned long *value, unsigned long mask )
{
        unsigned long fixed_bits = *value & ~mask;
        unsigned long stepped_bits = (*value + 4) & mask;

        *value = fixed_bits | stepped_bits;
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.