/* dma.c -- Simulation of DMA
         Copyright (C) 2001 by Erez Volk, erez@opencores.org

         This file is part of OpenRISC 1000 Architectural Simulator.

         This program is free software; you can redistribute it and/or modify
         it under the terms of the GNU General Public License as published by
         the Free Software Foundation; either version 2 of the License, or
         (at your option) any later version.

         This program is distributed in the hope that it will be useful,
         but WITHOUT ANY WARRANTY; without even the implied warranty of
         MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
         GNU General Public License for more details.

         You should have received a copy of the GNU General Public License
         along with this program; if not, write to the Free Software
         Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/*
 * This simulation of the DMA core is not meant to be full.
 * It is written only to allow simulating the Ethernet core.
 * Of course, if anyone feels like perfecting it, feel free...
 */

#include <stdio.h>  /* printf, fprintf */
#include <string.h> /* memset */

#include "dma.h"
#include "sim-config.h"
#include "trace.h"
#include "pic.h"
#include "abstract.h"
#include "fields.h"

/* The representation of the DMA controllers */
static struct dma_controller dmas[NR_DMAS];

static unsigned long dma_read32( unsigned long addr );
static void dma_write32( unsigned long addr, unsigned long value );

static unsigned long dma_read_ch_csr( struct dma_channel *channel );
static void dma_write_ch_csr( struct dma_channel *channel, unsigned long value );
static void dma_controller_clock( struct dma_controller *dma );
static void dma_load_descriptor( struct dma_channel *channel );
static void dma_init_transfer( struct dma_channel *channel );
static void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt );

static void masked_increase( unsigned long *value, unsigned long mask );

/* True when the channel is in HW handshake mode (MODE), uses external
   descriptors (USE_ED), and the external device is asserting dma_nd_i */
#define CHANNEL_ND_I(ch) (TEST_FLAG(ch->regs.csr,DMA_CH_CSR,MODE) && TEST_FLAG(ch->regs.csr,DMA_CH_CSR,USE_ED) && ch->dma_nd_i)


/* Reset. Initializes all registers to default and places devices in memory address space. */
void dma_reset()
{
        unsigned i;

        memset( dmas, 0, sizeof(dmas) );

        for ( i = 0; i < NR_DMAS; ++ i ) {
                struct dma_controller *dma = &(dmas[i]);
                unsigned channel_number;

                dma->baseaddr = config.dmas[i].baseaddr;
                dma->irq = config.dmas[i].irq;
                for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number ) {
                        dma->ch[channel_number].controller = &(dmas[i]);
                        dma->ch[channel_number].channel_number = channel_number;
                        dma->ch[channel_number].channel_mask = 1LU << channel_number;
                        dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
                }
                /* Map this controller's registers into the simulated address space;
                   accesses to the range are routed to dma_read32()/dma_write32() */
                if ( dma->baseaddr != 0 )
                        register_memoryarea( dma->baseaddr, DMA_ADDR_SPACE, 4, dma_read32, dma_write32, 0 );
        }
}

/* Print register values on stdout */
void dma_status( void )
{
        unsigned i, j;

        for ( i = 0; i < NR_DMAS; ++ i ) {
                struct dma_controller *dma = &(dmas[i]);

                if ( dma->baseaddr == 0 )
                        continue;

                printf( "\nDMA controller %u at 0x%08X:\n", i, dma->baseaddr );
                printf( "CSR        : 0x%08lX\n", dma->regs.csr );
                printf( "INT_MSK_A  : 0x%08lX\n", dma->regs.int_msk_a );
                printf( "INT_MSK_B  : 0x%08lX\n", dma->regs.int_msk_b );
                printf( "INT_SRC_A  : 0x%08lX\n", dma->regs.int_src_a );
                printf( "INT_SRC_B  : 0x%08lX\n", dma->regs.int_src_b );

                for ( j = 0; j < DMA_NUM_CHANNELS; ++ j ) {
                        struct dma_channel *channel = &(dma->ch[j]);
                        if ( !channel->referenced )
                                continue;
                        printf( "CH%u_CSR    : 0x%08lX\n", j, channel->regs.csr );
                        printf( "CH%u_SZ     : 0x%08lX\n", j, channel->regs.sz );
                        printf( "CH%u_A0     : 0x%08lX\n", j, channel->regs.a0 );
                        printf( "CH%u_AM0    : 0x%08lX\n", j, channel->regs.am0 );
                        printf( "CH%u_A1     : 0x%08lX\n", j, channel->regs.a1 );
                        printf( "CH%u_AM1    : 0x%08lX\n", j, channel->regs.am1 );
                        printf( "CH%u_DESC   : 0x%08lX\n", j, channel->regs.desc );
                        printf( "CH%u_SWPTR  : 0x%08lX\n", j, channel->regs.swptr );
                }
        }
}


/* Read a register */
unsigned long dma_read32( unsigned long addr )
{
        unsigned i;
        struct dma_controller *dma = NULL;

        for ( i = 0; i < NR_DMAS && dma == NULL; ++ i ) {
                if ( addr >= dmas[i].baseaddr && addr < dmas[i].baseaddr + DMA_ADDR_SPACE )
                        dma = &(dmas[i]);
        }

        /* verify we found a controller */
        if ( dma == NULL ) {
                fprintf( stderr, "dma_read32( 0x%08lX ): Out of range\n", addr );
                cont_run = 0;
                return 0;
        }

        addr -= dma->baseaddr;

        if ( addr % 4 != 0 ) {
                fprintf( stderr, "dma_read32( 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr );
                cont_run = 0;
                return 0;
        }

        /* case of global (not per-channel) registers */
        if ( addr < DMA_CH_BASE ) {
                switch( addr ) {
                case DMA_CSR: return dma->regs.csr;
                case DMA_INT_MSK_A: return dma->regs.int_msk_a;
                case DMA_INT_MSK_B: return dma->regs.int_msk_b;
                case DMA_INT_SRC_A: return dma->regs.int_src_a;
                case DMA_INT_SRC_B: return dma->regs.int_src_b;
                default:
                        fprintf( stderr, "dma_read32( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
                        cont_run = 0;
                        return 0;
                }
        } else {
                /* case of per-channel registers */
                unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
                addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
                switch( addr ) {
                case DMA_CH_CSR: return dma_read_ch_csr( &(dma->ch[chno]) );
                case DMA_CH_SZ: return dma->ch[chno].regs.sz;
                case DMA_CH_A0: return dma->ch[chno].regs.a0;
                case DMA_CH_AM0: return dma->ch[chno].regs.am0;
                case DMA_CH_A1: return dma->ch[chno].regs.a1;
                case DMA_CH_AM1: return dma->ch[chno].regs.am1;
                case DMA_CH_DESC: return dma->ch[chno].regs.desc;
                case DMA_CH_SWPTR: return dma->ch[chno].regs.swptr;
                }
        }

        /* not reached for valid, aligned offsets; keeps the compiler happy */
        return 0;
}
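
/* Worked example of the register decode above (illustrative only: the real
 * values of DMA_CH_BASE and DMA_CH_SIZE come from dma.h; 0x20 for both is
 * assumed here, matching the OpenCores DMA core register map):
 *
 *   offset 0x0C                       -> global register DMA_INT_SRC_A
 *   offset 0x44: 0x44 - 0x20 = 0x24   -> chno = 0x24 / 0x20 = 1,
 *                                        register offset 0x24 % 0x20 = 0x04,
 *                                        i.e. channel 1's CH_SZ register
 */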


/* Handle read from a channel CSR */
unsigned long dma_read_ch_csr( struct dma_channel *channel )
{
        unsigned long result = channel->regs.csr;

        /* before returning, clear all relevant bits */
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_CHUNK_DONE );
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );

        return result;
}



/* Write a register */
void dma_write32( unsigned long addr, unsigned long value )
{
        unsigned i;
        struct dma_controller *dma = NULL;

        /* Find which controller this is */
        for ( i = 0; i < NR_DMAS && dma == NULL; ++ i ) {
                if ( (addr >= dmas[i].baseaddr) && (addr < dmas[i].baseaddr + DMA_ADDR_SPACE) )
                        dma = &(dmas[i]);
        }

        /* verify we found a controller */
        if ( dma == NULL ) {
                fprintf( stderr, "dma_write32( 0x%08lX ): Out of range\n", addr );
                cont_run = 0;
                return;
        }

        addr -= dma->baseaddr;

        if ( addr % 4 != 0 ) {
                fprintf( stderr, "dma_write32( 0x%08lX, 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr, value );
                cont_run = 0;
                return;
        }

        /* case of global (not per-channel) registers */
        if ( addr < DMA_CH_BASE ) {
                switch( addr ) {
                case DMA_CSR:
                        if ( TEST_FLAG( value, DMA_CSR, PAUSE ) )
                                fprintf( stderr, "dma: PAUSE not implemented\n" );
                        break;

                case DMA_INT_MSK_A: dma->regs.int_msk_a = value; break;
                case DMA_INT_MSK_B: dma->regs.int_msk_b = value; break;
                case DMA_INT_SRC_A: dma->regs.int_src_a = value; break;
                case DMA_INT_SRC_B: dma->regs.int_src_b = value; break;
                default:
                        fprintf( stderr, "dma_write32( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
                        cont_run = 0;
                        return;
                }
        } else {
                /* case of per-channel registers */
                unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
                struct dma_channel *channel = &(dma->ch[chno]);
                channel->referenced = 1;
                addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
                switch( addr ) {
                case DMA_CH_CSR: dma_write_ch_csr( &(dma->ch[chno]), value ); break;
                case DMA_CH_SZ: channel->regs.sz = value; break;
                case DMA_CH_A0: channel->regs.a0 = value; break;
                case DMA_CH_AM0: channel->regs.am0 = value; break;
                case DMA_CH_A1: channel->regs.a1 = value; break;
                case DMA_CH_AM1: channel->regs.am1 = value; break;
                case DMA_CH_DESC: channel->regs.desc = value; break;
                case DMA_CH_SWPTR: channel->regs.swptr = value; break;
                }
        }
}


/* Write a channel CSR
 * This ensures only the writable bits are modified.
 */
void dma_write_ch_csr( struct dma_channel *channel, unsigned long value )
{
        /* Copy the writable bits to the channel CSR */
        channel->regs.csr &= ~DMA_CH_CSR_WRITE_MASK;
        channel->regs.csr |= value & DMA_CH_CSR_WRITE_MASK;
}
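
/* Illustrative example of the masking above. DMA_CH_CSR_WRITE_MASK is defined
 * in dma.h; a hypothetical mask of 0x0000FFFF is assumed here purely to show
 * the arithmetic:
 *
 *   old csr = 0x00120003, written value = 0xFFFF0001
 *   csr &= ~0x0000FFFF              -> 0x00120000   (read-only bits kept)
 *   csr |= 0xFFFF0001 & 0x0000FFFF  -> 0x00120001   (only writable bits change)
 */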


/*
 * Simulation of control signals
 * To be used by simulations for other devices, e.g. ethernet
 */

void set_dma_req_i( unsigned dma_controller, unsigned channel )
{
        dmas[dma_controller].ch[channel].dma_req_i = 1;
}

void clear_dma_req_i( unsigned dma_controller, unsigned channel )
{
        dmas[dma_controller].ch[channel].dma_req_i = 0;
}

void set_dma_nd_i( unsigned dma_controller, unsigned channel )
{
        dmas[dma_controller].ch[channel].dma_nd_i = 1;
}

void clear_dma_nd_i( unsigned dma_controller, unsigned channel )
{
        dmas[dma_controller].ch[channel].dma_nd_i = 0;
}

unsigned check_dma_ack_o( unsigned dma_controller, unsigned channel )
{
        return dmas[dma_controller].ch[channel].dma_ack_o;
}
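
/* A minimal sketch of how another device model (e.g. the Ethernet simulation)
 * might drive the handshake hooks above. The controller/channel indices and
 * the surrounding device clock hook are assumptions for illustration only;
 * dma_clock() itself is called by the simulator's main loop every cycle.
 *
 *   void my_device_clock( void )              // hypothetical device hook
 *   {
 *           if ( device_has_data_ready() )    // hypothetical predicate
 *                   set_dma_req_i( 0, 0 );    // request service on controller 0, channel 0
 *
 *           if ( check_dma_ack_o( 0, 0 ) ) {  // a whole chunk has been transferred
 *                   consume_chunk();          // hypothetical
 *                   clear_dma_req_i( 0, 0 );
 *           }
 *   }
 */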



/* Simulation hook. Must be called every clock cycle to simulate DMA. */
void dma_clock()
{
        unsigned i;
        for ( i = 0; i < NR_DMAS; ++ i ) {
                if ( dmas[i].baseaddr != 0 )
                        dma_controller_clock( &(dmas[i]) );
        }
}


/* Clock tick for one DMA controller.
 * This does the actual "DMA" operation.
 * One word is transferred per clock for each active channel;
 * dma_ack_o is raised whenever a whole chunk has been transferred.
 */
void dma_controller_clock( struct dma_controller *dma )
{
        unsigned chno;
        int breakpoint = 0;

        for ( chno = 0; chno < DMA_NUM_CHANNELS; ++ chno ) {
                struct dma_channel *channel = &(dma->ch[chno]);

                /* check if this channel is enabled */
                if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) )
                        continue;

                /* Do we need to abort? */
                if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) ) {
                        fprintf( stderr, "DMA: STOP requested\n" );
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
                        SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );

                        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
                                         (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
                                SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
                                channel->controller->regs.int_src_a = channel->channel_mask;
                                report_interrupt( channel->controller->irq );
                        }

                        continue;
                }

                /* In HW Handshake mode, only work when dma_req_i asserted */
                if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, MODE ) &&
                                 !channel->dma_req_i ) {
                        fprintf( stderr, "DMA: Waiting for HW handshake\n" );
                        continue;
                }

                /* If this is the first cycle of the transfer, initialize our state */
                if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) ) {
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
                        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
                        SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );

                        /* If using linked lists, copy the appropriate fields to our registers */
                        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
                                dma_load_descriptor( channel );
                        else
                                channel->load_next_descriptor_when_done = 0;

                        /* Set our internal status */
                        dma_init_transfer( channel );

                        /* Might need to skip descriptor */
                        if ( CHANNEL_ND_I( channel ) ) {
                                fprintf( stderr, "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" );
                                dma_channel_terminate_transfer( channel, 0 );
                                continue;
                        }
                }

                /* Transfer one word */
                set_mem32( channel->destination, eval_mem32( channel->source, &breakpoint ), &breakpoint );

                /* Advance the source and destination pointers */
                masked_increase( &(channel->source), channel->source_mask );
                masked_increase( &(channel->destination), channel->destination_mask );
                ++ channel->words_transferred;

                /* Have we finished a whole chunk? */
                channel->dma_ack_o = (channel->words_transferred % channel->chunk_size == 0);

                /* When done with a chunk, check for dma_nd_i */
                if ( CHANNEL_ND_I( channel ) ) {
                        fprintf( stderr, "DMA: dma_nd_i asserted, terminating transfer\n" );
                        dma_channel_terminate_transfer( channel, 0 );
                        continue;
                }

                /* Are we done? */
                if ( channel->words_transferred >= channel->total_size )
                        dma_channel_terminate_transfer( channel, 1 );
        }
}


/* Copy relevant values from the linked-list descriptor to the channel registers */
void dma_load_descriptor( struct dma_channel *channel )
{
        int breakpoint = 0;
        unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );

        channel->load_next_descriptor_when_done = !TEST_FLAG( desc_csr, DMA_DESC_CSR, EOL );

        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_SRC ) );
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_DST ) );
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, SRC_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, SRC_SEL ) );
        ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, DST_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, DST_SEL ) );

        SET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ, GET_FIELD( desc_csr, DMA_DESC_CSR, TOT_SZ ) );

        channel->regs.a0 = eval_mem32( channel->regs.desc + DMA_DESC_ADR0, &breakpoint );
        channel->regs.a1 = eval_mem32( channel->regs.desc + DMA_DESC_ADR1, &breakpoint );

        channel->current_descriptor = channel->regs.desc;
        channel->regs.desc = eval_mem32( channel->regs.desc + DMA_DESC_NEXT, &breakpoint );
}
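
/* For reference, the external descriptor read above has the following shape in
 * simulated memory. The field offsets DMA_DESC_CSR/ADR0/ADR1/NEXT come from
 * dma.h; the 0x0/0x4/0x8/0xC layout shown here follows the OpenCores DMA core
 * documentation and is an assumption, not something verified in this file:
 *
 *   desc + 0x0 : CSR   (EOL, INC_SRC, INC_DST, SRC_SEL, DST_SEL, TOT_SZ)
 *   desc + 0x4 : ADR0  (source address)
 *   desc + 0x8 : ADR1  (destination address)
 *   desc + 0xC : NEXT  (address of the next descriptor in the list)
 */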


/* Initialize internal parameters used to implement transfers */
void dma_init_transfer( struct dma_channel *channel )
{
        channel->source = channel->regs.a0;
        channel->destination = channel->regs.a1;
        channel->source_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC ) ? channel->regs.am0 : 0;
        channel->destination_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST ) ? channel->regs.am1 : 0;
        channel->total_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ );
        channel->chunk_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, CHK_SZ );
        if ( !channel->chunk_size || (channel->chunk_size > channel->total_size) )
                channel->chunk_size = channel->total_size;
        channel->words_transferred = 0;
}


/* Take care of transfer termination */
void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt )
{
        /* Might be working in a linked list */
        if ( channel->load_next_descriptor_when_done ) {
                dma_load_descriptor( channel );
                dma_init_transfer( channel );
                return;
        }

        /* Might be in auto-restart mode */
        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, ARS ) ) {
                dma_init_transfer( channel );
                return;
        }

        /* If needed, write amount of data transferred back to memory */
        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, SZ_WB ) &&
                         TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) ) {
                int breakpoint = 0;
                unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );
                /* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
                unsigned long remaining_words = channel->total_size - channel->words_transferred;
                SET_FIELD( channel->regs.sz, DMA_DESC_CSR, TOT_SZ, remaining_words );
        }

        /* Mark end of transfer */
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
        SET_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
        CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );

        /* If needed, generate interrupt */
        if ( generate_interrupt ) {
                /* TODO: Which channel should we interrupt? */
                if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_DONE ) &&
                                 (channel->controller->regs.int_msk_a & channel->channel_mask) ) {
                        SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
                        channel->controller->regs.int_src_a = channel->channel_mask;
                        report_interrupt( channel->controller->irq );
                }
        }
}

/* Utility function: add 4 (one word) to the masked part of a value;
 * bits outside the mask are left unchanged */
void masked_increase( unsigned long *value, unsigned long mask )
{
        *value = (*value & ~mask) | ((*value & mask) + 4);
}
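
/* Worked example of masked_increase() (values are illustrative only): with the
 * reset-time address mask 0xFFFFFFFC set in dma_reset(),
 *
 *   *value = 0x10000008, mask = 0xFFFFFFFC
 *   (*value & ~mask)    = 0x00000000
 *   (*value & mask) + 4 = 0x1000000C
 *   new *value          = 0x1000000C     (a plain increment of one word)
 *
 * Bits outside the mask are carried over unchanged, while only the masked
 * part of the address is advanced by 4.
 */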
