/*
 * linux/include/asm-arm/proc-armv/pgtable.h
 *
 * Copyright (C) 1995, 1996, 1997 Russell King
 *
 * 12-01-1997   RMK     Altered flushing routines to use function pointers
 *                      now possible to combine ARM6, ARM7 and StrongARM versions.
 */
#ifndef __ASM_PROC_PGTABLE_H
#define __ASM_PROC_PGTABLE_H

#ifndef NO_MM
#include <asm/arch/mmu.h>

#define LIBRARY_TEXT_START 0x0c000000

/*
 * Cache flushing...
 */
#define flush_cache_all()                                               \
        processor.u.armv3v4._flush_cache_all()

#define flush_cache_mm(_mm)                                             \
        do {                                                            \
                if ((_mm) == current->mm)                               \
                        processor.u.armv3v4._flush_cache_all();         \
        } while (0)

#define flush_cache_range(_mm,_start,_end)                              \
        do {                                                            \
                if ((_mm) == current->mm)                               \
                        processor.u.armv3v4._flush_cache_area           \
                                ((_start), (_end), 1);                  \
        } while (0)

#define flush_cache_page(_vma,_vmaddr)                                  \
        do {                                                            \
                if ((_vma)->vm_mm == current->mm)                       \
                        processor.u.armv3v4._flush_cache_area           \
                                ((_vmaddr), (_vmaddr) + PAGE_SIZE,      \
                                 ((_vma)->vm_flags & VM_EXEC) ? 1 : 0); \
        } while (0)

/*
 * We don't have a mem map cache...
 */
#define update_mm_cache_all()                   do { } while (0)
#define update_mm_cache_task(tsk)               do { } while (0)
#define update_mm_cache_mm(mm)                  do { } while (0)
#define update_mm_cache_mm_addr(mm,addr,pte)    do { } while (0)

/*
 * This flushes back any buffered write data.  We have to clean and flush the entries
 * in the cache for this page.  Is it necessary to invalidate the I-cache?
 */
#define flush_page_to_ram(_page)                                        \
        processor.u.armv3v4._flush_ram_page ((_page) & PAGE_MASK);

/*
 * Make the page uncacheable (must flush page beforehand).
 */
#define uncache_page(_page)                                             \
        processor.u.armv3v4._flush_ram_page ((_page) & PAGE_MASK);

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct's TLB entries
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *
 * GCC uses conditional instructions, and expects the assembler code to do so as well.
 *
 * We drain the write buffer in here to ensure that the page tables in RAM
 * are really up to date.  It is more efficient to do this here...
 */
#define flush_tlb() flush_tlb_all()

#define flush_tlb_all()                                                         \
        processor.u.armv3v4._flush_tlb_all()

#define flush_tlb_mm(_mm)                                                       \
        do {                                                                    \
                if ((_mm) == current->mm)                                       \
                        processor.u.armv3v4._flush_tlb_all();                   \
        } while (0)

#define flush_tlb_range(_mm,_start,_end)                                        \
        do {                                                                    \
                if ((_mm) == current->mm)                                       \
                        processor.u.armv3v4._flush_tlb_area                     \
                                ((_start), (_end), 1);                          \
        } while (0)

#define flush_tlb_page(_vma,_vmaddr)                                            \
        do {                                                                    \
                if ((_vma)->vm_mm == current->mm)                               \
                        processor.u.armv3v4._flush_tlb_area                     \
                                ((_vmaddr), (_vmaddr) + PAGE_SIZE,              \
                                 ((_vma)->vm_flags & VM_EXEC) ? 1 : 0);         \
        } while (0)

/*
 * Since the page tables are in cached memory, we need to flush the dirty
 * data cache entries back before we flush the TLB...  This is also useful
 * to flush out the SWI instruction for signal handlers...
 */
#define __flush_entry_to_ram(entry)                                             \
        processor.u.armv3v4._flush_cache_entry((unsigned long)(entry))

#define __flush_pte_to_ram(entry)                                               \
        processor.u.armv3v4._flush_cache_pte((unsigned long)(entry))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       20
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     20
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the sa110 is two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE    256
#define PTRS_PER_PMD    1
#define PTRS_PER_PGD    4096

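/*
 * Illustrative sketch only (not part of the original interface), assuming
 * the usual PAGE_SHIFT == 12 and PAGE_MASK from <asm/page.h>: with the
 * constants above, a 32-bit virtual address splits into a 12-bit level-1
 * (pgd) index, an 8-bit level-2 (pte) index and a 12-bit page offset,
 * matching PTRS_PER_PGD == 4096 and PTRS_PER_PTE == 256.  The function
 * name below is hypothetical.
 */
#if 0
extern __inline__ void __pgtable_index_example(void)
{
        unsigned long addr    = 0x0c123456;
        unsigned long pgd_idx = addr >> PGDIR_SHIFT;                       /* 0x0c1 */
        unsigned long pte_idx = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); /* 0x23  */
        unsigned long offset  = addr & ~PAGE_MASK;                         /* 0x456 */
}
#endif
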
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET    (8*1024*1024)
#define VMALLOC_START     ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

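/*
 * Worked example of the rounding above (illustrative only): with
 * high_memory == 0x0c800000, VMALLOC_START == (0x0c800000 + 0x00800000)
 * & ~0x007fffff == 0x0d000000, i.e. vmalloc space starts 8MB beyond the
 * end of physical memory.
 */
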
/* PMD types (actually level 1 descriptor) */
#define PMD_TYPE_MASK           0x0003
#define PMD_TYPE_FAULT          0x0000
#define PMD_TYPE_TABLE          0x0001
#define PMD_TYPE_SECT           0x0002
#define PMD_UPDATABLE           0x0010
#define PMD_SECT_CACHEABLE      0x0008
#define PMD_SECT_BUFFERABLE     0x0004
#define PMD_SECT_AP_WRITE       0x0400
#define PMD_SECT_AP_READ        0x0800
#define PMD_DOMAIN(x)           ((x) << 5)

/* PTE types (actually level 2 descriptor) */
#define PTE_TYPE_MASK   0x0003
#define PTE_TYPE_FAULT  0x0000
#define PTE_TYPE_LARGE  0x0001
#define PTE_TYPE_SMALL  0x0002
#define PTE_AP_READ     0x0aa0
#define PTE_AP_WRITE    0x0550
#define PTE_CACHEABLE   0x0008
#define PTE_BUFFERABLE  0x0004

/* Domains */
#define DOMAIN_KERNEL   0

#define _PAGE_CHG_MASK  (0xfffff00c | PTE_TYPE_MASK)

/*
 * We define the bits in the page tables as follows:
 *  PTE_BUFFERABLE      page is dirty
 *  PTE_AP_WRITE        page is writable
 *  PTE_AP_READ         page is young (unsetting this causes faults for any access)
 *
 * Any page that is mapped in is assumed to be readable...
 */
#define PAGE_NONE       __pgprot(PTE_TYPE_SMALL)
#define PAGE_SHARED     __pgprot(PTE_TYPE_SMALL | PTE_CACHEABLE | PTE_AP_READ | PTE_AP_WRITE)
#define PAGE_COPY       __pgprot(PTE_TYPE_SMALL | PTE_CACHEABLE | PTE_AP_READ)
#define PAGE_READONLY   __pgprot(PTE_TYPE_SMALL | PTE_CACHEABLE | PTE_AP_READ)
#define PAGE_KERNEL     __pgprot(PTE_TYPE_SMALL | PTE_CACHEABLE | PTE_BUFFERABLE | PTE_AP_WRITE)

#define _PAGE_TABLE     (PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_KERNEL))

/*
 * The ARM can't do page protection for execute, and treats execute the same
 * as read.  Also, write permissions imply read permissions.  This is the
 * closest we can get..
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED

#undef TEST_VERIFY_AREA

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);
extern unsigned long *empty_zero_page;

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE ((unsigned long) empty_zero_page)

/* number of bits that fit into a memory pointer */
#define BYTES_PER_PTR                   (sizeof(unsigned long))
#define BITS_PER_PTR                    (8*BYTES_PER_PTR)

/* to align the pointer to a pointer address */
#define PTR_MASK                        (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2                 2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/* to set the page-dir */
#define SET_PAGE_DIR(tsk,pgdir)                                 \
do {                                                            \
        tsk->tss.memmap = __virt_to_phys((unsigned long)pgdir); \
        if ((tsk) == current)                                   \
                __asm__ __volatile__(                           \
                "mcr%?  p15, 0, %0, c2, c0, 0\n"                \
                : : "r" (tsk->tss.memmap));                     \
} while (0)

extern __inline__ int pte_none(pte_t pte)
{
        return !pte_val(pte);
}

#define pte_clear(ptep) set_pte(ptep, __pte(0))

extern __inline__ int pte_present(pte_t pte)
{
        switch (pte_val(pte) & PTE_TYPE_MASK) {
        case PTE_TYPE_LARGE:
        case PTE_TYPE_SMALL:
                return 1;
        default:
                return 0;
        }
}

extern __inline__ int pmd_none(pmd_t pmd)
{
        return !pmd_val(pmd);
}

#define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0))

extern __inline__ int pmd_bad(pmd_t pmd)
{
        switch (pmd_val(pmd) & PMD_TYPE_MASK) {
        case PMD_TYPE_FAULT:
        case PMD_TYPE_TABLE:
                return 0;
        default:
                return 1;
        }
}

extern __inline__ int pmd_present(pmd_t pmd)
{
        switch (pmd_val(pmd) & PMD_TYPE_MASK) {
        case PMD_TYPE_TABLE:
                return 1;
        default:
                return 0;
        }
}

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)           (0)
#define pgd_bad(pgd)            (0)
#define pgd_present(pgd)        (1)
#define pgd_clear(pgdp)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_read(pte)           (1)
#define pte_exec(pte)           (1)

extern __inline__ int pte_write(pte_t pte)
{
        return pte_val(pte) & PTE_AP_WRITE;
}

extern __inline__ int pte_cacheable(pte_t pte)
{
        return pte_val(pte) & PTE_CACHEABLE;
}

extern __inline__ int pte_dirty(pte_t pte)
{
        return pte_val(pte) & PTE_BUFFERABLE;
}

extern __inline__ int pte_young(pte_t pte)
{
        return pte_val(pte) & PTE_AP_READ;
}

extern __inline__ pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~PTE_AP_WRITE;
        return pte;
}

extern __inline__ pte_t pte_nocache(pte_t pte)
{
        pte_val(pte) &= ~PTE_CACHEABLE;
        return pte;
}

extern __inline__ pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~PTE_BUFFERABLE;
        return pte;
}

extern __inline__ pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~PTE_AP_READ;
        return pte;
}

extern __inline__ pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= PTE_AP_WRITE;
        return pte;
}

extern __inline__ pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= PTE_BUFFERABLE;
        return pte;
}

extern __inline__ pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= PTE_AP_READ;
        return pte;
}

/*
 * The following are unable to be implemented on this MMU
 */
#if 0
extern __inline__ pte_t pte_rdprotect(pte_t pte)
{
        pte_val(pte) &= ~(PTE_CACHEABLE|PTE_AP_READ);
        return pte;
}

extern __inline__ pte_t pte_exprotect(pte_t pte)
{
        pte_val(pte) &= ~(PTE_CACHEABLE|PTE_AP_READ);
        return pte;
}

extern __inline__ pte_t pte_mkread(pte_t pte)
{
        pte_val(pte) |= PTE_CACHEABLE;
        return pte;
}

extern __inline__ pte_t pte_mkexec(pte_t pte)
{
        pte_val(pte) |= PTE_CACHEABLE;
        return pte;
}
#endif

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern __inline__ pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{
        pte_t pte;
        pte_val(pte) = __virt_to_phys(page) | pgprot_val(pgprot);
        return pte;
}

extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
        return pte;
}

extern __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
        *pteptr = pteval;
        __flush_pte_to_ram(pteptr);
}

extern __inline__ unsigned long pte_page(pte_t pte)
{
        return __phys_to_virt(pte_val(pte) & PAGE_MASK);
}

extern __inline__ pmd_t mk_pmd(pte_t *ptep)
{
        pmd_t pmd;
        pmd_val(pmd) = __virt_to_phys((unsigned long)ptep) | _PAGE_TABLE;
        return pmd;
}

#if 1
#define set_pmd(pmdp,pmd) processor.u.armv3v4._set_pmd(pmdp,pmd)
#else
extern __inline__ void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        *pmdp = pmd;
        __flush_pte_to_ram(pmdp);
}
#endif

extern __inline__ unsigned long pmd_page(pmd_t pmd)
{
        return __phys_to_virt(pmd_val(pmd) & 0xfffffc00);
}

/* to find an entry in a page-table-directory */
extern __inline__ pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, address) ((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
extern __inline__ pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
        return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

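/*
 * Minimal sketch (illustrative only, not part of the original interface)
 * of a full walk from an mm and a virtual address down to the pte, using
 * the helpers above.  The function name is hypothetical.
 */
#if 0
extern __inline__ pte_t * __pte_lookup_example(struct mm_struct * mm, unsigned long address)
{
        pgd_t *pgd = pgd_offset(mm, address);
        pmd_t *pmd = pmd_offset(pgd, address);

        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
        return pte_offset(pmd, address);
}
#endif
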
extern unsigned long get_small_page(int priority);
extern void free_small_page(unsigned long page);

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
extern __inline__ void pte_free_kernel(pte_t * pte)
{
        free_small_page((unsigned long) pte);
}

extern const char bad_pmd_string[];

extern __inline__ pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_small_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                memzero (page, PTRS_PER_PTE * BYTES_PER_PTR);
                                set_pmd(pmd, mk_pmd(page));
                                return page + address;
                        }
                        set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
                        return NULL;
                }
                free_small_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk(bad_pmd_string, pmd_val(*pmd));
                set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pmd_free_kernel(pmdp) pmd_val(*(pmdp)) = 0;
#define pmd_alloc_kernel(pgdp, address) ((pmd_t *)(pgdp))

extern __inline__ void pte_free(pte_t * pte)
{
        free_small_page((unsigned long) pte);
}

extern __inline__ pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_small_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                memzero (page, PTRS_PER_PTE * BYTES_PER_PTR);
                                set_pmd(pmd, mk_pmd(page));
                                return page + address;
                        }
                        set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
                        return NULL;
                }
                free_small_page ((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk(bad_pmd_string, pmd_val(*pmd));
                set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pmd_free(pmdp) pmd_val(*(pmdp)) = 0;
#define pmd_alloc(pgdp, address) ((pmd_t *)(pgdp))

/*
 * Free a page directory.  Takes the virtual address.
 */
extern __inline__ void pgd_free(pgd_t * pgd)
{
        free_pages((unsigned long) pgd, 2);
}

/*
 * Allocate a new page directory.  Return the virtual address of it.
 */
extern __inline__ pgd_t * pgd_alloc(void)
{
        unsigned long pgd;

        /*
         * need to get a 16k page for level 1
         */
        pgd = __get_free_pages(GFP_KERNEL,2,0);
        if (pgd)
                memzero ((void *)pgd, PTRS_PER_PGD * BYTES_PER_PTR);
        return (pgd_t *)pgd;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * The sa110 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern __inline__ void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long address, pte_t pte)
{
}

#define SWP_TYPE(entry) (((entry) >> 2) & 0x7f)
#define SWP_OFFSET(entry) ((entry) >> 9)
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 9))

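/*
 * Illustrative only: SWP_ENTRY() packs the swap type into bits 2..8 and
 * the offset into bits 9..31, so the two accessors above recover them
 * exactly for any type < 0x80, e.g.
 *
 *   SWP_ENTRY(3, 0x1234) == (3 << 2) | (0x1234 << 9) == 0x0024680c
 *   SWP_TYPE(0x0024680c) == 3, SWP_OFFSET(0x0024680c) == 0x1234
 */
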
#else
extern inline void flush_cache_mm(struct mm_struct *mm)
{
}

extern inline void flush_cache_range(struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
}

/* Push the page at kernel virtual address and clear the icache */
extern inline void flush_page_to_ram (unsigned long address)
{
}

/* Push n pages at kernel virtual address and clear the icache */
extern inline void flush_pages_to_ram (unsigned long address, int n)
{
}

#define __flush_entry_to_ram(entry)

#endif /* NO_MM */

#endif /* __ASM_PROC_PGTABLE_H */
