OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [char/] [mem.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 *  linux/drivers/char/mem.c
3
 *
4
 *  Copyright (C) 1991, 1992  Linus Torvalds
5
 *
6
 *  Added devfs support.
7
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9
 */
10
 
11
#include <linux/config.h>
12
#include <linux/mm.h>
13
#include <linux/miscdevice.h>
14
#include <linux/tpqic02.h>
15
#include <linux/ftape.h>
16
#include <linux/slab.h>
17
#include <linux/vmalloc.h>
18
#include <linux/mman.h>
19
#include <linux/random.h>
20
#include <linux/init.h>
21
#include <linux/raw.h>
22
#include <linux/tty.h>
23
#include <linux/capability.h>
24
#include <linux/ptrace.h>
25
 
26
#include <asm/uaccess.h>
27
#include <asm/io.h>
28
#include <asm/pgalloc.h>
29
 
30
#ifdef CONFIG_I2C
31
extern int i2c_init_all(void);
32
#endif
33
#ifdef CONFIG_FB
34
extern void fbmem_init(void);
35
#endif
36
#ifdef CONFIG_PROM_CONSOLE
37
extern void prom_con_init(void);
38
#endif
39
#ifdef CONFIG_MDA_CONSOLE
40
extern void mda_console_init(void);
41
#endif
42
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
43
extern void tapechar_init(void);
44
#endif
45
 
46
/*
 * Common helper for write_mem()/write_kmem(): copy 'count' bytes from the
 * user buffer 'buf' to kernel virtual address 'p' (physical address
 * 'realp'), advancing *ppos by the number of bytes accepted.
 * Returns bytes written or -EFAULT.
 */
static ssize_t do_write_mem(struct file * file, void *p, unsigned long realp,
                            const char * buf, size_t count, loff_t *ppos)
{
        ssize_t written;

        written = 0;
#if defined(__sparc__) || defined(__mc68000__)
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE-realp;
                if (sz > count) sz = count;
                /* Hmm. Do something? */
                /* NOTE(review): the page-0 chunk is silently discarded but
                 * still counted as written - deliberate best-effort here. */
                buf+=sz;
                p+=sz;
                count-=sz;
                written+=sz;
        }
#endif
        if (copy_from_user(p, buf, count))
                return -EFAULT;
        written += count;
        *ppos += written;
        return written;
}
70
 
71
 
72
/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        unsigned long end_mem;
        ssize_t read;

        /* Clamp the transfer to the end of directly-mapped physical RAM. */
        end_mem = __pa(high_memory);
        if (p >= end_mem)
                return 0;
        if (count > end_mem - p)
                count = end_mem - p;
        read = 0;
#if defined(__sparc__) || defined(__mc68000__)
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE-p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        /* Hand back zeroes for the unmapped first page. */
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif
        if (copy_to_user(buf, __va(p), count))
                return -EFAULT;
        read += count;
        *ppos += read;
        return read;
}
111
 
112
static ssize_t write_mem(struct file * file, const char * buf,
113
                         size_t count, loff_t *ppos)
114
{
115
        unsigned long p = *ppos;
116
        unsigned long end_mem;
117
 
118
        end_mem = __pa(high_memory);
119
        if (p >= end_mem)
120
                return 0;
121
        if (count > end_mem - p)
122
                count = end_mem - p;
123
        return do_write_mem(file, __va(p), p, buf, count, ppos);
124
}
125
 
126
#ifndef pgprot_noncached

/*
 * This should probably be per-architecture in <asm/pgtable.h>
 */
/* Return 'prot' with caching disabled, using whatever page-table bits
 * the architecture provides for uncached mappings. */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

#if defined(__i386__) || defined(__x86_64__)
        /* On PPro and successors, PCD alone doesn't always mean
            uncached because of interactions with the MTRRs. PCD | PWT
            means definitely uncached. */
        if (boot_cpu_data.x86 > 3)
                prot |= _PAGE_PCD | _PAGE_PWT;
#elif defined(__powerpc__)
        prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#elif defined(__mc68000__)
#ifdef SUN3_PAGE_NOCACHE
        if (MMU_IS_SUN3)
                prot |= SUN3_PAGE_NOCACHE;
        else
#endif
        if (MMU_IS_851 || MMU_IS_030)
                prot |= _PAGE_NOCACHE030;
        /* Use no-cache mode, serialized */
        else if (MMU_IS_040 || MMU_IS_060)
                prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
#endif

        return __pgprot(prot);
}

#endif /* !pgprot_noncached */
160
 
161
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
/* Return nonzero if the physical address should be mapped uncached. */
static inline int noncached_address(unsigned long addr)
{
#if defined(__i386__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        return !( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) )
          && addr >= __pa(high_memory);
#else
        /* Default: anything past the end of RAM is treated as uncached. */
        return addr >= __pa(high_memory);
#endif
}
185
 
186
/* mmap() of /dev/mem: map the requested physical range straight into
 * the caller's address space with remap_page_range(). */
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

        /*
         * Accessing memory above the top the kernel knows about or
         * through a file pointer that was marked O_SYNC will be
         * done non-cached.
         */
        if (noncached_address(offset) || (file->f_flags & O_SYNC))
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* Don't try to swap out physical pages.. */
        vma->vm_flags |= VM_RESERVED;

        /*
         * Don't dump addresses that are not real memory to a core file.
         */
        if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
                vma->vm_flags |= VM_IO;

        if (remap_page_range(vma->vm_start, offset, vma->vm_end-vma->vm_start,
                             vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
212
 
213
/*
 * This function reads the *virtual* memory as seen by the kernel.
 *
 * Two phases: addresses below high_memory are in the kernel's direct
 * mapping and are copied straight out; anything above is vmalloc space
 * and is read page-by-page through vread() via a bounce page.
 */
static ssize_t read_kmem(struct file *file, char *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read = 0;       /* bytes taken from the direct mapping */
        ssize_t virtr = 0;      /* bytes taken from vmalloc space */
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                /* Clamp the low-memory leg to the direct-mapped region. */
                read = count;
                if (count > (unsigned long) high_memory - p)
                        read = (unsigned long) high_memory - p;

#if defined(__sparc__) || defined(__mc68000__)
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && read > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > read) tmp = read;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read -= tmp;
                        count -= tmp;
                }
#endif
                if (copy_to_user(buf, (char *)p, read))
                        return -EFAULT;
                p += read;
                buf += read;
                count -= read;
        }

        if (count > 0) {
                /* Remainder lies above high_memory: bounce through a
                 * kernel page so vread() can walk the vmlist safely. */
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        /* vread() returns 0 once past all vmalloc areas. */
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return virtr + read;
}
275
 
276
extern long vwrite(char *buf, char *addr, unsigned long count);
277
 
278
/*
279
 * This function writes to the *virtual* memory as seen by the kernel.
280
 */
281
static ssize_t write_kmem(struct file * file, const char * buf,
282
                          size_t count, loff_t *ppos)
283
{
284
        unsigned long p = *ppos;
285
        ssize_t wrote = 0;
286
        ssize_t virtr = 0;
287
        char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
288
 
289
        if (p < (unsigned long) high_memory) {
290
                wrote = count;
291
                if (count > (unsigned long) high_memory - p)
292
                        wrote = (unsigned long) high_memory - p;
293
 
294
                wrote = do_write_mem(file, (void*)p, p, buf, wrote, ppos);
295
 
296
                p += wrote;
297
                buf += wrote;
298
                count -= wrote;
299
        }
300
 
301
        if (count > 0) {
302
                kbuf = (char *)__get_free_page(GFP_KERNEL);
303
                if (!kbuf)
304
                        return -ENOMEM;
305
                while (count > 0) {
306
                        int len = count;
307
 
308
                        if (len > PAGE_SIZE)
309
                                len = PAGE_SIZE;
310
                        if (len && copy_from_user(kbuf, buf, len)) {
311
                                free_page((unsigned long)kbuf);
312
                                return -EFAULT;
313
                        }
314
                        len = vwrite(kbuf, (char *)p, len);
315
                        count -= len;
316
                        buf += len;
317
                        virtr += len;
318
                        p += len;
319
                }
320
                free_page((unsigned long)kbuf);
321
        }
322
 
323
        *ppos = p;
324
        return virtr + wrote;
325
}
326
 
327
#if defined(CONFIG_ISA) || !defined(__mc68000__)
328
static ssize_t read_port(struct file * file, char * buf,
329
                         size_t count, loff_t *ppos)
330
{
331
        unsigned long i = *ppos;
332
        char *tmp = buf;
333
 
334
        if (verify_area(VERIFY_WRITE,buf,count))
335
                return -EFAULT;
336
        while (count-- > 0 && i < 65536) {
337
                if (__put_user(inb(i),tmp) < 0)
338
                        return -EFAULT;
339
                i++;
340
                tmp++;
341
        }
342
        *ppos = i;
343
        return tmp-buf;
344
}
345
 
346
static ssize_t write_port(struct file * file, const char * buf,
347
                          size_t count, loff_t *ppos)
348
{
349
        unsigned long i = *ppos;
350
        const char * tmp = buf;
351
 
352
        if (verify_area(VERIFY_READ,buf,count))
353
                return -EFAULT;
354
        while (count-- > 0 && i < 65536) {
355
                char c;
356
                if (__get_user(c, tmp))
357
                        return -EFAULT;
358
                outb(c,i);
359
                i++;
360
                tmp++;
361
        }
362
        *ppos = i;
363
        return tmp-buf;
364
}
365
#endif
366
 
367
static ssize_t read_null(struct file * file, char * buf,
368
                         size_t count, loff_t *ppos)
369
{
370
        return 0;
371
}
372
 
373
static ssize_t write_null(struct file * file, const char * buf,
374
                          size_t count, loff_t *ppos)
375
{
376
        return count;
377
}
378
 
379
/*
 * For fun, we are using the MMU for this.
 */
/*
 * Zero-fill a page-aligned user range.  Private writable VMAs are
 * handled by dropping their pages and mapping in zero pages instead;
 * shared VMAs fall back to conventional clear_user() zeroing.
 * Returns the number of bytes NOT zeroed (0 on complete success).
 */
static inline size_t read_zero_pagealigned(char * buf, size_t size)
{
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long addr=(unsigned long)buf;

        mm = current->mm;
        /* Oops, this was forgotten before. -ben */
        down_read(&mm->mmap_sem);

        /* For private mappings, just map in zero pages. */
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
                unsigned long count;

                /* Stop at a hole or a read-only mapping; the caller gets
                 * the residual byte count back. */
                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
                if (vma->vm_flags & VM_SHARED)
                        break;
                count = vma->vm_end - addr;
                if (count > size)
                        count = size;

                /* Replace existing pages with zero-page mappings. */
                zap_page_range(mm, addr, count);
                zeromap_page_range(addr, count, PAGE_COPY);

                size -= count;
                buf += count;
                addr += count;
                if (size == 0)
                        goto out_up;
        }

        up_read(&mm->mmap_sem);

        /* The shared case is hard. Let's do the conventional zeroing. */
        do {
                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
                if (unwritten)
                        return size + unwritten - PAGE_SIZE;
                /* Stay preemption-friendly across large clears. */
                if (current->need_resched)
                        schedule();
                buf += PAGE_SIZE;
                size -= PAGE_SIZE;
        } while (size);

        return size;
out_up:
        up_read(&mm->mmap_sem);
        return size;
}
432
 
433
/*
 * read() from /dev/zero: fill the user buffer with zero bytes.
 * Large requests are split into a head fragment up to the next page
 * boundary, a page-aligned bulk done via read_zero_pagealigned(), and
 * a tail fragment; small requests use clear_user() directly.
 */
static ssize_t read_zero(struct file * file, char * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long left, unwritten, written = 0;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        left = count;

        /* do we want to be clever? Arbitrary cut-off */
        if (count >= PAGE_SIZE*4) {
                unsigned long partial;

                /* How much left of the page? */
                partial = (PAGE_SIZE-1) & -(unsigned long) buf;
                unwritten = clear_user(buf, partial);
                written = partial - unwritten;
                if (unwritten)
                        goto out;
                left -= partial;
                buf += partial;
                /* Bulk of the request: MMU-assisted page-aligned zeroing. */
                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
                written += (left & PAGE_MASK) - unwritten;
                if (unwritten)
                        goto out;
                buf += left & PAGE_MASK;
                left &= ~PAGE_MASK;
        }
        /* Tail fragment (or a small request): plain clear_user(). */
        unwritten = clear_user(buf, left);
        written += left - unwritten;
out:
        /* Partial progress counts as success; nothing written is -EFAULT. */
        return written ? written : -EFAULT;
}
470
 
471
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
472
{
473
        if (vma->vm_flags & VM_SHARED)
474
                return shmem_zero_setup(vma);
475
        if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
476
                return -EAGAIN;
477
        return 0;
478
}
479
 
480
static ssize_t write_full(struct file * file, const char * buf,
481
                          size_t count, loff_t *ppos)
482
{
483
        return -ENOSPC;
484
}
485
 
486
/*
487
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
488
 * can fopen() both devices with "a" now.  This was previously impossible.
489
 * -- SRB.
490
 */
491
 
492
/* Seeking on /dev/null or /dev/zero always lands (and stays) at 0. */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
        file->f_pos = 0;
        return 0;
}
496
 
497
/*
498
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
499
 * check against negative addresses: they are ok. The return value is weird,
500
 * though, in that case (0).
501
 *
502
 * also note that seeking relative to the "end of file" isn't supported:
503
 * it has no meaning, so it returns -EINVAL.
504
 */
505
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        loff_t newpos;

        switch (orig) {
        case 0:                         /* SEEK_SET */
                file->f_pos = offset;
                newpos = file->f_pos;
                force_successful_syscall_return();
                break;
        case 1:                         /* SEEK_CUR */
                file->f_pos += offset;
                newpos = file->f_pos;
                force_successful_syscall_return();
                break;
        default:                        /* SEEK_END has no meaning here */
                newpos = -EINVAL;
        }
        return newpos;
}
525
 
526
static int open_port(struct inode * inode, struct file * filp)
527
{
528
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
529
}
530
 
531
/*
 * nopage handler for mmap'ed /dev/kmem above high_memory: walk the
 * kernel (init_mm) page tables by hand to find the page backing the
 * faulting vmalloc address.  Returns the page with an extra reference
 * held, or NULL if the address is unmapped (or read-only on a write
 * fault).
 */
struct page *kmem_vm_nopage(struct vm_area_struct *vma, unsigned long address, int write)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long kaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *ptep, pte;
        struct page *page = NULL;

        /* address is user VA; convert to kernel VA of desired page */
        kaddr = (address - vma->vm_start) + offset;
        kaddr = VMALLOC_VMADDR(kaddr);

        spin_lock(&init_mm.page_table_lock);

        /* Lookup page structure for kernel VA */
        pgd = pgd_offset(&init_mm, kaddr);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                goto out;
        pmd = pmd_offset(pgd, kaddr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                goto out;
        ptep = pte_offset(pmd, kaddr);
        if (!ptep)
                goto out;
        pte = *ptep;
        if (!pte_present(pte))
                goto out;
        /* Refuse write faults against read-only kernel pages. */
        if (write && !pte_write(pte))
                goto out;
        page = pte_page(pte);
        if (!VALID_PAGE(page)) {
                page = NULL;
                goto out;
        }

        /* Increment reference count on page */
        get_page(page);

out:
        spin_unlock(&init_mm.page_table_lock);

        return page;
}
575
 
576
/* VM ops for high (vmalloc-space) /dev/kmem mappings; pages are
 * supplied lazily by kmem_vm_nopage(). */
struct vm_operations_struct kmem_vm_ops = {
        nopage:         kmem_vm_nopage,
};
579
 
580
/* mmap() of /dev/kmem: low addresses behave exactly like /dev/mem;
 * vmalloc-space addresses are faulted in through kmem_vm_nopage(). */
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;

        /*
         * If the user is not attempting to mmap a high memory address then
         * the standard mmap_mem mechanism will work.  High memory addresses
         * need special handling, as remap_page_range expects a physically-
         * contiguous range of kernel addresses (such as obtained in kmalloc).
         */
        if ((offset + size) < (unsigned long) high_memory)
                return mmap_mem(file, vma);

        /*
         * Accessing memory above the top the kernel knows about or
         * through a file pointer that was marked O_SYNC will be
         * done non-cached.
         */
        if (noncached_address(offset) || (file->f_flags & O_SYNC))
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* Don't do anything here; "nopage" will fill the holes */
        vma->vm_ops = &kmem_vm_ops;

        /* Don't try to swap out physical pages.. */
        vma->vm_flags |= VM_RESERVED;

        /*
         * Don't dump addresses that are not real memory to a core file.
         */
        vma->vm_flags |= VM_IO;

        return 0;
}
615
 
616
#define zero_lseek      null_lseek
617
#define full_lseek      null_lseek
618
#define write_zero      write_null
619
#define read_full       read_zero
620
#define open_mem        open_port
621
#define open_kmem       open_mem
622
 
623
/* /dev/mem (minor 1): physical memory. */
static struct file_operations mem_fops = {
        llseek:         memory_lseek,
        read:           read_mem,
        write:          write_mem,
        mmap:           mmap_mem,
        open:           open_mem,
};

/* /dev/kmem (minor 2): kernel virtual memory. */
static struct file_operations kmem_fops = {
        llseek:         memory_lseek,
        read:           read_kmem,
        write:          write_kmem,
        mmap:           mmap_kmem,
        open:           open_kmem,
};

/* /dev/null (minor 3). */
static struct file_operations null_fops = {
        llseek:         null_lseek,
        read:           read_null,
        write:          write_null,
};

#if defined(CONFIG_ISA) || !defined(__mc68000__)
/* /dev/port (minor 4): raw I/O-port access. */
static struct file_operations port_fops = {
        llseek:         memory_lseek,
        read:           read_port,
        write:          write_port,
        open:           open_port,
};
#endif

/* /dev/zero (minor 5). */
static struct file_operations zero_fops = {
        llseek:         zero_lseek,
        read:           read_zero,
        write:          write_zero,
        mmap:           mmap_zero,
};

/* /dev/full (minor 7): reads zeroes, writes fail with ENOSPC. */
static struct file_operations full_fops = {
        llseek:         full_lseek,
        read:           read_full,
        write:          write_full,
};
666
 
667
/*
 * open() for the mem major: swap in the file_operations matching the
 * minor number, then chain to that device's own open() if present.
 */
static int memory_open(struct inode * inode, struct file * filp)
{
        switch (MINOR(inode->i_rdev)) {
                case 1:                         /* /dev/mem */
                        filp->f_op = &mem_fops;
                        break;
                case 2:                         /* /dev/kmem */
                        filp->f_op = &kmem_fops;
                        break;
                case 3:                         /* /dev/null */
                        filp->f_op = &null_fops;
                        break;
#if defined(CONFIG_ISA) || !defined(__mc68000__)
                case 4:                         /* /dev/port */
                        filp->f_op = &port_fops;
                        break;
#endif
                case 5:                         /* /dev/zero */
                        filp->f_op = &zero_fops;
                        break;
                case 7:                         /* /dev/full */
                        filp->f_op = &full_fops;
                        break;
                case 8:                         /* /dev/random */
                        filp->f_op = &random_fops;
                        break;
                case 9:                         /* /dev/urandom */
                        filp->f_op = &urandom_fops;
                        break;
                default:
                        return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode,filp);
        return 0;
}
703
 
704
/* Create devfs entries for every memory-class minor at boot time. */
void __init memory_devfs_register (void)
{
    /*  These are never unregistered  */
    static const struct {
        unsigned short minor;
        char *name;
        umode_t mode;
        struct file_operations *fops;
    } list[] = { /* list of minor devices */
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if defined(CONFIG_ISA) || !defined(__mc68000__)
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops}
    };
    int i;

    for (i=0; i<(sizeof(list)/sizeof(*list)); i++)
        devfs_register (NULL, list[i].name, DEVFS_FL_NONE,
                        MEM_MAJOR, list[i].minor,
                        list[i].mode | S_IFCHR,
                        list[i].fops, NULL);
}
732
 
733
/* Catch-all fops registered for MEM_MAJOR; memory_open() dispatches
 * each open to the correct per-minor file_operations. */
static struct file_operations memory_fops = {
        open:           memory_open,    /* just a selector for the real open */
};
736
 
737
/*
 * Character-device bootstrap: register the mem major, create the devfs
 * entries, and initialize the compiled-in character subsystems.
 */
int __init chr_dev_init(void)
{
        if (devfs_register_chrdev(MEM_MAJOR,"mem",&memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);
        memory_devfs_register();
        rand_initialize();
#ifdef CONFIG_I2C
        i2c_init_all();
#endif
#if defined (CONFIG_FB)
        fbmem_init();
#endif
#if defined (CONFIG_PROM_CONSOLE)
        prom_con_init();
#endif
#if defined (CONFIG_MDA_CONSOLE)
        mda_console_init();
#endif
        tty_init();
#ifdef CONFIG_M68K_PRINTER
        lp_m68k_init();
#endif
        misc_init();
        /* Was "#if CONFIG_QIC02_TAPE": that only works because an undefined
         * macro evaluates to 0 (and it trips -Wundef); use #ifdef like
         * every other config test in this function. */
#ifdef CONFIG_QIC02_TAPE
        qic02_tape_init();
#endif
#ifdef CONFIG_FTAPE
        ftape_init();
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
        tapechar_init();
#endif
        return 0;
}
771
 
772
__initcall(chr_dev_init);

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.