OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

[/] [test_project/] [trunk/] [linux_sd_driver/] [drivers/] [char/] [mem.c] - Blame information for rev 62

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 62 marcus.erl
/*
2
 *  linux/drivers/char/mem.c
3
 *
4
 *  Copyright (C) 1991, 1992  Linus Torvalds
5
 *
6
 *  Added devfs support.
7
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9
 */
10
 
11
#include <linux/mm.h>
12
#include <linux/miscdevice.h>
13
#include <linux/slab.h>
14
#include <linux/vmalloc.h>
15
#include <linux/mman.h>
16
#include <linux/random.h>
17
#include <linux/init.h>
18
#include <linux/raw.h>
19
#include <linux/tty.h>
20
#include <linux/capability.h>
21
#include <linux/ptrace.h>
22
#include <linux/device.h>
23
#include <linux/highmem.h>
24
#include <linux/crash_dump.h>
25
#include <linux/backing-dev.h>
26
#include <linux/bootmem.h>
27
#include <linux/splice.h>
28
#include <linux/pfn.h>
29
 
30
#include <asm/uaccess.h>
31
#include <asm/io.h>
32
 
33
#ifdef CONFIG_IA64
34
# include <linux/efi.h>
35
#endif
36
 
37
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
/*
 * uncached_access() - should a /dev/mem access of @addr bypass the cache?
 *
 * Returns non-zero when the physical address @addr must be accessed
 * uncached for the open file @file.  Policy is architecture specific;
 * each preprocessor branch below documents its own reasoning.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__) && !defined(__arch_um__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        /* Uncached only when no MTRR-like mechanism exists AND the
         * address is above the directly mapped kernel memory. */
        return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
          && addr >= __pa(high_memory);
#elif defined(__x86_64__) && !defined(__arch_um__)
        /*
         * This is broken because it can generate memory type aliases,
         * which can cause cache corruptions
         * But it is only available for root and we have to be bug-to-bug
         * compatible with i386.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        /* same behaviour as i386. PAT always set to cached and MTRRs control the
           caching behaviour.
           Hopefully a full PAT implementation will fix that soon. */
        return 0;
#elif defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                /* MIPS delegates the decision to an arch-provided helper. */
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top the kernel knows about or through a file pointer
         * that was marked O_SYNC will be done non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
95
 
96
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
97
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
98
{
99
        if (addr + count > __pa(high_memory))
100
                return 0;
101
 
102
        return 1;
103
}
104
 
105
static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
106
{
107
        return 1;
108
}
109
#endif
110
 
111
/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 *
 * Returns the number of bytes copied into @buf, or -EFAULT when the range
 * is not valid physical memory or the user buffer cannot be written.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;        /* physical address to read from */
        ssize_t read, sz;
        char *ptr;                      /* kernel-virtual alias of p */

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        /* Page 0 is unreadable here, so hand back zeroes. */
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

                if (copy_to_user(buf, ptr, sz))
                        return -EFAULT;
                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}
171
 
172
/*
 * Write to *physical* memory; f_pos is the physical address.
 *
 * Returns the number of bytes actually written (which may be short if a
 * user-space fault occurs mid-transfer), or -EFAULT when nothing could
 * be written at all or the range is not valid physical memory.
 */
static ssize_t write_mem(struct file * file, const char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;        /* physical address to write to */
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                /* Writes to page 0 are silently discarded but still
                 * accounted as written. */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        /* Partial copy: report what made it through, or
                         * -EFAULT if this was the very first chunk. */
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}
233
 
234
#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
/*
 * Default page protection for mmap()ing physical memory: make the
 * mapping non-cacheable when uncached_access() requires it and the
 * architecture defines pgprot_noncached(); otherwise return the
 * caller-supplied protection unchanged.
 */
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        unsigned long offset = pfn << PAGE_SHIFT;       /* physical address */

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif
247
 
248
#ifndef CONFIG_MMU
/*
 * On NOMMU, /dev/mem mappings must be direct: the "unmapped area" for a
 * physical range is simply the physical address itself.
 */
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else
/* With an MMU the core mm chooses the area; no special hook needed. */
#define get_unmapped_area_mem   NULL

/* Private (copy-on-write) mappings are always fine with an MMU. */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif
273
 
274
/*
 * mmap() handler for /dev/mem: map the physical range starting at
 * vma->vm_pgoff directly into the caller's address space.
 *
 * Returns 0 on success, -EINVAL for an invalid physical range,
 * -ENOSYS for a private mapping on NOMMU, or -EAGAIN if the remap fails.
 */
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        /* Possibly switch to an uncached mapping (see uncached_access()). */
        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
297
 
298
/*
 * mmap() handler for /dev/kmem: the file offset is a kernel-virtual
 * address, which is translated to a physical pfn and then handed to
 * mmap_mem().  Returns -EIO for pfns without a struct page.
 */
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        /* Rewrite the offset so mmap_mem() sees a physical pfn. */
        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}
318
 
319
#ifdef CONFIG_CRASH_DUMP
320
/*
321
 * Read memory corresponding to the old kernel.
322
 */
323
static ssize_t read_oldmem(struct file *file, char __user *buf,
324
                                size_t count, loff_t *ppos)
325
{
326
        unsigned long pfn, offset;
327
        size_t read = 0, csize;
328
        int rc = 0;
329
 
330
        while (count) {
331
                pfn = *ppos / PAGE_SIZE;
332
                if (pfn > saved_max_pfn)
333
                        return read;
334
 
335
                offset = (unsigned long)(*ppos % PAGE_SIZE);
336
                if (count > PAGE_SIZE - offset)
337
                        csize = PAGE_SIZE - offset;
338
                else
339
                        csize = count;
340
 
341
                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
342
                if (rc < 0)
343
                        return rc;
344
                buf += csize;
345
                *ppos += csize;
346
                read += csize;
347
                count -= csize;
348
        }
349
        return read;
350
}
351
#endif
352
 
353
extern long vread(char *buf, char *addr, unsigned long count);
354
extern long vwrite(char *buf, char *addr, unsigned long count);
355
 
356
/*
 * This function reads the *virtual* memory as seen by the kernel.
 *
 * Two phases: addresses below high_memory are copied directly from the
 * linear mapping; anything above is assumed to be vmalloc space and is
 * fetched via vread() through a bounce page.  Returns bytes read,
 * -EFAULT on a user-buffer fault, or -ENOMEM if the bounce page cannot
 * be allocated.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;        /* kernel-virtual address */
        ssize_t low_count, read, sz;
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        read = 0;
        if (p < (unsigned long) high_memory) {
                /* Phase 1: clamp to the directly mapped region. */
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > low_count) tmp = low_count;
                        /* Unreadable page 0: return zeroes instead. */
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read += tmp;
                        low_count -= tmp;
                        count -= tmp;
                }
#endif
                while (low_count > 0) {
                        /*
                         * Handle first page in case it's not aligned
                         */
                        if (-p & (PAGE_SIZE - 1))
                                sz = -p & (PAGE_SIZE - 1);
                        else
                                sz = PAGE_SIZE;

                        sz = min_t(unsigned long, sz, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                /* Phase 2: vmalloc space, copied via a bounce page. */
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;  /* nothing mapped here: stop */
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        read += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read;
}
440
 
441
 
442
/*
 * Core of the /dev/kmem write path for the directly mapped region.
 * @p is the kernel-virtual destination, @realp the same address as an
 * integer (used for page-alignment math and the page-0 special case).
 *
 * NOTE(review): `p += sz` performs arithmetic on a void pointer — a GCC
 * extension (treated as byte arithmetic), fine for kernel builds but
 * not portable ISO C.
 *
 * Returns bytes written (possibly short on a mid-transfer fault) or
 * -EFAULT if nothing could be copied at all.
 */
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
              size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                /* Writes to page 0 are dropped but counted as written. */
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-realp & (PAGE_SIZE - 1))
                        sz = -realp & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_kmem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        /* Partial copy: report progress, or fail outright
                         * if this was the first chunk. */
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}
501
 
502
 
503
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 *
 * Mirrors read_kmem(): the directly mapped region goes through
 * do_write_kmem(); vmalloc addresses go through a bounce page and
 * vwrite().  Returns total bytes written or a negative errno.
 *
 * NOTE(review): unlike read_kmem(), the vwrite() loop does not break
 * when vwrite() returns 0 — if that can happen for an unmapped region,
 * this loop would not terminate.  Verify vwrite()'s contract before
 * changing this.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;        /* kernel-virtual address */
        ssize_t wrote = 0;              /* bytes written below high_memory */
        ssize_t virtr = 0;              /* bytes written to vmalloc space */
        ssize_t written;
        char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {

                /* Clamp the first phase to the directly mapped region. */
                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                written = do_write_kmem((void*)p, p, buf, wrote, ppos);
                if (written != wrote)
                        return written; /* short write or error: stop here */
                wrote = written;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len) {
                                written = copy_from_user(kbuf, buf, len);
                                if (written) {
                                        /* Fault: keep partial progress if
                                         * any, else report -EFAULT. */
                                        if (wrote + virtr)
                                                break;
                                        free_page((unsigned long)kbuf);
                                        return -EFAULT;
                                }
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}
560
 
561
#ifdef CONFIG_DEVPORT
562
static ssize_t read_port(struct file * file, char __user * buf,
563
                         size_t count, loff_t *ppos)
564
{
565
        unsigned long i = *ppos;
566
        char __user *tmp = buf;
567
 
568
        if (!access_ok(VERIFY_WRITE, buf, count))
569
                return -EFAULT;
570
        while (count-- > 0 && i < 65536) {
571
                if (__put_user(inb(i),tmp) < 0)
572
                        return -EFAULT;
573
                i++;
574
                tmp++;
575
        }
576
        *ppos = i;
577
        return tmp-buf;
578
}
579
 
580
static ssize_t write_port(struct file * file, const char __user * buf,
581
                          size_t count, loff_t *ppos)
582
{
583
        unsigned long i = *ppos;
584
        const char __user * tmp = buf;
585
 
586
        if (!access_ok(VERIFY_READ,buf,count))
587
                return -EFAULT;
588
        while (count-- > 0 && i < 65536) {
589
                char c;
590
                if (__get_user(c, tmp)) {
591
                        if (tmp > buf)
592
                                break;
593
                        return -EFAULT;
594
                }
595
                outb(c,i);
596
                i++;
597
                tmp++;
598
        }
599
        *ppos = i;
600
        return tmp-buf;
601
}
602
#endif
603
 
604
/* Reads from /dev/null always report end-of-file. */
static ssize_t read_null(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}
609
 
610
/* Writes to /dev/null succeed, discarding the data unread. */
static ssize_t write_null(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return count;
}
615
 
616
/* Splice callback for /dev/null: claim the whole buffer, copy nothing. */
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}
621
 
622
/* splice() into /dev/null: drain the pipe through pipe_to_null(). */
static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
627
 
628
/*
 * Read from /dev/zero: fill the user buffer with zero bytes in
 * PAGE_SIZE chunks, rescheduling between chunks to bound latency.
 * Returns bytes zeroed, or -EFAULT if the very first chunk faults.
 */
static ssize_t read_zero(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        size_t written;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        written = 0;
        while (count) {
                unsigned long unwritten;
                size_t chunk = count;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                unwritten = clear_user(buf, chunk);
                written += chunk - unwritten;
                if (unwritten)
                        break;          /* fault: report partial progress */
                buf += chunk;
                count -= chunk;
                cond_resched();
        }
        return written ? written : -EFAULT;
}
656
 
657
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
658
{
659
#ifndef CONFIG_MMU
660
        return -ENOSYS;
661
#endif
662
        if (vma->vm_flags & VM_SHARED)
663
                return shmem_zero_setup(vma);
664
        return 0;
665
}
666
 
667
/* Writes to /dev/full always fail with "no space left on device". */
static ssize_t write_full(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}
672
 
673
/*
674
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
675
 * can fopen() both devices with "a" now.  This was previously impossible.
676
 * -- SRB.
677
 */
678
 
679
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
680
{
681
        return file->f_pos = 0;
682
}
683
 
684
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        loff_t ret;

        /* Serialize against concurrent seeks/reads updating f_pos. */
        mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
        switch (orig) {
                case 0:                 /* SEEK_SET */
                        file->f_pos = offset;
                        ret = file->f_pos;
                        /* A "negative" offset is a valid address here, so
                         * tell the syscall layer not to treat it as errno. */
                        force_successful_syscall_return();
                        break;
                case 1:                 /* SEEK_CUR */
                        file->f_pos += offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                default:                /* SEEK_END unsupported */
                        ret = -EINVAL;
        }
        mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
        return ret;
}
714
 
715
static int open_port(struct inode * inode, struct file * filp)
716
{
717
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
718
}
719
 
720
/* Several devices share identical handlers; alias them rather than
 * duplicating code. */
#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem

/* /dev/mem (minor 1): physical memory. */
static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
};

/* /dev/kmem (minor 2): kernel virtual memory. */
static const struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
        .get_unmapped_area = get_unmapped_area_mem,
};

/* /dev/null (minor 3). */
static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .splice_write   = splice_write_null,
};

#ifdef CONFIG_DEVPORT
/* /dev/port (minor 4): raw I/O port access. */
static const struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

/* /dev/zero (minor 5). */
static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
        .capabilities   = BDI_CAP_MAP_COPY,
};

/* /dev/full (minor 7): reads zeroes, writes fail with ENOSPC. */
static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};
789
 
790
/*
 * Write to /dev/kmsg: inject a user-supplied string into the kernel log
 * via printk().  Returns the number of bytes consumed or a negative errno.
 */
static ssize_t kmsg_write(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        /* Nothing to log; skip the allocation entirely. */
        if (!count)
                return 0;

        /* Guard the "+ 1" below against size_t wrap-around, which would
         * request a zero/tiny buffer and overflow it at tmp[count]. */
        if (count == (size_t)-1)
                return -ENOMEM;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;         /* NUL-terminate before printk */
                ret = printk("%s", tmp);
                if (ret > count)
                        /* printk can add a prefix */
                        ret = count;
        }
        kfree(tmp);
        return ret;
}
810
 
811
/* /dev/kmsg (minor 11): write-only log injection. */
static const struct file_operations kmsg_fops = {
        .write =        kmsg_write,
};
814
 
815
/*
 * Open handler for the shared MEM_MAJOR character device: dispatch on
 * the minor number to the per-device file_operations, then chain to
 * that device's own open() if it has one.  Unknown minors get -ENXIO.
 *
 * random_fops/urandom_fops (minors 8/9) are provided by
 * drivers/char/random.c.
 */
static int memory_open(struct inode * inode, struct file * filp)
{
        switch (iminor(inode)) {
                case 1:
                        filp->f_op = &mem_fops;
                        /* Direct mappings: use the cdev-wide BDI. */
                        filp->f_mapping->backing_dev_info =
                                &directly_mappable_cdev_bdi;
                        break;
                case 2:
                        filp->f_op = &kmem_fops;
                        filp->f_mapping->backing_dev_info =
                                &directly_mappable_cdev_bdi;
                        break;
                case 3:
                        filp->f_op = &null_fops;
                        break;
#ifdef CONFIG_DEVPORT
                case 4:
                        filp->f_op = &port_fops;
                        break;
#endif
                case 5:
                        /* /dev/zero allows private COW mappings. */
                        filp->f_mapping->backing_dev_info = &zero_bdi;
                        filp->f_op = &zero_fops;
                        break;
                case 7:
                        filp->f_op = &full_fops;
                        break;
                case 8:
                        filp->f_op = &random_fops;
                        break;
                case 9:
                        filp->f_op = &urandom_fops;
                        break;
                case 11:
                        filp->f_op = &kmsg_fops;
                        break;
#ifdef CONFIG_CRASH_DUMP
                case 12:
                        filp->f_op = &oldmem_fops;
                        break;
#endif
                default:
                        return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode,filp);
        return 0;
}
864
 
865
static const struct file_operations memory_fops = {
        .open           = memory_open,  /* just a selector for the real open */
};

/* Table driving device-node creation in chr_dev_init(): one entry per
 * memory-class minor, with its node name and default permissions. */
static const struct {
        unsigned int            minor;
        char                    *name;
        umode_t                 mode;
        const struct file_operations    *fops;
} devlist[] = { /* list of minor devices */
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
        {11,"kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
        {12,"oldmem",    S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

/* Device class under which the nodes above are registered. */
static struct class *mem_class;
892
 
893
/*
 * Register the memory character devices (major MEM_MAJOR) and create
 * their device nodes from devlist[].  Runs at fs_initcall time.
 */
static int __init chr_dev_init(void)
{
        int i;
        int err;

        err = bdi_init(&zero_bdi);
        if (err)
                return err;

        if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        /*
         * class_create() returns an ERR_PTR() on failure; passing that
         * straight into device_create() would dereference a bad pointer.
         */
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        for (i = 0; i < ARRAY_SIZE(devlist); i++)
                device_create(mem_class, NULL,
                              MKDEV(MEM_MAJOR, devlist[i].minor),
                              devlist[i].name);

        return 0;
}
913
 
914
fs_initcall(chr_dev_init);

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.