OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

[/] [test_project/] [trunk/] [linux_sd_driver/] [drivers/] [uio/] [uio.c] - Blame information for rev 62

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 62 marcus.erl
/*
2
 * drivers/uio/uio.c
3
 *
4
 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
5
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
6
 * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de>
7
 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
8
 *
9
 * Userspace IO
10
 *
11
 * Base Functions
12
 *
13
 * Licensed under the GPLv2 only.
14
 */
15
 
16
#include <linux/module.h>
17
#include <linux/init.h>
18
#include <linux/poll.h>
19
#include <linux/device.h>
20
#include <linux/mm.h>
21
#include <linux/idr.h>
22
#include <linux/string.h>
23
#include <linux/kobject.h>
24
#include <linux/uio_driver.h>
25
 
26
#define UIO_MAX_DEVICES 255
27
 
28
/*
 * Per-device state for one registered UIO device.  Allocated in
 * __uio_register_device() and freed in uio_unregister_device().
 */
struct uio_device {
        struct module           *owner;         /* module that registered the device */
        struct device           *dev;           /* class device backing /dev/uio<minor> */
        int                     minor;          /* minor number allocated via uio_get_minor() */
        atomic_t                event;          /* interrupt event counter, reported by uio_read() */
        struct fasync_struct    *async_queue;   /* SIGIO listeners (see uio_fasync()) */
        wait_queue_head_t       wait;           /* readers/pollers blocked on a new event */
        int                     vma_count;      /* number of live mmap()ed VMAs */
        struct uio_info         *info;          /* driver-supplied capabilities and mappings */
        struct kset             map_attr_kset;  /* sysfs "maps" directory for mapX kobjects */
};
39
 
40
/* char major number; assigned by uio_major_init() on first registration */
static int uio_major;
/* maps minor numbers to struct uio_device; filled by uio_get_minor() */
static DEFINE_IDR(uio_idr);
/* forward declaration; defined after the fops implementations below */
static struct file_operations uio_fops;

/* UIO class infrastructure */
static struct uio_class {
        struct kref kref;       /* one reference per registered device */
        struct class *class;    /* the shared "uio" device class */
} *uio_class;
49
 
50
/*
51
 * attributes
52
 */
53
 
54
/* sysfs attribute "addr": physical/virtual base address of a mapping */
static struct attribute attr_addr = {
        .name  = "addr",
        .mode  = S_IRUGO,
};

/* sysfs attribute "size": length of a mapping in bytes */
static struct attribute attr_size = {
        .name  = "size",
        .mode  = S_IRUGO,
};

/* default attributes attached to every mapX kobject (see map_attr_type) */
static struct attribute* map_attrs[] = {
        &attr_addr, &attr_size, NULL
};
67
 
68
static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
69
                             char *buf)
70
{
71
        struct uio_mem *mem = container_of(kobj, struct uio_mem, kobj);
72
 
73
        if (strncmp(attr->name,"addr",4) == 0)
74
                return sprintf(buf, "0x%lx\n", mem->addr);
75
 
76
        if (strncmp(attr->name,"size",4) == 0)
77
                return sprintf(buf, "0x%lx\n", mem->size);
78
 
79
        return -ENODEV;
80
}
81
 
82
/*
 * Release callback for mapX kobjects.  Intentionally empty: the
 * kobject is embedded in the driver-owned uio_info->mem[] array,
 * so there is nothing for the kobject core to free here.
 */
static void map_attr_release(struct kobject *kobj)
{
        /* TODO ??? */
}
86
 
87
/* sysfs ops for mapX kobjects; read-only, so no .store */
static struct sysfs_ops map_attr_ops = {
        .show  = map_attr_show,
};

/* kobj_type for mapX kobjects: addr/size attributes, no-op release */
static struct kobj_type map_attr_type = {
        .release        = map_attr_release,
        .sysfs_ops      = &map_attr_ops,
        .default_attrs  = map_attrs,
};
96
 
97
static ssize_t show_name(struct device *dev,
98
                         struct device_attribute *attr, char *buf)
99
{
100
        struct uio_device *idev = dev_get_drvdata(dev);
101
        if (idev)
102
                return sprintf(buf, "%s\n", idev->info->name);
103
        else
104
                return -ENODEV;
105
}
106
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
107
 
108
static ssize_t show_version(struct device *dev,
109
                            struct device_attribute *attr, char *buf)
110
{
111
        struct uio_device *idev = dev_get_drvdata(dev);
112
        if (idev)
113
                return sprintf(buf, "%s\n", idev->info->version);
114
        else
115
                return -ENODEV;
116
}
117
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
118
 
119
static ssize_t show_event(struct device *dev,
120
                          struct device_attribute *attr, char *buf)
121
{
122
        struct uio_device *idev = dev_get_drvdata(dev);
123
        if (idev)
124
                return sprintf(buf, "%u\n",
125
                                (unsigned int)atomic_read(&idev->event));
126
        else
127
                return -ENODEV;
128
}
129
static DEVICE_ATTR(event, S_IRUGO, show_event, NULL);
130
 
131
/* attributes shown directly on the uioX device: name, version, event */
static struct attribute *uio_attrs[] = {
        &dev_attr_name.attr,
        &dev_attr_version.attr,
        &dev_attr_event.attr,
        NULL,
};

/* attribute group registered in uio_dev_add_attributes() */
static struct attribute_group uio_attr_grp = {
        .attrs = uio_attrs,
};
141
 
142
/*
143
 * device functions
144
 */
145
/*
 * Create the sysfs files for a newly registered device: the
 * name/version/event attribute group on the device itself plus, if the
 * driver declared any memory mappings, a "maps" kset containing one
 * mapX kobject (with addr/size attributes) per mapping.
 *
 * Returns 0 on success; on failure everything created so far is torn
 * down again and a negative errno is returned.
 */
static int uio_dev_add_attributes(struct uio_device *idev)
{
        int ret;
        int mi;
        int map_found = 0;
        struct uio_mem *mem;

        ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp);
        if (ret)
                goto err_group;

        for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
                mem = &idev->info->mem[mi];
                /* a zero-sized entry terminates the mapping list */
                if (mem->size == 0)
                        break;
                if (!map_found) {
                        /* register the "maps" directory lazily, only once
                         * we know at least one mapping exists */
                        map_found = 1;
                        kobject_set_name(&idev->map_attr_kset.kobj,"maps");
                        idev->map_attr_kset.ktype = &map_attr_type;
                        idev->map_attr_kset.kobj.parent = &idev->dev->kobj;
                        ret = kset_register(&idev->map_attr_kset);
                        if (ret)
                                goto err_remove_group;
                }
                /* old-style (pre-2.6.25) kobject API: init, name, then add */
                kobject_init(&mem->kobj);
                kobject_set_name(&mem->kobj,"map%d",mi);
                mem->kobj.parent = &idev->map_attr_kset.kobj;
                mem->kobj.kset = &idev->map_attr_kset;
                ret = kobject_add(&mem->kobj);
                if (ret)
                        goto err_remove_maps;
        }

        return 0;

err_remove_maps:
        /* unwind only the mapX kobjects that were registered so far */
        for (mi--; mi>=0; mi--) {
                mem = &idev->info->mem[mi];
                kobject_unregister(&mem->kobj);
        }
        kset_unregister(&idev->map_attr_kset); /* Needed ? */
err_remove_group:
        sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
err_group:
        dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
        return ret;
}
192
 
193
static void uio_dev_del_attributes(struct uio_device *idev)
194
{
195
        int mi;
196
        struct uio_mem *mem;
197
        for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
198
                mem = &idev->info->mem[mi];
199
                if (mem->size == 0)
200
                        break;
201
                kobject_unregister(&mem->kobj);
202
        }
203
        kset_unregister(&idev->map_attr_kset);
204
        sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
205
}
206
 
207
/*
 * Allocate a minor number for a new device and record @idev in
 * uio_idr so uio_open() can find it by minor.  Returns 0 on success
 * or a negative errno.
 */
static int uio_get_minor(struct uio_device *idev)
{
        static DEFINE_MUTEX(minor_lock);
        int retval = -ENOMEM;
        int id;

        mutex_lock(&minor_lock);
        /* old two-phase idr API: pre-allocate, then grab an id */
        if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
                goto exit;

        retval = idr_get_new(&uio_idr, idev, &id);
        if (retval < 0) {
                /* -EAGAIN means the preallocated layer was consumed
                 * concurrently; report it as out-of-memory instead */
                if (retval == -EAGAIN)
                        retval = -ENOMEM;
                goto exit;
        }
        idev->minor = id & MAX_ID_MASK;
exit:
        mutex_unlock(&minor_lock);
        return retval;
}
228
 
229
/* Return the device's minor number to the idr allocator. */
static void uio_free_minor(struct uio_device *idev)
{
        idr_remove(&uio_idr, idev->minor);
}
233
 
234
/**
 * uio_event_notify - trigger an interrupt event
 * @info: UIO device capabilities
 */
void uio_event_notify(struct uio_info *info)
{
        struct uio_device *idev = info->uio_dev;

        /* bump the counter before waking so sleepers in uio_read()
         * observe the new value when they re-check it */
        atomic_inc(&idev->event);
        wake_up_interruptible(&idev->wait);
        kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(uio_event_notify);
247
 
248
/**
249
 * uio_interrupt - hardware interrupt handler
250
 * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
251
 * @dev_id: Pointer to the devices uio_device structure
252
 */
253
static irqreturn_t uio_interrupt(int irq, void *dev_id)
254
{
255
        struct uio_device *idev = (struct uio_device *)dev_id;
256
        irqreturn_t ret = idev->info->handler(irq, idev->info);
257
 
258
        if (ret == IRQ_HANDLED)
259
                uio_event_notify(idev->info);
260
 
261
        return ret;
262
}
263
 
264
/*
 * Per-open-file state: which device this fd refers to, and the last
 * event count delivered to this listener (so each reader blocks
 * independently until a newer event arrives).
 */
struct uio_listener {
        struct uio_device *dev;
        s32 event_count;
};
268
 
269
static int uio_open(struct inode *inode, struct file *filep)
270
{
271
        struct uio_device *idev;
272
        struct uio_listener *listener;
273
        int ret = 0;
274
 
275
        idev = idr_find(&uio_idr, iminor(inode));
276
        if (!idev)
277
                return -ENODEV;
278
 
279
        listener = kmalloc(sizeof(*listener), GFP_KERNEL);
280
        if (!listener)
281
                return -ENOMEM;
282
 
283
        listener->dev = idev;
284
        listener->event_count = atomic_read(&idev->event);
285
        filep->private_data = listener;
286
 
287
        if (idev->info->open) {
288
                if (!try_module_get(idev->owner))
289
                        return -ENODEV;
290
                ret = idev->info->open(idev->info, inode);
291
                module_put(idev->owner);
292
        }
293
 
294
        if (ret)
295
                kfree(listener);
296
 
297
        return ret;
298
}
299
 
300
static int uio_fasync(int fd, struct file *filep, int on)
301
{
302
        struct uio_listener *listener = filep->private_data;
303
        struct uio_device *idev = listener->dev;
304
 
305
        return fasync_helper(fd, filep, on, &idev->async_queue);
306
}
307
 
308
static int uio_release(struct inode *inode, struct file *filep)
309
{
310
        int ret = 0;
311
        struct uio_listener *listener = filep->private_data;
312
        struct uio_device *idev = listener->dev;
313
 
314
        if (idev->info->release) {
315
                if (!try_module_get(idev->owner))
316
                        return -ENODEV;
317
                ret = idev->info->release(idev->info, inode);
318
                module_put(idev->owner);
319
        }
320
        if (filep->f_flags & FASYNC)
321
                ret = uio_fasync(-1, filep, 0);
322
        kfree(listener);
323
        return ret;
324
}
325
 
326
/*
 * poll() on /dev/uioX: readable when the device's event counter has
 * advanced past what this listener last saw.  Devices without an
 * interrupt can never become readable, so report -EIO for them.
 */
static unsigned int uio_poll(struct file *filep, poll_table *wait)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        unsigned int mask = 0;

        if (idev->info->irq == UIO_IRQ_NONE)
                return -EIO;

        poll_wait(filep, &idev->wait, wait);
        if (atomic_read(&idev->event) != listener->event_count)
                mask = POLLIN | POLLRDNORM;
        return mask;
}
339
 
340
/*
 * read() on /dev/uioX: block until the interrupt event counter
 * differs from what this listener last saw, then copy the new 32-bit
 * counter value to userspace.  @count must be exactly sizeof(s32).
 * Honours O_NONBLOCK (-EAGAIN) and pending signals (-ERESTARTSYS).
 */
static ssize_t uio_read(struct file *filep, char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        DECLARE_WAITQUEUE(wait, current);
        ssize_t retval;
        s32 event_count;

        if (idev->info->irq == UIO_IRQ_NONE)
                return -EIO;

        if (count != sizeof(s32))
                return -EINVAL;

        add_wait_queue(&idev->wait, &wait);

        do {
                /* set the task state BEFORE re-checking the condition
                 * so a wakeup between check and schedule() isn't lost */
                set_current_state(TASK_INTERRUPTIBLE);

                event_count = atomic_read(&idev->event);
                if (event_count != listener->event_count) {
                        if (copy_to_user(buf, &event_count, count))
                                retval = -EFAULT;
                        else {
                                /* remember what was reported so the next
                                 * read blocks until a newer event */
                                listener->event_count = event_count;
                                retval = count;
                        }
                        break;
                }

                if (filep->f_flags & O_NONBLOCK) {
                        retval = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                schedule();
        } while (1);

        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&idev->wait, &wait);

        return retval;
}
388
 
389
static int uio_find_mem_index(struct vm_area_struct *vma)
390
{
391
        int mi;
392
        struct uio_device *idev = vma->vm_private_data;
393
 
394
        for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
395
                if (idev->info->mem[mi].size == 0)
396
                        return -1;
397
                if (vma->vm_pgoff == mi)
398
                        return mi;
399
        }
400
        return -1;
401
}
402
 
403
/* VMA open hook: track how many mappings of this device are alive. */
static void uio_vma_open(struct vm_area_struct *vma)
{
        struct uio_device *idev = vma->vm_private_data;
        idev->vma_count++;
}
408
 
409
/* VMA close hook: counterpart of uio_vma_open(). */
static void uio_vma_close(struct vm_area_struct *vma)
{
        struct uio_device *idev = vma->vm_private_data;
        idev->vma_count--;
}
414
 
415
/*
 * nopage handler (legacy pre-->fault interface) for logically and
 * virtually mapped regions: resolve the faulting VMA to its uio_mem
 * entry and hand back the backing page, or NOPAGE_SIGBUS if the VMA
 * matches no declared mapping.
 */
static struct page *uio_vma_nopage(struct vm_area_struct *vma,
                                   unsigned long address, int *type)
{
        struct uio_device *idev = vma->vm_private_data;
        struct page* page = NOPAGE_SIGBUS;

        int mi = uio_find_mem_index(vma);
        if (mi < 0)
                return page;

        /* kmalloc'ed memory resolves via virt_to_page(); vmalloc'ed
         * memory needs vmalloc_to_page() */
        if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
                page = virt_to_page(idev->info->mem[mi].addr);
        else
                page = vmalloc_to_page((void*)idev->info->mem[mi].addr);
        /* hold a reference for the page-table entry being installed */
        get_page(page);
        if (type)
                *type = VM_FAULT_MINOR;
        return page;
}
434
 
435
/* VMA operations for logical/virtual mappings (see uio_mmap_logical) */
static struct vm_operations_struct uio_vm_ops = {
        .open = uio_vma_open,
        .close = uio_vma_close,
        .nopage = uio_vma_nopage,
};
440
 
441
static int uio_mmap_physical(struct vm_area_struct *vma)
442
{
443
        struct uio_device *idev = vma->vm_private_data;
444
        int mi = uio_find_mem_index(vma);
445
        if (mi < 0)
446
                return -EINVAL;
447
 
448
        vma->vm_flags |= VM_IO | VM_RESERVED;
449
 
450
        return remap_pfn_range(vma,
451
                               vma->vm_start,
452
                               idev->info->mem[mi].addr >> PAGE_SHIFT,
453
                               vma->vm_end - vma->vm_start,
454
                               vma->vm_page_prot);
455
}
456
 
457
static int uio_mmap_logical(struct vm_area_struct *vma)
458
{
459
        vma->vm_flags |= VM_RESERVED;
460
        vma->vm_ops = &uio_vm_ops;
461
        uio_vma_open(vma);
462
        return 0;
463
}
464
 
465
/*
 * mmap() on /dev/uioX: validate the request (the offset must name a
 * declared mapping and the length must fit it), then either delegate
 * to the driver's own mmap() hook or dispatch on the mapping's
 * memtype.  Returns 0 or a negative errno.
 */
static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        int mi;
        unsigned long requested_pages, actual_pages;
        int ret = 0;

        if (vma->vm_end < vma->vm_start)
                return -EINVAL;

        /* must be set before uio_find_mem_index() reads it */
        vma->vm_private_data = idev;

        mi = uio_find_mem_index(vma);
        if (mi < 0)
                return -EINVAL;

        /* reject mappings longer than the region (rounded up to pages) */
        requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        actual_pages = (idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
        if (requested_pages > actual_pages)
                return -EINVAL;

        /* a driver-supplied mmap() hook overrides the generic paths */
        if (idev->info->mmap) {
                if (!try_module_get(idev->owner))
                        return -ENODEV;
                ret = idev->info->mmap(idev->info, vma);
                module_put(idev->owner);
                return ret;
        }

        switch (idev->info->mem[mi].memtype) {
                case UIO_MEM_PHYS:
                        return uio_mmap_physical(vma);
                case UIO_MEM_LOGICAL:
                case UIO_MEM_VIRTUAL:
                        return uio_mmap_logical(vma);
                default:
                        return -EINVAL;
        }
}
505
 
506
/* file operations backing every /dev/uioX char device */
static struct file_operations uio_fops = {
        .owner          = THIS_MODULE,
        .open           = uio_open,
        .release        = uio_release,
        .read           = uio_read,
        .mmap           = uio_mmap,
        .poll           = uio_poll,
        .fasync         = uio_fasync,
};
515
 
516
static int uio_major_init(void)
517
{
518
        uio_major = register_chrdev(0, "uio", &uio_fops);
519
        if (uio_major < 0)
520
                return uio_major;
521
        return 0;
522
}
523
 
524
/* Counterpart of uio_major_init(): release the "uio" char major. */
static void uio_major_cleanup(void)
{
        unregister_chrdev(uio_major, "uio");
}
528
 
529
static int init_uio_class(void)
530
{
531
        int ret = 0;
532
 
533
        if (uio_class != NULL) {
534
                kref_get(&uio_class->kref);
535
                goto exit;
536
        }
537
 
538
        /* This is the first time in here, set everything up properly */
539
        ret = uio_major_init();
540
        if (ret)
541
                goto exit;
542
 
543
        uio_class = kzalloc(sizeof(*uio_class), GFP_KERNEL);
544
        if (!uio_class) {
545
                ret = -ENOMEM;
546
                goto err_kzalloc;
547
        }
548
 
549
        kref_init(&uio_class->kref);
550
        uio_class->class = class_create(THIS_MODULE, "uio");
551
        if (IS_ERR(uio_class->class)) {
552
                ret = IS_ERR(uio_class->class);
553
                printk(KERN_ERR "class_create failed for uio\n");
554
                goto err_class_create;
555
        }
556
        return 0;
557
 
558
err_class_create:
559
        kfree(uio_class);
560
        uio_class = NULL;
561
err_kzalloc:
562
        uio_major_cleanup();
563
exit:
564
        return ret;
565
}
566
 
567
/*
 * kref release callback: the last registered device is gone, so tear
 * down the class and char major created by init_uio_class().
 */
static void release_uio_class(struct kref *kref)
{
        /* Ok, we cheat as we know we only have one uio_class */
        class_destroy(uio_class->class);
        kfree(uio_class);
        uio_major_cleanup();
        uio_class = NULL;
}
575
 
576
/* Drop one reference on the shared class; frees it when it hits zero. */
static void uio_class_destroy(void)
{
        if (uio_class)
                kref_put(&uio_class->kref, release_uio_class);
}
581
 
582
/**
 * uio_register_device - register a new userspace IO device
 * @owner:      module that creates the new device
 * @parent:     parent device
 * @info:       UIO device capabilities
 *
 * returns zero on success or a negative error code.
 */
int __uio_register_device(struct module *owner,
                          struct device *parent,
                          struct uio_info *info)
{
        struct uio_device *idev;
        int ret = 0;

        if (!parent || !info || !info->name || !info->version)
                return -EINVAL;

        info->uio_dev = NULL;

        /* creates class + char major on first registration */
        ret = init_uio_class();
        if (ret)
                return ret;

        idev = kzalloc(sizeof(*idev), GFP_KERNEL);
        if (!idev) {
                ret = -ENOMEM;
                goto err_kzalloc;
        }

        idev->owner = owner;
        idev->info = info;
        init_waitqueue_head(&idev->wait);
        atomic_set(&idev->event, 0);

        ret = uio_get_minor(idev);
        if (ret)
                goto err_get_minor;

        idev->dev = device_create(uio_class->class, parent,
                                  MKDEV(uio_major, idev->minor),
                                  "uio%d", idev->minor);
        if (IS_ERR(idev->dev)) {
                printk(KERN_ERR "UIO: device register failed\n");
                ret = PTR_ERR(idev->dev);
                goto err_device_create;
        }
        /* let the sysfs show() callbacks reach the uio_device */
        dev_set_drvdata(idev->dev, idev);

        ret = uio_dev_add_attributes(idev);
        if (ret)
                goto err_uio_dev_add_attributes;

        info->uio_dev = idev;

        /* negative irq values are the special UIO_IRQ_* markers;
         * only request a real hardware interrupt line */
        if (idev->info->irq >= 0) {
                ret = request_irq(idev->info->irq, uio_interrupt,
                                  idev->info->irq_flags, idev->info->name, idev);
                if (ret)
                        goto err_request_irq;
        }

        return 0;

        /* error unwind, in reverse order of construction */
err_request_irq:
        uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
        device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
err_device_create:
        uio_free_minor(idev);
err_get_minor:
        kfree(idev);
err_kzalloc:
        uio_class_destroy();
        return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);
659
 
660
/**
 * uio_unregister_device - unregister a industrial IO device
 * @info:       UIO device capabilities
 *
 */
void uio_unregister_device(struct uio_info *info)
{
        struct uio_device *idev;

        if (!info || !info->uio_dev)
                return;

        idev = info->uio_dev;

        /* NOTE(review): the minor is released before free_irq(), so a
         * racing open() could momentarily miss the device while its
         * IRQ is still live — presumably tolerated here; verify if
         * stricter teardown ordering is needed */
        uio_free_minor(idev);

        if (info->irq >= 0)
                free_irq(info->irq, idev);

        uio_dev_del_attributes(idev);

        dev_set_drvdata(idev->dev, NULL);
        device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
        kfree(idev);
        /* drops the class reference taken in __uio_register_device() */
        uio_class_destroy();

        return;
}
EXPORT_SYMBOL_GPL(uio_unregister_device);
689
 
690
/*
 * Module init/exit are intentionally empty: all real setup is done
 * lazily in init_uio_class() when the first device registers, and
 * torn down via uio_class_destroy() when the last one unregisters.
 */
static int __init uio_init(void)
{
        return 0;
}

static void __exit uio_exit(void)
{
}

module_init(uio_init)
module_exit(uio_exit)
MODULE_LICENSE("GPL v2");

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.