OpenCores
URL https://opencores.org/ocsvn/funbase_ip_library/funbase_ip_library/trunk

Subversion Repositories funbase_ip_library

[/] [funbase_ip_library/] [trunk/] [TUT/] [ip.swp.api/] [openmcapi/] [1.0/] [libmcapi/] [shm/] [linux/] [kmod/] [common.c] - Blame information for rev 145

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 145 lanttu
/*
2
 * Copyright (c) 2010, Mentor Graphics Corporation
3
 * All rights reserved.
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions are met:
7
 *
8
 * 1. Redistributions of source code must retain the above copyright notice,
9
 *    this list of conditions and the following disclaimer.
10
 * 2. Redistributions in binary form must reproduce the above copyright notice,
11
 *    this list of conditions and the following disclaimer in the documentation
12
 *    and/or other materials provided with the distribution.
13
 * 3. Neither the name of the <ORGANIZATION> nor the names of its contributors
14
 *    may be used to endorse or promote products derived from this software
15
 *    without specific prior written permission.
16
 *
17
 * Alternatively, this software may be distributed under the terms of the
18
 * GNU General Public License ("GPL") version 2 as published by the Free
19
 * Software Foundation.
20
 *
21
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31
 * POSSIBILITY OF SUCH DAMAGE.
32
 */
33
 
34
#undef DEBUG
35
 
36
#include <linux/kernel.h>
37
#include <linux/mm.h>
38
#include <linux/module.h>
39
#include <linux/list.h>
40
#include <linux/miscdevice.h>
41
#include <linux/ioport.h>
42
#include <linux/kdev_t.h>
43
#include <linux/fs.h>
44
#include <linux/cdev.h>
45
#include <linux/types.h>
46
#include <linux/platform_device.h>
47
#include <linux/device.h>
48
#include <linux/interrupt.h>
49
#include <linux/io.h>
50
#include <linux/wait.h>
51
#include <linux/sched.h>
52
#include <linux/uaccess.h>
53
#include <linux/hugetlb.h>
54
#include <linux/highmem.h>
55
 
56
#include "mcomm.h"
57
#include "mcomm_compat.h"
58
 
59
/* Driver-wide state for the single supported shared-memory region. */
struct mcomm_devdata {
        wait_queue_head_t wait; /* All waiting processes sleep here */
        struct cdev cdev;       /* NOTE(review): appears unused in this file — confirm */
        struct resource mem;    /* physical shared-memory region (copied in mcomm_new_region) */
        void __iomem *mbox_mapped; /* kernel mapping of the mailbox area */
        void *platform_data;    /* NOTE(review): appears unused in this file — confirm */
        atomic_t refcount;      /* live channel fds from MCOMM_INIT; teardown at zero */
        unsigned int irq;       /* notification irq, or NO_IRQ for polled operation */
        unsigned int nr_mboxes; /* number of mailboxes in the mapped area */
        unsigned int mbox_size; /* width of the per-mailbox head word: 1 or 4 bytes */
        unsigned int mbox_stride; /* distance in bytes between consecutive mailboxes */
};
71
 
72
/* Only supports a single mcomm region. */
static struct mcomm_devdata _mcomm_devdata;

/* Platform hooks (cpuid/notify/ack/map/mmap_pgprot) installed by mcomm_init(). */
static struct mcomm_platform_ops *mcomm_platform_ops;
76
 
77
 
78
 
79
/* Wake up the process(es) corresponding to the mailbox(es) which just received
80
 * packets. */
81
static irqreturn_t mcomm_interrupt(int irq, void *dev_id)
82
{
83
        struct mcomm_devdata *devdata = dev_id;
84
        void __iomem *mbox;
85
        int i;
86
 
87
        mbox = devdata->mbox_mapped;
88
        for (i = 0; i < devdata->nr_mboxes; i++) {
89
                int active;
90
 
91
                switch (devdata->mbox_size) {
92
                case 1:
93
                        active = readb(mbox);
94
                        break;
95
                case 4:
96
                        active = readl(mbox);
97
                        break;
98
                default:
99
                        active = 0;
100
                }
101
 
102
                if (active) {
103
                        pr_debug("%s: waking mbox %d\n", __func__, i);
104
                        wake_up_interruptible(&devdata->wait);
105
                }
106
                mbox += devdata->mbox_stride;
107
        }
108
 
109
        if (irq != NO_IRQ)
110
                mcomm_platform_ops->ack();
111
 
112
        return IRQ_HANDLED;
113
}
114
 
115
static int mcomm_mbox_pending(struct mcomm_devdata *devdata,
116
                              mcomm_mbox_t mbox_id)
117
{
118
        unsigned long mbox_offset;
119
        int active;
120
 
121
        mbox_offset = devdata->mbox_stride * mbox_id;
122
 
123
        switch (devdata->mbox_size) {
124
        case 1:
125
                active = readb(devdata->mbox_mapped + mbox_offset);
126
                break;
127
        case 4:
128
                active = readl(devdata->mbox_mapped + mbox_offset);
129
                break;
130
        default:
131
                active = 0;
132
        }
133
 
134
        if (active)
135
                pr_debug("mailbox %d (0x%lx) active; value 0x%x\n", mbox_id,
136
                         mbox_offset, active);
137
        else
138
                pr_debug("mailbox %d (0x%lx) not active\n", mbox_id, mbox_offset);
139
 
140
        return active;
141
}
142
 
143
static long mcomm_fd_ioctl_wait_read(struct mcomm_devdata *devdata,
144
                                     mcomm_mbox_t mbox_id)
145
{
146
        if (devdata->irq == NO_IRQ)
147
                return 0;
148
 
149
        return wait_event_interruptible(devdata->wait,
150
                                        mcomm_mbox_pending(devdata, mbox_id));
151
}
152
 
153
static long mcomm_fd_ioctl_notify(struct mcomm_devdata *devdata,
154
                                  mcomm_core_t target_core)
155
{
156
        /* If the target is the local core, call the interrupt handler directly. */
157
        if (target_core == mcomm_platform_ops->cpuid())
158
                mcomm_interrupt(NO_IRQ, devdata);
159
        else
160
                mcomm_platform_ops->notify(target_core);
161
 
162
        return 0;
163
}
164
 
165
/*
 * ioctl handler for the per-channel fds handed out by MCOMM_INIT.
 * 'arg' is a user-space pointer to the per-command argument struct.
 * Returns 0 on success, -EFAULT on a bad user pointer, -EINVAL for an
 * unknown command, or the helper's return value.
 */
static long mcomm_fd_ioctl(struct file *fp, unsigned int ioctl,
                           unsigned long arg)
{
        struct mcomm_devdata *devdata = &_mcomm_devdata;
        void __user *userptr = (void __user *)arg;
        long rc;

        switch (ioctl) {
        case MCOMM_CPUID: {
                /* Report the local core id to user space. */
                u32 cpuid = mcomm_platform_ops->cpuid();

                rc = -EFAULT;
                if (copy_to_user(userptr, &cpuid, sizeof(cpuid)) == 0)
                        rc = 0;
                break;
        }

        case MCOMM_WAIT_READ: {
                /* Sleep until the named mailbox has data pending. */
                mcomm_mbox_t mbox_id;

                rc = -EFAULT;
                if (copy_from_user(&mbox_id, userptr, sizeof(mbox_id)) == 0) {
                        pr_debug("%s: sleeping mbox %d\n", __func__, mbox_id);
                        rc = mcomm_fd_ioctl_wait_read(devdata, mbox_id);
                        pr_debug("%s: mbox %d woke up\n", __func__, mbox_id);
                }
                break;
        }

        case MCOMM_NOTIFY: {
                /* Kick another (or the local) core to check its mailboxes. */
                mcomm_core_t core_id;

                rc = -EFAULT;
                if (copy_from_user(&core_id, userptr, sizeof(core_id)) == 0) {
                        pr_debug("%s: waking core %d\n", __func__, core_id);
                        rc = mcomm_fd_ioctl_notify(devdata, core_id);
                }

                break;
        }

        default:
                rc = -EINVAL;
        }

        return rc;
}
212
 
213
/*
 * Walk the page tables of 'mm' and return the pte mapping 'address'.
 * On success (*ptepp, *ptlp) refer to the pte and its held page-table
 * lock; the caller must drop both with pte_unmap_unlock().  Returns
 * -EINVAL when the address is not mapped by a present, non-huge pte.
 *
 * NOTE(review): this looks like a local copy of the kernel's unexported
 * follow_pte() — confirm against the kernel version this module targets.
 */
static int __mcomm_follow_pte(struct mm_struct *mm, unsigned long address,
                pte_t **ptepp, spinlock_t **ptlp)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep;

        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                goto out;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
                goto out;

        pmd = pmd_offset(pud, address);
        VM_BUG_ON(pmd_trans_huge(*pmd));
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                goto out;

        /* We cannot handle huge page PFN maps. Luckily they don't exist. */
        if (pmd_huge(*pmd))
                goto out;

        /* Takes the page-table lock; held on the success path below. */
        ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
        if (!ptep)
                goto out;
        if (!pte_present(*ptep))
                goto unlock;
        *ptepp = ptep;
        return 0;
unlock:
        pte_unmap_unlock(ptep, *ptlp);
out:
        return -EINVAL;
}
250
 
251
/*
 * Sparse-friendly wrapper for __mcomm_follow_pte(): the __cond_lock
 * annotation tells sparse that *ptlp is acquired exactly when the call
 * returns 0.  Behaves identically to __mcomm_follow_pte() at runtime.
 */
static inline int mcomm_follow_pte(struct mm_struct *mm, unsigned long address,
                             pte_t **ptepp, spinlock_t **ptlp)
{
        int res;

        /* (void) is needed to make gcc happy */
        (void) __cond_lock(*ptlp,
                           !(res = __mcomm_follow_pte(mm, address, ptepp, ptlp)));
        return res;
}
261
 
262
#ifdef CONFIG_HAVE_IOREMAP_PROT
263
/*
 * Resolve 'address' within 'vma' to its physical address (*phys) and
 * page protection bits (*prot).  Only meaningful for VM_IO/VM_PFNMAP
 * mappings such as those created by mcomm_mmap().  Returns 0 on
 * success, -EINVAL otherwise.
 *
 * NOTE(review): 'flags' is tested against FOLL_WRITE, but the only
 * caller (mcomm_access_phys) passes its raw 0/1 'write' argument —
 * that works only if FOLL_WRITE is bit 0; verify.
 */
static int mcomm_follow_phys(struct vm_area_struct *vma,
                unsigned long address, unsigned int flags,
                unsigned long *prot, resource_size_t *phys)
{
        int ret = -EINVAL;
        pte_t *ptep, pte;
        spinlock_t *ptl;

        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                goto out;

        /* On success the page-table lock is held until pte_unmap_unlock. */
        if (mcomm_follow_pte(vma->vm_mm, address, &ptep, &ptl))
                goto out;
        pte = *ptep;

        /* Refuse a write through a read-only pte. */
        if ((flags & FOLL_WRITE) && !pte_write(pte))
                goto unlock;

        *prot = pgprot_val(pte_pgprot(pte));
        *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;

        ret = 0;
unlock:
        pte_unmap_unlock(ptep, ptl);
out:
        return ret;
}
290
 
291
static int mcomm_access_phys(struct vm_area_struct *vma, unsigned long addr,
292
                             void *buf, int len, int write)
293
{
294
        resource_size_t phys_addr = 0;
295
        unsigned long prot = 0;
296
        void __iomem *maddr;
297
        int offset = addr & (PAGE_SIZE-1);
298
 
299
        if (mcomm_follow_phys(vma, addr, write, &prot, &phys_addr))
300
                return -EINVAL;
301
 
302
        maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
303
        if (write)
304
                memcpy_toio(maddr + offset, buf, len);
305
        else
306
                memcpy_fromio(buf, maddr + offset, len);
307
        iounmap(maddr);
308
 
309
        return len;
310
}
311
#endif
312
 
313
/* VMA callbacks for mcomm mappings.  Only .access is provided (and only
 * when ioremap_prot exists), so debuggers can poke the mapped memory. */
static const struct vm_operations_struct mmap_mcomm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = mcomm_access_phys
#endif
};
318
 
319
static int mcomm_mmap(struct file *file, struct vm_area_struct *vma)
320
{
321
        struct mcomm_devdata *devdata = &_mcomm_devdata;
322
        unsigned long start_page;
323
 
324
        if ((vma->vm_end - vma->vm_start) > resource_size(&devdata->mem))
325
                return -ENOMEM;
326
 
327
        vma->vm_page_prot = mcomm_platform_ops->mmap_pgprot(vma);
328
        vma->vm_ops = &mmap_mcomm_ops;
329
 
330
        start_page = devdata->mem.start >> PAGE_SHIFT;
331
        return remap_pfn_range(vma, vma->vm_start,
332
                               start_page + vma->vm_pgoff,
333
                               vma->vm_end - vma->vm_start,
334
                               vma->vm_page_prot);
335
}
336
 
337
/* Last close of a channel fd: tear down the irq handler and the mailbox
 * mapping created by mcomm_dev_initialize().  Pairs with the refcount
 * taken in mcomm_dev_ioctl_init(). */
static int mcomm_fd_release(struct inode *inode, struct file *fp)
{
        struct mcomm_devdata *devdata = &_mcomm_devdata;

        /* XXX what happens to a thread blocked in ioctl? */

        if (atomic_dec_and_test(&devdata->refcount)) {
                if (devdata->irq != NO_IRQ)
                        free_irq(devdata->irq, devdata);
                iounmap(devdata->mbox_mapped);
        }

        return 0;
}
351
 
352
/* File operations for the per-channel fds returned by MCOMM_INIT.
 * .owner is filled in by mcomm_init(). */
static struct file_operations mcomm_fd_fops = {
        .release        = mcomm_fd_release,
        .unlocked_ioctl = mcomm_fd_ioctl,
        .compat_ioctl   = mcomm_fd_ioctl,
        .mmap           = mcomm_mmap,
};
358
 
359
static long mcomm_dev_initialize(struct mcomm_devdata *devdata, u32 offset,
360
                                 mcomm_mbox_t nr_mboxes, u32 mbox_size,
361
                                 u32 mbox_stride)
362
{
363
        resource_size_t mbox_paddr;
364
        long rc;
365
 
366
        if (offset + nr_mboxes * mbox_stride >= resource_size(&devdata->mem)) {
367
                printk(KERN_ERR "%s: mailboxes exceed memory area.\n", __func__);
368
                rc = -E2BIG;
369
                goto out1;
370
        }
371
 
372
        switch (mbox_size) {
373
        case 1:
374
        case 4:
375
                break;
376
        default:
377
                printk(KERN_ERR "%s: unsupported mailbox size %d\n", __func__,
378
                       mbox_size);
379
                rc = -EINVAL;
380
                goto out1;
381
        }
382
 
383
        /* Map only the memory encompassing the mailboxes. */
384
        mbox_paddr = devdata->mem.start + offset;
385
        devdata->mbox_mapped = mcomm_platform_ops->map(mbox_paddr,
386
                                                       nr_mboxes * mbox_stride);
387
        if (devdata->mbox_mapped == NULL) {
388
                printk(KERN_ERR "%s: failed to map the mailboxes.\n", __func__);
389
                rc = -EFAULT;
390
                goto out1;
391
        }
392
 
393
        devdata->mbox_size = mbox_size;
394
        devdata->mbox_stride = mbox_stride;
395
        devdata->nr_mboxes = nr_mboxes;
396
 
397
        if (devdata->irq != NO_IRQ) {
398
                rc = request_irq(devdata->irq, mcomm_interrupt, 0, "mcomm",
399
                                                 devdata);
400
                if (rc) {
401
                        printk(KERN_ERR "%s: failed to reserve irq %d\n", __func__,
402
                                   devdata->irq);
403
                        goto out2;
404
                }
405
        }
406
 
407
        return 0;
408
 
409
out2:
410
        iounmap(devdata->mbox_mapped);
411
out1:
412
        return rc;
413
}
414
 
415
/*
 * MCOMM_INIT handler: the first caller maps the mailboxes and installs
 * the irq handler; subsequent callers must supply a matching layout.
 * Returns a new anonymous-inode channel fd on success, or a negative
 * errno.
 *
 * NOTE(review): the inc-then-configure sequence is not serialized by a
 * lock, so two racing first-time MCOMM_INIT calls could both see the
 * uninitialized state — confirm callers serialize this path.
 */
static long mcomm_dev_ioctl_init(struct mcomm_devdata *devdata, u32 offset,
                                 mcomm_mbox_t nr_mboxes, u32 mbox_size,
                                 u32 mbox_stride)
{
        long rc;

        if (atomic_inc_return(&devdata->refcount) > 1) {
                /* Already initialized: accept only an identical layout.
                 * (offset is not re-checked here — TODO confirm that is
                 * intentional.) */
                if ( (nr_mboxes != devdata->nr_mboxes) ||
                     (mbox_size != devdata->mbox_size) ||
                     (mbox_stride != devdata->mbox_stride)) {
                        printk(KERN_ERR "%s: new configuration doesn't match old configuration.\n", __func__);
                        rc = -EBUSY;
                        goto out1;
                }
        } else {
                rc = mcomm_dev_initialize(devdata, offset, nr_mboxes, mbox_size,
                                          mbox_stride);
                if (rc)
                        goto out1;
        }

        return mcomm_anon_inode_getfd("mcomm", &mcomm_fd_fops, devdata, O_RDWR);

out1:
        /* Drop the reference taken above on any failure. */
        atomic_dec(&devdata->refcount);
        return rc;
}
442
 
443
static long mcomm_dev_ioctl(struct file *fp, unsigned int ioctl,
444
                            unsigned long arg)
445
{
446
        struct mcomm_devdata *devdata = &_mcomm_devdata;
447
        void __user *userptr = (void __user *)arg;
448
        long rc;
449
 
450
        switch (ioctl) {
451
        case MCOMM_INIT: {
452
                struct mcomm_init_device args;
453
 
454
                rc = -EFAULT;
455
                if (copy_from_user(&args, userptr, sizeof(args)) == 0)
456
                        rc = mcomm_dev_ioctl_init(devdata, args.offset, args.nr_mboxes,
457
                                                  args.mbox_size, args.mbox_stride);
458
                break;
459
        }
460
 
461
        default:
462
                rc = -EINVAL;
463
        }
464
 
465
        return rc;
466
}
467
 
468
/* Nothing to set up per open; all state lives in the global
 * _mcomm_devdata and is initialized via MCOMM_INIT. */
static int mcomm_dev_open(struct inode *inode, struct file *fp)
{
        return 0;
}
472
 
473
/* File operations for the /dev/mcomm0 control device.
 * .owner is filled in by mcomm_init(). */
static struct file_operations mcomm_dev_fops = {
        .open           = mcomm_dev_open,
        .unlocked_ioctl = mcomm_dev_ioctl,
        .compat_ioctl   = mcomm_dev_ioctl,
        .mmap           = mcomm_mmap,
};
479
 
480
 
481
static ssize_t mcomm_show_region_addr(struct device *dev,
482
                                      struct device_attribute *attr,
483
                                      char *buf)
484
{
485
        struct mcomm_devdata *devdata = &_mcomm_devdata;
486
 
487
        return sprintf(buf, "0x%llx\n", (unsigned long long)devdata->mem.start);
488
}
489
static DEVICE_ATTR(address, 0444, mcomm_show_region_addr, NULL);
490
 
491
static ssize_t mcomm_show_region_size(struct device *dev,
492
                                      struct device_attribute *attr,
493
                                      char *buf)
494
{
495
        struct mcomm_devdata *devdata = &_mcomm_devdata;
496
 
497
        return sprintf(buf, "0x%llx\n",
498
                       (unsigned long long)resource_size(&devdata->mem));
499
}
500
static DEVICE_ATTR(size, 0444, mcomm_show_region_size, NULL);
501
 
502
/* sysfs attributes ("address", "size") registered on the provider's
 * device in mcomm_new_region(). */
static struct attribute *mcomm_attributes[] = {
        &dev_attr_size.attr,
        &dev_attr_address.attr,
        NULL
};

static struct attribute_group mcomm_attr_group = {
        .attrs = mcomm_attributes,
};
511
 
512
/* /dev/mcomm0 control device; MCOMM_INIT on it yields channel fds. */
struct miscdevice mcomm_misc_dev = {
        .fops = &mcomm_dev_fops,
        .minor = MISC_DYNAMIC_MINOR,
        .name = "mcomm0",
};
517
 
518
int mcomm_new_region(struct device *dev, struct resource *mem,
519
                     struct resource *irq)
520
{
521
        struct mcomm_devdata *devdata = &_mcomm_devdata;
522
        int rc;
523
        static int initialized;
524
 
525
        if (initialized++)
526
                return -EEXIST;
527
 
528
        init_waitqueue_head(&devdata->wait);
529
        devdata->mem = *mem;
530
        devdata->irq = irq->start;
531
 
532
        rc = sysfs_create_group(&dev->kobj, &mcomm_attr_group);
533
        if (rc) {
534
                printk(KERN_WARNING "%s: Failed to register sysfs attributes.\n",
535
                       __func__);
536
                goto out1;
537
        }
538
 
539
        rc = misc_register(&mcomm_misc_dev);
540
        if (rc) {
541
                printk("%s misc_register error %d\n", __func__, rc);
542
                goto out2;
543
        }
544
 
545
        return 0;
546
 
547
out2:
548
        sysfs_remove_group(&dev->kobj, &mcomm_attr_group);
549
out1:
550
        return rc;
551
}
552
EXPORT_SYMBOL(mcomm_new_region);
553
 
554
/* Undo mcomm_new_region(): remove the misc device and the sysfs group.
 * NOTE(review): the static 'initialized' guard inside mcomm_new_region
 * is not reset here, so a region cannot be registered again afterwards
 * — confirm that is intended. */
void mcomm_remove_region(struct device *dev)
{
        misc_deregister(&mcomm_misc_dev);
        sysfs_remove_group(&dev->kobj, &mcomm_attr_group);
}
EXPORT_SYMBOL(mcomm_remove_region);
560
 
561
int mcomm_init(struct mcomm_platform_ops *ops, struct module *module)
562
{
563
        int rc;
564
 
565
        rc = mcomm_init_anon_inodes();
566
        if (rc)
567
                goto out1;
568
 
569
        mcomm_platform_ops = ops;
570
 
571
        mcomm_dev_fops.owner = module;
572
        mcomm_fd_fops.owner = module;
573
 
574
        return 0;
575
 
576
out1:
577
        return rc;
578
}
579
EXPORT_SYMBOL(mcomm_init);
580
 
581
/* Module teardown: release the anon-inode machinery set up by
 * mcomm_init(). */
void mcomm_exit(void)
{
        mcomm_exit_anon_inodes();
}
EXPORT_SYMBOL(mcomm_exit);
586
 
587
MODULE_LICENSE("GPL v2");
588
MODULE_AUTHOR("Hollis Blanchard <hollis_blanchard@mentor.com>");
589
MODULE_DESCRIPTION("Shared memory communications channel");

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.