linux_sd_driver/drivers/mtd/mtd_blkdevs.c (OpenCores test_project trunk, rev 62)
/*
 * $Id: mtd_blkdevs.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);

struct mtd_blkcore_priv {
        struct task_struct *thread;
        struct request_queue *rq;
        spinlock_t queue_lock;
};

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
{
        unsigned long block, nsect;
        char *buf;

        block = req->sector << 9 >> tr->blkshift;
        nsect = req->current_nr_sectors << 9 >> tr->blkshift;

        buf = req->buffer;

        if (!blk_fs_request(req))
                return 0;

        if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
                return 0;

        switch(rq_data_dir(req)) {
        case READ:
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->readsect(dev, block, buf))
                                return 0;
                return 1;

        case WRITE:
                if (!tr->writesect)
                        return 0;

                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
                                return 0;
                return 1;

        default:
                printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
                return 0;
        }
}
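
/*
 * Illustrative note (not in the original source): the shifts above convert
 * between the block layer's 512-byte sectors and the translation layer's
 * block size.  With a hypothetical tr->blksize of 1024 (so tr->blkshift is
 * ffs(1024) - 1 = 10), a request starting at sector 20 for 8 sectors maps to
 * block (20 << 9) >> 10 = 10 and nsect = (8 << 9) >> 10 = 4 blocks of 1024
 * bytes each.  For a blksize of 512 the shifts cancel and block numbers
 * equal sector numbers.
 */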
 
static int mtd_blktrans_thread(void *arg)
{
        struct mtd_blktrans_ops *tr = arg;
        struct request_queue *rq = tr->blkcore_priv->rq;

        /* we might get involved when memory gets low, so use PF_MEMALLOC */
        current->flags |= PF_MEMALLOC;

        spin_lock_irq(rq->queue_lock);
        while (!kthread_should_stop()) {
                struct request *req;
                struct mtd_blktrans_dev *dev;
                int res = 0;

                req = elv_next_request(rq);

                if (!req) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(rq->queue_lock);
                        schedule();
                        spin_lock_irq(rq->queue_lock);
                        continue;
                }

                dev = req->rq_disk->private_data;
                tr = dev->tr;

                spin_unlock_irq(rq->queue_lock);

                mutex_lock(&dev->lock);
                res = do_blktrans_request(tr, dev, req);
                mutex_unlock(&dev->lock);

                spin_lock_irq(rq->queue_lock);

                end_request(req, res);
        }
        spin_unlock_irq(rq->queue_lock);

        return 0;
}
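
/*
 * Editorial note (not in the original source): the request function below
 * only wakes the worker thread.  The block layer calls it with queue_lock
 * held, in atomic context, whereas the translation layer's readsect()/
 * writesect() path may need to sleep, so the actual transfer is performed
 * in mtd_blktrans_thread() above, under dev->lock and with the queue lock
 * dropped.
 */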
 
static void mtd_blktrans_request(struct request_queue *rq)
{
        struct mtd_blktrans_ops *tr = rq->queuedata;
        wake_up_process(tr->blkcore_priv->thread);
}
 
static int blktrans_open(struct inode *i, struct file *f)
{
        struct mtd_blktrans_dev *dev;
        struct mtd_blktrans_ops *tr;
        int ret = -ENODEV;

        dev = i->i_bdev->bd_disk->private_data;
        tr = dev->tr;

        if (!try_module_get(dev->mtd->owner))
                goto out;

        if (!try_module_get(tr->owner))
                goto out_tr;

        /* FIXME: Locking. A hot pluggable device can go away
           (del_mtd_device can be called for it) without its module
           being unloaded. */
        dev->mtd->usecount++;

        ret = 0;
        if (tr->open && (ret = tr->open(dev))) {
                dev->mtd->usecount--;
                module_put(dev->mtd->owner);
        out_tr:
                module_put(tr->owner);
        }
 out:
        return ret;
}
 
static int blktrans_release(struct inode *i, struct file *f)
{
        struct mtd_blktrans_dev *dev;
        struct mtd_blktrans_ops *tr;
        int ret = 0;

        dev = i->i_bdev->bd_disk->private_data;
        tr = dev->tr;

        if (tr->release)
                ret = tr->release(dev);

        if (!ret) {
                dev->mtd->usecount--;
                module_put(dev->mtd->owner);
                module_put(tr->owner);
        }

        return ret;
}
 
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

        if (dev->tr->getgeo)
                return dev->tr->getgeo(dev, geo);
        return -ENOTTY;
}
 
static int blktrans_ioctl(struct inode *inode, struct file *file,
                              unsigned int cmd, unsigned long arg)
{
        struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
        struct mtd_blktrans_ops *tr = dev->tr;

        switch (cmd) {
        case BLKFLSBUF:
                if (tr->flush)
                        return tr->flush(dev);
                /* The core code did the work, we had nothing to do. */
                return 0;
        default:
                return -ENOTTY;
        }
}
 
static struct block_device_operations mtd_blktrans_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .ioctl          = blktrans_ioctl,
        .getgeo         = blktrans_getgeo,
};
 
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct list_head *this;
        int last_devnum = -1;
        struct gendisk *gd;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        list_for_each(this, &tr->devs) {
                struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        if ((new->devnum << tr->part_bits) > 256) {
                return -EBUSY;
        }

        mutex_init(&new->lock);
        list_add_tail(&new->list, &tr->devs);
 added:
        if (!tr->writesect)
                new->readonly = 1;

        gd = alloc_disk(1 << tr->part_bits);
        if (!gd) {
                list_del(&new->list);
                return -ENOMEM;
        }
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->fops = &mtd_blktrans_ops;

        if (tr->part_bits)
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        else
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);

        /* 2.5 has capacity in units of 512 bytes while still
           having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
        set_capacity(gd, (new->size * tr->blksize) >> 9);

        gd->private_data = new;
        new->blkcore_priv = gd;
        gd->queue = tr->blkcore_priv->rq;

        if (new->readonly)
                set_disk_ro(gd, 1);

        add_disk(gd);

        return 0;
}
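
/*
 * Illustrative note (not in the original source): with part_bits set, disks
 * get a letter suffix, so a translation layer named "ftl" would register
 * "ftla" for devnum 0 and "ftlab" for devnum 27 ('a' - 1 + 27/26 = 'a',
 * 'a' + 27 % 26 = 'b'); with part_bits 0 the name is numeric, e.g.
 * "mtdblock0".  set_capacity() expects 512-byte sectors, so a hypothetical
 * device of new->size = 2048 blocks with tr->blksize = 1024 is registered
 * as (2048 * 1024) >> 9 = 4096 sectors.
 */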
 
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        list_del(&old->list);

        del_gendisk(old->blkcore_priv);
        put_disk(old->blkcore_priv);

        return 0;
}
 
static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct list_head *this, *this2, *next;

        list_for_each(this, &blktrans_majors) {
                struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

                list_for_each_safe(this2, next, &tr->devs) {
                        struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
                }
        }
}
 
static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct list_head *this;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each(this, &blktrans_majors) {
                struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

                tr->add_mtd(tr, mtd);
        }

}
 
static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};
 
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        int ret, i;

        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from fucking
           us over. */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
        if (!tr->blkcore_priv)
                return -ENOMEM;

        mutex_lock(&mtd_table_mutex);

        ret = register_blkdev(tr->major, tr->name);
        if (ret) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                kfree(tr->blkcore_priv);
                mutex_unlock(&mtd_table_mutex);
                return ret;
        }
        spin_lock_init(&tr->blkcore_priv->queue_lock);

        tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
        if (!tr->blkcore_priv->rq) {
                unregister_blkdev(tr->major, tr->name);
                kfree(tr->blkcore_priv);
                mutex_unlock(&mtd_table_mutex);
                return -ENOMEM;
        }

        tr->blkcore_priv->rq->queuedata = tr;
        blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
        tr->blkshift = ffs(tr->blksize) - 1;

        tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
                        "%sd", tr->name);
        if (IS_ERR(tr->blkcore_priv->thread)) {
                blk_cleanup_queue(tr->blkcore_priv->rq);
                unregister_blkdev(tr->major, tr->name);
                kfree(tr->blkcore_priv);
                mutex_unlock(&mtd_table_mutex);
                return PTR_ERR(tr->blkcore_priv->thread);
        }

        INIT_LIST_HEAD(&tr->devs);
        list_add(&tr->list, &blktrans_majors);

        for (i=0; i<MAX_MTD_DEVICES; i++) {
                if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd_table[i]);
        }

        mutex_unlock(&mtd_table_mutex);

        return 0;
}
 
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct list_head *this, *next;

        mutex_lock(&mtd_table_mutex);

        /* Clean up the kernel thread */
        kthread_stop(tr->blkcore_priv->thread);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_safe(this, next, &tr->devs) {
                struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
                tr->remove_dev(dev);
        }

        blk_cleanup_queue(tr->blkcore_priv->rq);
        unregister_blkdev(tr->major, tr->name);

        mutex_unlock(&mtd_table_mutex);

        kfree(tr->blkcore_priv);

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}
 
static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
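
/*
 * Illustrative usage sketch (not part of mtd_blkdevs.c): a minimal read-only
 * translation layer, modelled loosely on the in-tree mtdblock_ro driver, to
 * show how the exported register_mtd_blktrans()/add_mtd_blktrans_dev() API
 * above is intended to be driven.  All names here ("simple_ro", major 240)
 * are hypothetical, and the MTD accessors assume the same 2.6-era
 * function-pointer API (mtd->read, mtd->index, mtd->size) used by this tree.
 * readsect() runs from mtd_blktrans_thread() with dev->lock held, so it is
 * allowed to sleep.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>

static int simple_ro_readsect(struct mtd_blktrans_dev *dev,
                              unsigned long block, char *buf)
{
        size_t retlen;

        /* One 512-byte block per call; a non-zero return aborts the request. */
        if (dev->mtd->read(dev->mtd, block * 512, 512, &retlen, buf))
                return 1;
        return 0;
}

static void simple_ro_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
        struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return;

        dev->mtd = mtd;
        dev->devnum = mtd->index;      /* or -1 to take the first free number */
        dev->size = mtd->size >> 9;    /* in units of tr->blksize (512 here) */
        dev->tr = tr;
        dev->readonly = 1;

        add_mtd_blktrans_dev(dev);
}

static void simple_ro_remove_dev(struct mtd_blktrans_dev *dev)
{
        del_mtd_blktrans_dev(dev);
        kfree(dev);
}

static struct mtd_blktrans_ops simple_ro_tr = {
        .name           = "simple_ro",
        .major          = 240,          /* must be a fixed, unused block major:
                                           this core treats any non-zero
                                           register_blkdev() return as failure */
        .part_bits      = 0,            /* numeric names: simple_ro0, simple_ro1, ... */
        .blksize        = 512,
        .readsect       = simple_ro_readsect,
        /* no .writesect, so add_mtd_blktrans_dev() marks the disks read-only */
        .add_mtd        = simple_ro_add_mtd,
        .remove_dev     = simple_ro_remove_dev,
        .owner          = THIS_MODULE,
};

static int __init simple_ro_init(void)
{
        return register_mtd_blktrans(&simple_ro_tr);
}

static void __exit simple_ro_exit(void)
{
        deregister_mtd_blktrans(&simple_ro_tr);
}

module_init(simple_ro_init);
module_exit(simple_ro_exit);
MODULE_LICENSE("GPL");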
