/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use its own CAP_* category instead of CAP_SYS_ADMIN
 *
 * WARNING/FIXME:
 * - Passing the block number as IV to low-level transfer functions is broken:
 *   it passes the underlying device's block number instead of the
 *   offset. This makes it change for a given block when the file is
 *   moved/restored/copied and also doesn't work over NFS.
 * AV, Feb 12, 2000: we pass the logical block number now. It fixes the
 *   problem above. Encryption modules that used to rely on the old scheme
 *   should just call ->i_mapping->bmap() to calculate the physical block
 *   number.
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blk.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include <linux/loop.h>

#define MAJOR_NR LOOP_MAJOR

static int max_loop = 8;
static struct loop_device *loop_dev;
static int *loop_sizes;
static int *loop_blksizes;
static devfs_handle_t devfs_handle;      /*  For the directory */

/*
 * Transfer functions
 */
static int transfer_none(struct loop_device *lo, int cmd, char *raw_buf,
                         char *loop_buf, int size, int real_block)
{
        if (raw_buf != loop_buf) {
                if (cmd == READ)
                        memcpy(loop_buf, raw_buf, size);
                else
                        memcpy(raw_buf, loop_buf, size);
        }

        return 0;
}

static int transfer_xor(struct loop_device *lo, int cmd, char *raw_buf,
                        char *loop_buf, int size, int real_block)
{
        char    *in, *out, *key;
        int     i, keysize;

        if (cmd == READ) {
                in = raw_buf;
                out = loop_buf;
        } else {
                in = loop_buf;
                out = raw_buf;
        }

        key = lo->lo_encrypt_key;
        keysize = lo->lo_encrypt_key_size;
        for (i = 0; i < size; i++)
                *out++ = *in++ ^ key[(i & 511) % keysize];
        return 0;
}
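
/*
 * Worked example (illustrative): with keysize = 5, byte 512 is XORed
 * with key[(512 & 511) % 5] = key[0] rather than key[512 % 5] = key[2],
 * i.e. the (i & 511) mask restarts the key stream at every 512-byte
 * boundary.  Since XOR is its own inverse, READ and WRITE perform the
 * same transformation; only the in/out assignment above differs.
 */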

static int none_status(struct loop_device *lo, struct loop_info *info)
{
        lo->lo_flags |= LO_FLAGS_BH_REMAP;
        return 0;
}

static int xor_status(struct loop_device *lo, struct loop_info *info)
{
        if (info->lo_encrypt_key_size <= 0)
                return -EINVAL;
        return 0;
}

struct loop_func_table none_funcs = {
        number: LO_CRYPT_NONE,
        transfer: transfer_none,
        init: none_status,
};

struct loop_func_table xor_funcs = {
        number: LO_CRYPT_XOR,
        transfer: transfer_xor,
        init: xor_status
};

/* xfer_funcs[0] is special - its release function is never called */
struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
        &none_funcs,
        &xor_funcs
};

#define MAX_DISK_SIZE 1024*1024*1024

static int compute_loop_size(struct loop_device *lo, struct dentry * lo_dentry, kdev_t lodev)
{
        if (S_ISREG(lo_dentry->d_inode->i_mode))
                return (lo_dentry->d_inode->i_size - lo->lo_offset) >> BLOCK_SIZE_BITS;
        if (blk_size[MAJOR(lodev)])
                return blk_size[MAJOR(lodev)][MINOR(lodev)] -
                                (lo->lo_offset >> BLOCK_SIZE_BITS);
        return MAX_DISK_SIZE;
}
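
/*
 * Sizes here are in 1KB blocks (BLOCK_SIZE_BITS == 10).  Illustrative
 * example: a 10 MB regular backing file (10485760 bytes) with
 * lo_offset = 4096 yields (10485760 - 4096) >> 10 = 10236 blocks.
 */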

static void figure_loop_size(struct loop_device *lo)
{
        loop_sizes[lo->lo_number] = compute_loop_size(lo,
                                        lo->lo_backing_file->f_dentry,
                                        lo->lo_device);
}

static int lo_send(struct loop_device *lo, struct buffer_head *bh, int bsize,
                   loff_t pos)
{
        struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
        struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
        struct address_space_operations *aops = mapping->a_ops;
        struct page *page;
        char *kaddr, *data;
        unsigned long index;
        unsigned size, offset;
        int len;

        down(&mapping->host->i_sem);
        index = pos >> PAGE_CACHE_SHIFT;
        offset = pos & (PAGE_CACHE_SIZE - 1);
        len = bh->b_size;
        data = bh->b_data;
        while (len > 0) {
                int IV = index * (PAGE_CACHE_SIZE/bsize) + offset/bsize;
                int transfer_result;

                size = PAGE_CACHE_SIZE - offset;
                if (size > len)
                        size = len;

                page = grab_cache_page(mapping, index);
                if (!page)
                        goto fail;
                kaddr = kmap(page);
                if (aops->prepare_write(file, page, offset, offset+size))
                        goto unlock;
                flush_dcache_page(page);
                transfer_result = lo_do_transfer(lo, WRITE, kaddr + offset, data, size, IV);
                if (transfer_result) {
                        /*
                         * The transfer failed, but we still write the data to
                         * keep prepare/commit calls balanced.
                         */
                        printk(KERN_ERR "loop: transfer error block %ld\n", index);
                        memset(kaddr + offset, 0, size);
                }
                if (aops->commit_write(file, page, offset, offset+size))
                        goto unlock;
                if (transfer_result)
                        goto unlock;
                kunmap(page);
                data += size;
                len -= size;
                offset = 0;
                index++;
                pos += size;
                UnlockPage(page);
                page_cache_release(page);
        }
        up(&mapping->host->i_sem);
        return 0;

unlock:
        kunmap(page);
        UnlockPage(page);
        page_cache_release(page);
fail:
        up(&mapping->host->i_sem);
        return -1;
}

struct lo_read_data {
        struct loop_device *lo;
        char *data;
        int bsize;
};

static int lo_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
{
        char *kaddr;
        unsigned long count = desc->count;
        struct lo_read_data *p = (struct lo_read_data*)desc->buf;
        struct loop_device *lo = p->lo;
        int IV = page->index * (PAGE_CACHE_SIZE/p->bsize) + offset/p->bsize;

        if (size > count)
                size = count;

        kaddr = kmap(page);
        if (lo_do_transfer(lo, READ, kaddr + offset, p->data, size, IV)) {
                size = 0;
                printk(KERN_ERR "loop: transfer error block %ld\n",page->index);
                desc->error = -EINVAL;
        }
        kunmap(page);

        desc->count = count - size;
        desc->written += size;
        p->data += size;
        return size;
}

static int lo_receive(struct loop_device *lo, struct buffer_head *bh, int bsize,
                      loff_t pos)
{
        struct lo_read_data cookie;
        read_descriptor_t desc;
        struct file *file;

        cookie.lo = lo;
        cookie.data = bh->b_data;
        cookie.bsize = bsize;
        desc.written = 0;
        desc.count = bh->b_size;
        desc.buf = (char*)&cookie;
        desc.error = 0;
        spin_lock_irq(&lo->lo_lock);
        file = lo->lo_backing_file;
        spin_unlock_irq(&lo->lo_lock);
        do_generic_file_read(file, &pos, &desc, lo_read_actor);
        return desc.error;
}

static inline int loop_get_bs(struct loop_device *lo)
{
        int bs = 0;

        if (blksize_size[MAJOR(lo->lo_device)])
                bs = blksize_size[MAJOR(lo->lo_device)][MINOR(lo->lo_device)];
        if (!bs)
                bs = BLOCK_SIZE;

        return bs;
}

static inline unsigned long loop_get_iv(struct loop_device *lo,
                                        unsigned long sector)
{
        int bs = loop_get_bs(lo);
        unsigned long offset, IV;

        IV = sector / (bs >> 9) + lo->lo_offset / bs;
        offset = ((sector % (bs >> 9)) << 9) + lo->lo_offset % bs;
        if (offset >= bs)
                IV++;

        return IV;
}
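
/*
 * Worked example (illustrative): with bs = 4096 (bs >> 9 == 8 sectors
 * per block) and lo_offset = 1024, sector 9 gives IV = 9/8 + 1024/4096
 * = 1 and offset = (9 % 8) * 512 + 1024 = 1536; since 1536 < bs the IV
 * stays 1.  Had the residual offset reached bs, the data would start in
 * the following block and the IV would be bumped by one.
 */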

static int do_bh_filebacked(struct loop_device *lo, struct buffer_head *bh, int rw)
{
        loff_t pos;
        int ret;

        pos = ((loff_t) bh->b_rsector << 9) + lo->lo_offset;

        if (rw == WRITE)
                ret = lo_send(lo, bh, loop_get_bs(lo), pos);
        else
                ret = lo_receive(lo, bh, loop_get_bs(lo), pos);

        return ret;
}

static void loop_end_io_transfer(struct buffer_head *bh, int uptodate);
static void loop_put_buffer(struct buffer_head *bh)
{
        /*
         * check b_end_io, may just be a remapped bh and not an allocated one
         */
        if (bh && bh->b_end_io == loop_end_io_transfer) {
                __free_page(bh->b_page);
                kmem_cache_free(bh_cachep, bh);
        }
}

/*
 * Add buffer_head to back of pending list
 */
static void loop_add_bh(struct loop_device *lo, struct buffer_head *bh)
{
        unsigned long flags;

        spin_lock_irqsave(&lo->lo_lock, flags);
        if (lo->lo_bhtail) {
                lo->lo_bhtail->b_reqnext = bh;
                lo->lo_bhtail = bh;
        } else
                lo->lo_bh = lo->lo_bhtail = bh;
        spin_unlock_irqrestore(&lo->lo_lock, flags);

        up(&lo->lo_bh_mutex);
}

/*
 * Grab first pending buffer
 */
static struct buffer_head *loop_get_bh(struct loop_device *lo)
{
        struct buffer_head *bh;

        spin_lock_irq(&lo->lo_lock);
        if ((bh = lo->lo_bh)) {
                if (bh == lo->lo_bhtail)
                        lo->lo_bhtail = NULL;
                lo->lo_bh = bh->b_reqnext;
                bh->b_reqnext = NULL;
        }
        spin_unlock_irq(&lo->lo_lock);

        return bh;
}

/*
 * Called when buffer i/o has completed. If BH_Dirty is set, this was a
 * WRITE and the lo->transfer work has already been done. If not, it was
 * a READ, so queue it for the loop thread and let it do the transfer out
 * of b_end_io context (we don't want to do decrypt of a page with irqs
 * disabled).
 */
static void loop_end_io_transfer(struct buffer_head *bh, int uptodate)
{
        struct loop_device *lo = &loop_dev[MINOR(bh->b_dev)];

        if (!uptodate || test_bit(BH_Dirty, &bh->b_state)) {
                struct buffer_head *rbh = bh->b_private;

                rbh->b_end_io(rbh, uptodate);
                if (atomic_dec_and_test(&lo->lo_pending))
                        up(&lo->lo_bh_mutex);
                loop_put_buffer(bh);
        } else
                loop_add_bh(lo, bh);
}

static struct buffer_head *loop_get_buffer(struct loop_device *lo,
                                           struct buffer_head *rbh)
{
        struct buffer_head *bh;

        /*
         * for xfer_funcs that can operate on the same bh, do that
         */
        if (lo->lo_flags & LO_FLAGS_BH_REMAP) {
                bh = rbh;
                goto out_bh;
        }

        do {
                bh = kmem_cache_alloc(bh_cachep, SLAB_NOIO);
                if (bh)
                        break;

                run_task_queue(&tq_disk);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        } while (1);
        memset(bh, 0, sizeof(*bh));

        bh->b_size = rbh->b_size;
        bh->b_dev = rbh->b_rdev;
        bh->b_state = (1 << BH_Req) | (1 << BH_Mapped) | (1 << BH_Lock);

        /*
         * easy way out, although it does waste some memory for < PAGE_SIZE
         * blocks... if highmem bounce buffering can get away with it,
         * so can we :-)
         */
        do {
                bh->b_page = alloc_page(GFP_NOIO);
                if (bh->b_page)
                        break;

                run_task_queue(&tq_disk);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        } while (1);

        bh->b_data = page_address(bh->b_page);
        bh->b_end_io = loop_end_io_transfer;
        bh->b_private = rbh;
        init_waitqueue_head(&bh->b_wait);

out_bh:
        bh->b_rsector = rbh->b_rsector + (lo->lo_offset >> 9);
        spin_lock_irq(&lo->lo_lock);
        bh->b_rdev = lo->lo_device;
        spin_unlock_irq(&lo->lo_lock);

        return bh;
}

static int loop_make_request(request_queue_t *q, int rw, struct buffer_head *rbh)
{
        struct buffer_head *bh = NULL;
        struct loop_device *lo;
        unsigned long IV;

        if (!buffer_locked(rbh))
                BUG();

        if (MINOR(rbh->b_rdev) >= max_loop)
                goto out;

        lo = &loop_dev[MINOR(rbh->b_rdev)];
        spin_lock_irq(&lo->lo_lock);
        if (lo->lo_state != Lo_bound)
                goto inactive;
        atomic_inc(&lo->lo_pending);
        spin_unlock_irq(&lo->lo_lock);

        if (rw == WRITE) {
                if (lo->lo_flags & LO_FLAGS_READ_ONLY)
                        goto err;
        } else if (rw == READA) {
                rw = READ;
        } else if (rw != READ) {
                printk(KERN_ERR "loop: unknown command (%d)\n", rw);
                goto err;
        }

        rbh = blk_queue_bounce(q, rw, rbh);

        /*
         * file backed, queue for loop_thread to handle
         */
        if (lo->lo_flags & LO_FLAGS_DO_BMAP) {
                /*
                 * rbh is locked at this point, no one else should clear
                 * the dirty flag
                 */
                if (rw == WRITE)
                        set_bit(BH_Dirty, &rbh->b_state);
                loop_add_bh(lo, rbh);
                return 0;
        }

        /*
         * piggyback a new buffer on the original, and submit it for I/O
         */
        bh = loop_get_buffer(lo, rbh);
        IV = loop_get_iv(lo, rbh->b_rsector);
        if (rw == WRITE) {
                set_bit(BH_Dirty, &bh->b_state);
                if (lo_do_transfer(lo, WRITE, bh->b_data, rbh->b_data,
                                   bh->b_size, IV))
                        goto err;
        }

        generic_make_request(rw, bh);
        return 0;

err:
        if (atomic_dec_and_test(&lo->lo_pending))
                up(&lo->lo_bh_mutex);
        loop_put_buffer(bh);
out:
        buffer_IO_error(rbh);
        return 0;
inactive:
        spin_unlock_irq(&lo->lo_lock);
        goto out;
}
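
/*
 * Request flow summary: for file-backed devices (LO_FLAGS_DO_BMAP) no
 * I/O is started here - the bh is queued and loop_thread later moves
 * the data through the page cache via lo_send()/lo_receive().  For
 * block-backed devices the bh is remapped (or cloned), WRITE data is
 * encrypted in place first, and the buffer is resubmitted with
 * generic_make_request(); READ decryption happens afterwards in
 * loop_thread via loop_end_io_transfer()/loop_add_bh().
 */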

static inline void loop_handle_bh(struct loop_device *lo, struct buffer_head *bh)
{
        int ret;

        /*
         * For block backed loop, we know this is a READ
         */
        if (lo->lo_flags & LO_FLAGS_DO_BMAP) {
                int rw = !!test_and_clear_bit(BH_Dirty, &bh->b_state);

                ret = do_bh_filebacked(lo, bh, rw);
                bh->b_end_io(bh, !ret);
        } else {
                struct buffer_head *rbh = bh->b_private;
                unsigned long IV = loop_get_iv(lo, rbh->b_rsector);

                ret = lo_do_transfer(lo, READ, bh->b_data, rbh->b_data,
                                     bh->b_size, IV);

                rbh->b_end_io(rbh, !ret);
                loop_put_buffer(bh);
        }
}

/*
 * worker thread that handles reads/writes to file backed loop devices,
 * to avoid blocking in our make_request_fn. it also does loop decrypting
 * on reads for block backed loop, as that is too heavy to do from
 * b_end_io context where irqs may be disabled.
 */
static int loop_thread(void *data)
{
        struct loop_device *lo = data;
        struct buffer_head *bh;

        daemonize();
        exit_files(current);
        reparent_to_init();

        sprintf(current->comm, "loop%d", lo->lo_number);

        spin_lock_irq(&current->sigmask_lock);
        sigfillset(&current->blocked);
        flush_signals(current);
        spin_unlock_irq(&current->sigmask_lock);

        spin_lock_irq(&lo->lo_lock);
        lo->lo_state = Lo_bound;
        atomic_inc(&lo->lo_pending);
        spin_unlock_irq(&lo->lo_lock);

        current->flags |= PF_NOIO;

        /*
         * up sem, we are running
         */
        up(&lo->lo_sem);

        for (;;) {
                down_interruptible(&lo->lo_bh_mutex);
                /*
                 * could be upped because of tear-down, not because of
                 * pending work
                 */
                if (!atomic_read(&lo->lo_pending))
                        break;

                bh = loop_get_bh(lo);
                if (!bh) {
                        printk("loop: missing bh\n");
                        continue;
                }
                loop_handle_bh(lo, bh);

                /*
                 * upped both for pending work and tear-down, lo_pending
                 * will hit zero then
                 */
                if (atomic_dec_and_test(&lo->lo_pending))
                        break;
        }

        up(&lo->lo_sem);
        return 0;
}
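
/*
 * Note on lo_pending accounting: loop_thread holds one baseline
 * reference (the atomic_inc above) and loop_make_request takes one per
 * in-flight buffer.  The baseline is only dropped by loop_clr_fd at
 * rundown, so atomic_dec_and_test() can reach zero only once the device
 * is being torn down and all queued work has drained; at that point the
 * thread exits and ups lo_sem to let loop_clr_fd continue.
 */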

static int loop_set_fd(struct loop_device *lo, struct file *lo_file, kdev_t dev,
                       unsigned int arg)
{
        struct file     *file;
        struct inode    *inode;
        kdev_t          lo_device;
        int             lo_flags = 0;
        int             error;
        int             bs;

        MOD_INC_USE_COUNT;

        error = -EBUSY;
        if (lo->lo_state != Lo_unbound)
                goto out;

        error = -EBADF;
        file = fget(arg);
        if (!file)
                goto out;

        error = -EINVAL;
        inode = file->f_dentry->d_inode;

        if (!(file->f_mode & FMODE_WRITE))
                lo_flags |= LO_FLAGS_READ_ONLY;

        if (S_ISBLK(inode->i_mode)) {
                lo_device = inode->i_rdev;
                if (lo_device == dev) {
                        error = -EBUSY;
                        goto out_putf;
                }
        } else if (S_ISREG(inode->i_mode)) {
                struct address_space_operations *aops = inode->i_mapping->a_ops;
                /*
                 * If we can't read - sorry. If we only can't write - well,
                 * it's going to be read-only.
                 */
                if (!aops->readpage)
                        goto out_putf;

                if (!aops->prepare_write || !aops->commit_write)
                        lo_flags |= LO_FLAGS_READ_ONLY;

                lo_device = inode->i_dev;
                lo_flags |= LO_FLAGS_DO_BMAP;
                error = 0;
        } else
                goto out_putf;

        get_file(file);

        if (IS_RDONLY (inode) || is_read_only(lo_device)
            || !(lo_file->f_mode & FMODE_WRITE))
                lo_flags |= LO_FLAGS_READ_ONLY;

        set_device_ro(dev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

        lo->lo_device = lo_device;
        lo->lo_flags = lo_flags;
        lo->lo_backing_file = file;
        lo->transfer = NULL;
        lo->ioctl = NULL;
        figure_loop_size(lo);
        lo->old_gfp_mask = inode->i_mapping->gfp_mask;
        inode->i_mapping->gfp_mask &= ~(__GFP_IO|__GFP_FS);

        bs = 0;
        if (blksize_size[MAJOR(lo_device)])
                bs = blksize_size[MAJOR(lo_device)][MINOR(lo_device)];
        if (!bs)
                bs = BLOCK_SIZE;

        set_blocksize(dev, bs);

        lo->lo_bh = lo->lo_bhtail = NULL;
        kernel_thread(loop_thread, lo, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
        down(&lo->lo_sem);

        fput(file);
        return 0;

 out_putf:
        fput(file);
 out:
        MOD_DEC_USE_COUNT;
        return error;
}

static int loop_release_xfer(struct loop_device *lo)
{
        int err = 0;
        if (lo->lo_encrypt_type) {
                struct loop_func_table *xfer = xfer_funcs[lo->lo_encrypt_type];
                if (xfer && xfer->release)
                        err = xfer->release(lo);
                if (xfer && xfer->unlock)
                        xfer->unlock(lo);
                lo->lo_encrypt_type = 0;
        }
        return err;
}

static int loop_init_xfer(struct loop_device *lo, int type, struct loop_info *i)
{
        int err = 0;
        if (type) {
                struct loop_func_table *xfer = xfer_funcs[type];
                if (xfer->init)
                        err = xfer->init(lo, i);
                if (!err) {
                        lo->lo_encrypt_type = type;
                        if (xfer->lock)
                                xfer->lock(lo);
                }
        }
        return err;
}

static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
{
        struct file *filp = lo->lo_backing_file;
        int gfp = lo->old_gfp_mask;

        if (lo->lo_state != Lo_bound)
                return -ENXIO;
        if (lo->lo_refcnt > 1)  /* we needed one fd for the ioctl */
                return -EBUSY;
        if (filp == NULL)
                return -EINVAL;

        spin_lock_irq(&lo->lo_lock);
        lo->lo_state = Lo_rundown;
        if (atomic_dec_and_test(&lo->lo_pending))
                up(&lo->lo_bh_mutex);
        spin_unlock_irq(&lo->lo_lock);

        down(&lo->lo_sem);

        lo->lo_backing_file = NULL;

        loop_release_xfer(lo);
        lo->transfer = NULL;
        lo->ioctl = NULL;
        lo->lo_device = 0;
        lo->lo_encrypt_type = 0;
        lo->lo_offset = 0;
        lo->lo_encrypt_key_size = 0;
        lo->lo_flags = 0;
        memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
        memset(lo->lo_name, 0, LO_NAME_SIZE);
        loop_sizes[lo->lo_number] = 0;
        invalidate_bdev(bdev, 0);
        filp->f_dentry->d_inode->i_mapping->gfp_mask = gfp;
        lo->lo_state = Lo_unbound;
        fput(filp);
        MOD_DEC_USE_COUNT;
        return 0;
}

static int loop_set_status(struct loop_device *lo, struct loop_info *arg)
{
        struct loop_info info;
        int err;
        unsigned int type;

        if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid &&
            !capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (lo->lo_state != Lo_bound)
                return -ENXIO;
        if (copy_from_user(&info, arg, sizeof (struct loop_info)))
                return -EFAULT;
        if ((unsigned int) info.lo_encrypt_key_size > LO_KEY_SIZE)
                return -EINVAL;
        type = info.lo_encrypt_type;
        if (type >= MAX_LO_CRYPT || xfer_funcs[type] == NULL)
                return -EINVAL;
        if (type == LO_CRYPT_XOR && info.lo_encrypt_key_size == 0)
                return -EINVAL;
        err = loop_release_xfer(lo);
        if (!err)
                err = loop_init_xfer(lo, type, &info);
        if (err)
                return err;

        lo->lo_offset = info.lo_offset;
        strncpy(lo->lo_name, info.lo_name, LO_NAME_SIZE);

        lo->transfer = xfer_funcs[type]->transfer;
        lo->ioctl = xfer_funcs[type]->ioctl;
        lo->lo_encrypt_key_size = info.lo_encrypt_key_size;
        lo->lo_init[0] = info.lo_init[0];
        lo->lo_init[1] = info.lo_init[1];
        if (info.lo_encrypt_key_size) {
                memcpy(lo->lo_encrypt_key, info.lo_encrypt_key,
                       info.lo_encrypt_key_size);
                lo->lo_key_owner = current->uid;
        }
        figure_loop_size(lo);
        return 0;
}

static int loop_get_status(struct loop_device *lo, struct loop_info *arg)
{
        struct loop_info        info;
        struct file *file = lo->lo_backing_file;

        if (lo->lo_state != Lo_bound)
                return -ENXIO;
        if (!arg)
                return -EINVAL;
        memset(&info, 0, sizeof(info));
        info.lo_number = lo->lo_number;
        info.lo_device = kdev_t_to_nr(file->f_dentry->d_inode->i_dev);
        info.lo_inode = file->f_dentry->d_inode->i_ino;
        info.lo_rdevice = kdev_t_to_nr(lo->lo_device);
        info.lo_offset = lo->lo_offset;
        info.lo_flags = lo->lo_flags;
        strncpy(info.lo_name, lo->lo_name, LO_NAME_SIZE);
        info.lo_encrypt_type = lo->lo_encrypt_type;
        if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
                info.lo_encrypt_key_size = lo->lo_encrypt_key_size;
                memcpy(info.lo_encrypt_key, lo->lo_encrypt_key,
                       lo->lo_encrypt_key_size);
        }
        return copy_to_user(arg, &info, sizeof(info)) ? -EFAULT : 0;
}

static int lo_ioctl(struct inode * inode, struct file * file,
        unsigned int cmd, unsigned long arg)
{
        struct loop_device *lo;
        int dev, err;

        if (!inode)
                return -EINVAL;
        if (MAJOR(inode->i_rdev) != MAJOR_NR) {
                printk(KERN_WARNING "lo_ioctl: pseudo-major != %d\n",
                       MAJOR_NR);
                return -ENODEV;
        }
        dev = MINOR(inode->i_rdev);
        if (dev >= max_loop)
                return -ENODEV;
        lo = &loop_dev[dev];
        down(&lo->lo_ctl_mutex);
        switch (cmd) {
        case LOOP_SET_FD:
                err = loop_set_fd(lo, file, inode->i_rdev, arg);
                break;
        case LOOP_CLR_FD:
                err = loop_clr_fd(lo, inode->i_bdev);
                break;
        case LOOP_SET_STATUS:
                err = loop_set_status(lo, (struct loop_info *) arg);
                break;
        case LOOP_GET_STATUS:
                err = loop_get_status(lo, (struct loop_info *) arg);
                break;
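        /*
         * loop_sizes[] entries are in 1KB blocks (see figure_loop_size()),
         * so << 1 below converts to 512-byte sectors for BLKGETSIZE and
         * << 10 converts to bytes for BLKGETSIZE64.
         */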
        case BLKGETSIZE:
                if (lo->lo_state != Lo_bound) {
                        err = -ENXIO;
                        break;
                }
                err = put_user((unsigned long)loop_sizes[lo->lo_number] << 1, (unsigned long *) arg);
                break;
        case BLKGETSIZE64:
                if (lo->lo_state != Lo_bound) {
                        err = -ENXIO;
                        break;
                }
                err = put_user((u64)loop_sizes[lo->lo_number] << 10, (u64*)arg);
                break;
        case BLKBSZGET:
        case BLKBSZSET:
        case BLKSSZGET:
                err = blk_ioctl(inode->i_rdev, cmd, arg);
                break;
        default:
                err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
        }
        up(&lo->lo_ctl_mutex);
        return err;
}
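
/*
 * Userspace sketch (illustrative only, not part of this file): binding a
 * backing file to /dev/loop0 with the ioctls handled above.  Error
 * handling is omitted.
 *
 *      int ffd = open("backing.img", O_RDWR);
 *      int lfd = open("/dev/loop0", O_RDWR);
 *      ioctl(lfd, LOOP_SET_FD, ffd);           // arg is the fd itself
 *
 *      struct loop_info li;
 *      memset(&li, 0, sizeof(li));
 *      li.lo_offset = 0;
 *      ioctl(lfd, LOOP_SET_STATUS, &li);       // offset/encryption setup
 *      ...
 *      ioctl(lfd, LOOP_CLR_FD, 0);             // detach when done
 */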

static int lo_open(struct inode *inode, struct file *file)
{
        struct loop_device *lo;
        int     dev, type;

        if (!inode)
                return -EINVAL;
        if (MAJOR(inode->i_rdev) != MAJOR_NR) {
                printk(KERN_WARNING "lo_open: pseudo-major != %d\n", MAJOR_NR);
                return -ENODEV;
        }
        dev = MINOR(inode->i_rdev);
        if (dev >= max_loop)
                return -ENODEV;

        lo = &loop_dev[dev];
        MOD_INC_USE_COUNT;
        down(&lo->lo_ctl_mutex);

        type = lo->lo_encrypt_type;
        if (type && xfer_funcs[type] && xfer_funcs[type]->lock)
                xfer_funcs[type]->lock(lo);
        lo->lo_refcnt++;
        up(&lo->lo_ctl_mutex);
        return 0;
}

static int lo_release(struct inode *inode, struct file *file)
{
        struct loop_device *lo;
        int     dev, type;

        if (!inode)
                return 0;
        if (MAJOR(inode->i_rdev) != MAJOR_NR) {
                printk(KERN_WARNING "lo_release: pseudo-major != %d\n",
                       MAJOR_NR);
                return 0;
        }
        dev = MINOR(inode->i_rdev);
        if (dev >= max_loop)
                return 0;

        lo = &loop_dev[dev];
        down(&lo->lo_ctl_mutex);
        type = lo->lo_encrypt_type;
        --lo->lo_refcnt;
        if (xfer_funcs[type] && xfer_funcs[type]->unlock)
                xfer_funcs[type]->unlock(lo);

        up(&lo->lo_ctl_mutex);
        MOD_DEC_USE_COUNT;
        return 0;
}

static struct block_device_operations lo_fops = {
        owner:          THIS_MODULE,
        open:           lo_open,
        release:        lo_release,
        ioctl:          lo_ioctl,
};

/*
 * And now the module code and kernel interface.
 */
MODULE_PARM(max_loop, "i");
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-256)");
MODULE_LICENSE("GPL");
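
/*
 * Example (illustrative): when built as a module, the limit can be set
 * at load time, e.g. "modprobe loop max_loop=64"; values outside 1-256
 * are rejected by loop_init() below and the default of 8 is used.
 */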

int loop_register_transfer(struct loop_func_table *funcs)
{
        /* xfer_funcs[] holds MAX_LO_CRYPT entries, so the index must be < MAX_LO_CRYPT */
        if ((unsigned)funcs->number >= MAX_LO_CRYPT || xfer_funcs[funcs->number])
                return -EINVAL;
        xfer_funcs[funcs->number] = funcs;
        return 0;
}

int loop_unregister_transfer(int number)
{
        struct loop_device *lo;

        if ((unsigned)number >= MAX_LO_CRYPT)
                return -EINVAL;
        for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) {
                int type = lo->lo_encrypt_type;
                if (type == number) {
                        xfer_funcs[type]->release(lo);
                        lo->transfer = NULL;
                        lo->lo_encrypt_type = 0;
                }
        }
        xfer_funcs[number] = NULL;
        return 0;
}

EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);
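
/*
 * Sketch of a third-party transfer module (hypothetical names, for
 * illustration only): a crypto module fills in a loop_func_table and
 * registers it through the exported helpers above.
 *
 *      static struct loop_func_table my_funcs = {
 *              number:   LO_CRYPT_IDEA,  // or another unused slot from <linux/loop.h>
 *              transfer: my_transfer,    // same signature as transfer_xor()
 *              init:     my_status,
 *      };
 *
 *      loop_register_transfer(&my_funcs);        // on module init
 *      loop_unregister_transfer(LO_CRYPT_IDEA);  // on module exit
 */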

int __init loop_init(void)
{
        int     i;

        if ((max_loop < 1) || (max_loop > 256)) {
                printk(KERN_WARNING "loop: invalid max_loop (must be between"
                                    " 1 and 256), using default (8)\n");
                max_loop = 8;
        }

        if (devfs_register_blkdev(MAJOR_NR, "loop", &lo_fops)) {
                printk(KERN_WARNING "Unable to get major number %d for loop"
                                    " device\n", MAJOR_NR);
                return -EIO;
        }

        loop_dev = kmalloc(max_loop * sizeof(struct loop_device), GFP_KERNEL);
        if (!loop_dev)
                goto out_dev;

        loop_sizes = kmalloc(max_loop * sizeof(int), GFP_KERNEL);
        if (!loop_sizes)
                goto out_sizes;

        loop_blksizes = kmalloc(max_loop * sizeof(int), GFP_KERNEL);
        if (!loop_blksizes)
                goto out_blksizes;

        blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), loop_make_request);

        for (i = 0; i < max_loop; i++) {
                struct loop_device *lo = &loop_dev[i];
                memset(lo, 0, sizeof(struct loop_device));
                init_MUTEX(&lo->lo_ctl_mutex);
                init_MUTEX_LOCKED(&lo->lo_sem);
                init_MUTEX_LOCKED(&lo->lo_bh_mutex);
                lo->lo_number = i;
                spin_lock_init(&lo->lo_lock);
        }

        memset(loop_sizes, 0, max_loop * sizeof(int));
        memset(loop_blksizes, 0, max_loop * sizeof(int));
        blk_size[MAJOR_NR] = loop_sizes;
        blksize_size[MAJOR_NR] = loop_blksizes;
        for (i = 0; i < max_loop; i++)
                register_disk(NULL, MKDEV(MAJOR_NR, i), 1, &lo_fops, 0);

        devfs_handle = devfs_mk_dir(NULL, "loop", NULL);
        devfs_register_series(devfs_handle, "%u", max_loop, DEVFS_FL_DEFAULT,
                              MAJOR_NR, 0,
                              S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
                              &lo_fops, NULL);

        printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop);
        return 0;

out_blksizes:
        kfree(loop_sizes);
out_sizes:
        kfree(loop_dev);
out_dev:
        /* also drop the major we registered above on allocation failure */
        if (devfs_unregister_blkdev(MAJOR_NR, "loop"))
                printk(KERN_WARNING "loop: cannot unregister blkdev\n");
        printk(KERN_ERR "loop: ran out of memory\n");
        return -ENOMEM;
}

void loop_exit(void)
{
        devfs_unregister(devfs_handle);
        if (devfs_unregister_blkdev(MAJOR_NR, "loop"))
                printk(KERN_WARNING "loop: cannot unregister blkdev\n");
        kfree(loop_dev);
        kfree(loop_sizes);
        kfree(loop_blksizes);
}

module_init(loop_init);
module_exit(loop_exit);

#ifndef MODULE
static int __init max_loop_setup(char *str)
{
        max_loop = simple_strtol(str, NULL, 0);
        return 1;
}

__setup("max_loop=", max_loop_setup);
#endif
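
/*
 * Example (illustrative): when the driver is compiled into the kernel,
 * the same limit is set from the boot command line via the __setup hook
 * above, e.g. "max_loop=64".
 */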
