/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <asm/uaccess.h>
#include "internal.h"

struct bdev_inode {
        struct block_device bdev;
        struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
        return container_of(inode, struct bdev_inode, vfs_inode);
}

inline struct block_device *I_BDEV(struct inode *inode)
{
        return &BDEV_I(inode)->bdev;
}

EXPORT_SYMBOL(I_BDEV);

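/*
 * Size of the device in blocks at its current block size, taken from the
 * size cached in bd_inode; ~0 if the size is not (yet) known.
 */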
static sector_t max_block(struct block_device *bdev)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = i_size_read(bdev->bd_inode);

        if (sz) {
                unsigned int size = block_size(bdev);
                unsigned int sizebits = blksize_bits(size);
                retval = (sz >> sizebits);
        }
        return retval;
}

/* Kill _all_ buffers and pagecache, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
        if (bdev->bd_inode->i_mapping->nrpages == 0)
                return;
        invalidate_bh_lrus();
        truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}

int set_blocksize(struct block_device *bdev, int size)
{
        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
                return -EINVAL;

        /* Size cannot be smaller than the size supported by the device */
        if (size < bdev_hardsect_size(bdev))
                return -EINVAL;

        /* Don't change the size if it is the same as the current one */
        if (bdev->bd_block_size != size) {
                sync_blockdev(bdev);
                bdev->bd_block_size = size;
                bdev->bd_inode->i_blkbits = blksize_bits(size);
                kill_bdev(bdev);
        }
        return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
        if (set_blocksize(sb->s_bdev, size))
                return 0;
        /* If we get here, we know size is a power of two
         * and its value is between 512 and PAGE_SIZE */
        sb->s_blocksize = size;
        sb->s_blocksize_bits = blksize_bits(size);
        return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
        int minsize = bdev_hardsect_size(sb->s_bdev);
        if (size < minsize)
                size = minsize;
        return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);

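/*
 * Trivial get_block() for the block device mapping: block N of the inode
 * maps straight to block N of the device.  Writes past the end of the
 * device get -EIO; reads past the end are treated as a hole.
 */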
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        if (iblock >= max_block(I_BDEV(inode))) {
                if (create)
                        return -EIO;

                /*
                 * for reads, we're just trying to fill a partial page.
                 * return a hole, they will have to call get_block again
                 * before they can fill it, and they will get -EIO at that
                 * time
                 */
                return 0;
        }
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

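/*
 * Like blkdev_get_block(), but maps up to bh->b_size worth of blocks at
 * once and clamps the mapping at the end of the device.  Used by the
 * direct I/O path.
 */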
static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        sector_t end_block = max_block(I_BDEV(inode));
        unsigned long max_blocks = bh->b_size >> inode->i_blkbits;

        if ((iblock + max_blocks) > end_block) {
                max_blocks = end_block - iblock;
                if ((long)max_blocks <= 0) {
                        if (create)
                                return -EIO;    /* write fully beyond EOF */
                        /*
                         * It is a read which is fully beyond EOF.  We return
                         * a !buffer_mapped buffer
                         */
                        max_blocks = 0;
                }
        }

        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        bh->b_size = max_blocks << inode->i_blkbits;
        if (max_blocks)
                set_buffer_mapped(bh);
        return 0;
}

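/*
 * Direct I/O on the raw device: map blocks with blkdev_get_blocks() and
 * let the generic blockdev_direct_IO_no_locking() helper drive the
 * transfer.
 */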
static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                        loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;

        return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode),
                                iov, offset, nr_segs, blkdev_get_blocks, NULL);
}

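/*
 * Everything from here to the matching #endif is compiled out: an
 * alternative AIO direct I/O implementation that builds and submits bios
 * by hand.
 */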
#if 0
static void blk_end_aio(struct bio *bio, int error)
{
        struct kiocb *iocb = bio->bi_private;
        atomic_t *bio_count = &iocb->ki_bio_count;

        if (bio_data_dir(bio) == READ)
                bio_check_pages_dirty(bio);
        else {
                bio_release_pages(bio);
                bio_put(bio);
        }

        /* iocb->ki_nbytes stores error code from LLDD */
        if (error)
                iocb->ki_nbytes = -EIO;

        if (atomic_dec_and_test(bio_count)) {
                if ((long)iocb->ki_nbytes < 0)
                        aio_complete(iocb, iocb->ki_nbytes, 0);
                else
                        aio_complete(iocb, iocb->ki_left, 0);
        }

        return 0;
}

#define VEC_SIZE        16
struct pvec {
        unsigned short nr;
        unsigned short idx;
        struct page *page[VEC_SIZE];
};

#define PAGES_SPANNED(addr, len)        \
        (DIV_ROUND_UP((addr) + (len), PAGE_SIZE) - (addr) / PAGE_SIZE);

/*
 * get page pointer for user addr, we internally cache struct page array for
 * (addr, count) range in pvec to avoid frequent call to get_user_pages.  If
 * internal page list is exhausted, a batch count of up to VEC_SIZE is used
 * to get next set of page struct.
 */
static struct page *blk_get_page(unsigned long addr, size_t count, int rw,
                                 struct pvec *pvec)
{
        int ret, nr_pages;
        if (pvec->idx == pvec->nr) {
                nr_pages = PAGES_SPANNED(addr, count);
                nr_pages = min(nr_pages, VEC_SIZE);
                down_read(&current->mm->mmap_sem);
                ret = get_user_pages(current, current->mm, addr, nr_pages,
                                     rw == READ, 0, pvec->page, NULL);
                up_read(&current->mm->mmap_sem);
                if (ret < 0)
                        return ERR_PTR(ret);
                pvec->nr = ret;
                pvec->idx = 0;
        }
        return pvec->page[pvec->idx++];
}

/* return a page back to pvec array */
static void blk_unget_page(struct page *page, struct pvec *pvec)
{
        pvec->page[--pvec->idx] = page;
}

static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                 loff_t pos, unsigned long nr_segs)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        unsigned blkbits = blksize_bits(bdev_hardsect_size(I_BDEV(inode)));
        unsigned blocksize_mask = (1 << blkbits) - 1;
        unsigned long seg = 0;  /* iov segment iterator */
        unsigned long nvec;     /* number of bio vec needed */
        unsigned long cur_off;  /* offset into current page */
        unsigned long cur_len;  /* I/O len of current page, up to PAGE_SIZE */

        unsigned long addr;     /* user iovec address */
        size_t count;           /* user iovec len */
        size_t nbytes = iocb->ki_nbytes = iocb->ki_left; /* total xfer size */
        loff_t size;            /* size of block device */
        struct bio *bio;
        atomic_t *bio_count = &iocb->ki_bio_count;
        struct page *page;
        struct pvec pvec;

        pvec.nr = 0;
        pvec.idx = 0;

        if (pos & blocksize_mask)
                return -EINVAL;

        size = i_size_read(inode);
        if (pos + nbytes > size) {
                nbytes = size - pos;
                iocb->ki_left = nbytes;
        }

        /*
         * check first non-zero iov alignment, the remaining
         * iov alignment is checked inside bio loop below.
         */
        do {
                addr = (unsigned long) iov[seg].iov_base;
                count = min(iov[seg].iov_len, nbytes);
                if (addr & blocksize_mask || count & blocksize_mask)
                        return -EINVAL;
        } while (!count && ++seg < nr_segs);
        atomic_set(bio_count, 1);

        while (nbytes) {
                /* roughly estimate number of bio vec needed */
                nvec = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
                nvec = max(nvec, nr_segs - seg);
                nvec = min(nvec, (unsigned long) BIO_MAX_PAGES);

                /* bio_alloc should not fail with GFP_KERNEL flag */
                bio = bio_alloc(GFP_KERNEL, nvec);
                bio->bi_bdev = I_BDEV(inode);
                bio->bi_end_io = blk_end_aio;
                bio->bi_private = iocb;
                bio->bi_sector = pos >> blkbits;
same_bio:
                cur_off = addr & ~PAGE_MASK;
                cur_len = PAGE_SIZE - cur_off;
                if (count < cur_len)
                        cur_len = count;

                page = blk_get_page(addr, count, rw, &pvec);
                if (unlikely(IS_ERR(page)))
                        goto backout;

                if (bio_add_page(bio, page, cur_len, cur_off)) {
                        pos += cur_len;
                        addr += cur_len;
                        count -= cur_len;
                        nbytes -= cur_len;

                        if (count)
                                goto same_bio;
                        while (++seg < nr_segs) {
                                addr = (unsigned long) iov[seg].iov_base;
                                count = iov[seg].iov_len;
                                if (!count)
                                        continue;
                                if (unlikely(addr & blocksize_mask ||
                                             count & blocksize_mask)) {
                                        page = ERR_PTR(-EINVAL);
                                        goto backout;
                                }
                                count = min(count, nbytes);
                                goto same_bio;
                        }
                } else {
                        blk_unget_page(page, &pvec);
                }

                /* bio is ready, submit it */
                if (rw == READ)
                        bio_set_pages_dirty(bio);
                atomic_inc(bio_count);
                submit_bio(rw, bio);
        }

completion:
        iocb->ki_left -= nbytes;
        nbytes = iocb->ki_left;
        iocb->ki_pos += nbytes;

        blk_run_address_space(inode->i_mapping);
        if (atomic_dec_and_test(bio_count))
                aio_complete(iocb, nbytes, 0);

        return -EIOCBQUEUED;

backout:
        /*
         * back out nbytes count constructed so far for this bio,
         * we will throw away current bio.
         */
        nbytes += bio->bi_size;
        bio_release_pages(bio);
        bio_put(bio);

        /*
         * if no bio was submitted, return the error code.
         * otherwise, proceed with pending I/O completion.
         */
        if (atomic_read(bio_count) == 1)
                return PTR_ERR(page);
        goto completion;
}
#endif

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        *pagep = NULL;
        return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        int ret;
        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        page_cache_release(page);

        return ret;
}

/*
 * private llseek:
 * for a block special file file->f_path.dentry->d_inode->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int origin)
{
        struct inode *bd_inode = file->f_mapping->host;
        loff_t size;
        loff_t retval;

        mutex_lock(&bd_inode->i_mutex);
        size = i_size_read(bd_inode);

        switch (origin) {
                case 2:
                        offset += size;
                        break;
                case 1:
                        offset += file->f_pos;
        }
        retval = -EINVAL;
        if (offset >= 0 && offset <= size) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                }
                retval = offset;
        }
        mutex_unlock(&bd_inode->i_mutex);
        return retval;
}

/*
 *      Filp is never NULL; the only case when ->fsync() is called with
 *      NULL first argument is nfsd_sync_dir() and that's not a directory.
 */

static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
        return sync_blockdev(I_BDEV(filp->f_mapping->host));
}

/*
 * pseudo-fs
 */

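/*
 * bdev_lock protects the global all_bdevs list, each bdev's bd_inodes
 * list and the bd_holder/bd_holders claim state.
 */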
static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache * bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void bdev_destroy_inode(struct inode *inode)
{
        struct bdev_inode *bdi = BDEV_I(inode);

        bdi->bdev.bd_inode_backing_dev_info = NULL;
        kmem_cache_free(bdev_cachep, bdi);
}

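/*
 * Slab constructor: called when a bdev_inode object is first created in
 * the cache, not on every allocation, so only invariant state is set up.
 */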
static void init_once(struct kmem_cache * cachep, void *foo)
{
        struct bdev_inode *ei = (struct bdev_inode *) foo;
        struct block_device *bdev = &ei->bdev;

        memset(bdev, 0, sizeof(*bdev));
        mutex_init(&bdev->bd_mutex);
        sema_init(&bdev->bd_mount_sem, 1);
        INIT_LIST_HEAD(&bdev->bd_inodes);
        INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
        INIT_LIST_HEAD(&bdev->bd_holder_list);
#endif
        inode_init_once(&ei->vfs_inode);
}

static inline void __bd_forget(struct inode *inode)
{
        list_del_init(&inode->i_devices);
        inode->i_bdev = NULL;
        inode->i_mapping = &inode->i_data;
}

static void bdev_clear_inode(struct inode *inode)
{
        struct block_device *bdev = &BDEV_I(inode)->bdev;
        struct list_head *p;
        spin_lock(&bdev_lock);
        while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
                __bd_forget(list_entry(p, struct inode, i_devices));
        }
        list_del_init(&bdev->bd_list);
        spin_unlock(&bdev_lock);
}

static const struct super_operations bdev_sops = {
        .statfs = simple_statfs,
        .alloc_inode = bdev_alloc_inode,
        .destroy_inode = bdev_destroy_inode,
        .drop_inode = generic_delete_inode,
        .clear_inode = bdev_clear_inode,
};

static int bd_get_sb(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
        return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt);
}

static struct file_system_type bd_type = {
        .name           = "bdev",
        .get_sb         = bd_get_sb,
        .kill_sb        = kill_anon_super,
};

static struct vfsmount *bd_mnt __read_mostly;
struct super_block *blockdev_superblock;

void __init bdev_cache_init(void)
{
        int err;
        bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
                        0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                                SLAB_MEM_SPREAD|SLAB_PANIC),
                        init_once);
        err = register_filesystem(&bd_type);
        if (err)
                panic("Cannot register bdev pseudo-fs");
        bd_mnt = kern_mount(&bd_type);
        err = PTR_ERR(bd_mnt);
        if (IS_ERR(bd_mnt))
                panic("Cannot create bdev pseudo-fs");
        blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}

/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
        return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
        return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
        BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
        return 0;
}

static LIST_HEAD(all_bdevs);

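/*
 * bdget() returns the block_device for @dev, creating and setting up the
 * backing inode in the bdev pseudo-fs on first use (looked up via
 * iget5_locked(), keyed by hash(dev)).
 */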
struct block_device *bdget(dev_t dev)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = iget5_locked(bd_mnt->mnt_sb, hash(dev),
                        bdev_test, bdev_set, &dev);

        if (!inode)
                return NULL;

        bdev = &BDEV_I(inode)->bdev;

        if (inode->i_state & I_NEW) {
                bdev->bd_contains = NULL;
                bdev->bd_inode = inode;
                bdev->bd_block_size = (1 << inode->i_blkbits);
                bdev->bd_part_count = 0;
                bdev->bd_invalidated = 0;
                inode->i_mode = S_IFBLK;
                inode->i_rdev = dev;
                inode->i_bdev = bdev;
                inode->i_data.a_ops = &def_blk_aops;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                inode->i_data.backing_dev_info = &default_backing_dev_info;
                spin_lock(&bdev_lock);
                list_add(&bdev->bd_list, &all_bdevs);
                spin_unlock(&bdev_lock);
                unlock_new_inode(inode);
        }
        return bdev;
}

EXPORT_SYMBOL(bdget);

long nr_blockdev_pages(void)
{
        struct block_device *bdev;
        long ret = 0;
        spin_lock(&bdev_lock);
        list_for_each_entry(bdev, &all_bdevs, bd_list) {
                ret += bdev->bd_inode->i_mapping->nrpages;
        }
        spin_unlock(&bdev_lock);
        return ret;
}

void bdput(struct block_device *bdev)
{
        iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

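/*
 * bd_acquire() resolves the block_device for a device special inode,
 * caching the result in inode->i_bdev and pointing i_mapping at the
 * bdev's page cache so every opener of the device node shares it.
 */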
static struct block_device *bd_acquire(struct inode *inode)
{
        struct block_device *bdev;

        spin_lock(&bdev_lock);
        bdev = inode->i_bdev;
        if (bdev) {
                atomic_inc(&bdev->bd_inode->i_count);
                spin_unlock(&bdev_lock);
                return bdev;
        }
        spin_unlock(&bdev_lock);

        bdev = bdget(inode->i_rdev);
        if (bdev) {
                spin_lock(&bdev_lock);
                if (!inode->i_bdev) {
                        /*
                         * We take an additional bd_inode->i_count for inode,
                         * and it's released in clear_inode() of inode.
                         * So, we can access it via ->i_mapping always
                         * without igrab().
                         */
                        atomic_inc(&bdev->bd_inode->i_count);
                        inode->i_bdev = bdev;
                        inode->i_mapping = bdev->bd_inode->i_mapping;
                        list_add(&inode->i_devices, &bdev->bd_inodes);
                }
                spin_unlock(&bdev_lock);
        }
        return bdev;
}

/* Call when you free an inode */

void bd_forget(struct inode *inode)
{
        struct block_device *bdev = NULL;

        spin_lock(&bdev_lock);
        if (inode->i_bdev) {
                if (inode->i_sb != blockdev_superblock)
                        bdev = inode->i_bdev;
                __bd_forget(inode);
        }
        spin_unlock(&bdev_lock);

        if (bdev)
                iput(bdev->bd_inode);
}

int bd_claim(struct block_device *bdev, void *holder)
{
        int res;
        spin_lock(&bdev_lock);

        /* first decide result */
        if (bdev->bd_holder == holder)
                res = 0;         /* already a holder */
        else if (bdev->bd_holder != NULL)
                res = -EBUSY;    /* held by someone else */
        else if (bdev->bd_contains == bdev)
                res = 0;         /* is a whole device which isn't held */

        else if (bdev->bd_contains->bd_holder == bd_claim)
                res = 0;         /* is a partition of a device that is being partitioned */
        else if (bdev->bd_contains->bd_holder != NULL)
                res = -EBUSY;    /* is a partition of a held device */
        else
                res = 0;         /* is a partition of an un-held device */

        /* now impose change */
        if (res == 0) {
                /* note that for a whole device bd_holders
                 * will be incremented twice, and bd_holder will
                 * be set to bd_claim before being set to holder
                 */
                bdev->bd_contains->bd_holders++;
                bdev->bd_contains->bd_holder = bd_claim;
                bdev->bd_holders++;
                bdev->bd_holder = holder;
        }
        spin_unlock(&bdev_lock);
        return res;
}

EXPORT_SYMBOL(bd_claim);

void bd_release(struct block_device *bdev)
{
        spin_lock(&bdev_lock);
        if (!--bdev->bd_contains->bd_holders)
                bdev->bd_contains->bd_holder = NULL;
        if (!--bdev->bd_holders)
                bdev->bd_holder = NULL;
        spin_unlock(&bdev_lock);
}

EXPORT_SYMBOL(bd_release);

#ifdef CONFIG_SYSFS
/*
 * Functions for bd_claim_by_kobject / bd_release_from_kobject
 *
 *     If a kobject is passed to bd_claim_by_kobject()
 *     and the kobject has a parent directory,
 *     the following symlinks are created:
 *        o from the kobject to the claimed bdev
 *        o from the "holders" directory of the bdev to the parent of the kobject
 *     bd_release_from_kobject() removes these symlinks.
 *
 *     Example:
 *        If /dev/dm-0 maps to /dev/sda and the kobject corresponding to
 *        /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
 *           /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *           /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 */

static struct kobject *bdev_get_kobj(struct block_device *bdev)
{
        if (bdev->bd_contains != bdev)
                return kobject_get(&bdev->bd_part->kobj);
        else
                return kobject_get(&bdev->bd_disk->kobj);
}

static struct kobject *bdev_get_holder(struct block_device *bdev)
{
        if (bdev->bd_contains != bdev)
                return kobject_get(bdev->bd_part->holder_dir);
        else
                return kobject_get(bdev->bd_disk->holder_dir);
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
        if (!from || !to)
                return 0;
        return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
        if (!from || !to)
                return;
        sysfs_remove_link(from, kobject_name(to));
}

/*
 * 'struct bd_holder' contains pointers to kobjects symlinked by
 * bd_claim_by_kobject.
 * It's connected to bd_holder_list which is protected by bdev->bd_sem.
 */
struct bd_holder {
        struct list_head list;  /* chain of holders of the bdev */
        int count;              /* references from the holder */
        struct kobject *sdir;   /* holder object, e.g. "/block/dm-0/slaves" */
        struct kobject *hdev;   /* e.g. "/block/dm-0" */
        struct kobject *hdir;   /* e.g. "/block/sda/holders" */
        struct kobject *sdev;   /* e.g. "/block/sda" */
};

/*
 * Get references of related kobjects at once.
 * Returns 1 on success. 0 on failure.
 *
 * Should call bd_holder_release_dirs() after successful use.
 */
static int bd_holder_grab_dirs(struct block_device *bdev,
                        struct bd_holder *bo)
{
        if (!bdev || !bo)
                return 0;

        bo->sdir = kobject_get(bo->sdir);
        if (!bo->sdir)
                return 0;

        bo->hdev = kobject_get(bo->sdir->parent);
        if (!bo->hdev)
                goto fail_put_sdir;

        bo->sdev = bdev_get_kobj(bdev);
        if (!bo->sdev)
                goto fail_put_hdev;

        bo->hdir = bdev_get_holder(bdev);
        if (!bo->hdir)
                goto fail_put_sdev;

        return 1;

fail_put_sdev:
        kobject_put(bo->sdev);
fail_put_hdev:
        kobject_put(bo->hdev);
fail_put_sdir:
        kobject_put(bo->sdir);

        return 0;
}

/* Put references of related kobjects at once. */
static void bd_holder_release_dirs(struct bd_holder *bo)
{
        kobject_put(bo->hdir);
        kobject_put(bo->sdev);
        kobject_put(bo->hdev);
        kobject_put(bo->sdir);
}

static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
{
        struct bd_holder *bo;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return NULL;

        bo->count = 1;
        bo->sdir = kobj;

        return bo;
}

static void free_bd_holder(struct bd_holder *bo)
{
        kfree(bo);
}

/**
 * find_bd_holder - find matching struct bd_holder from the block device
 *
 * @bdev:       struct block device to be searched
 * @bo:         target struct bd_holder
 *
 * Returns the entry matching @bo in @bdev->bd_holder_list.
 * If found, increment the reference count and return the pointer.
 * If not found, returns NULL.
 */
static struct bd_holder *find_bd_holder(struct block_device *bdev,
                                        struct bd_holder *bo)
{
        struct bd_holder *tmp;

        list_for_each_entry(tmp, &bdev->bd_holder_list, list)
                if (tmp->sdir == bo->sdir) {
                        tmp->count++;
                        return tmp;
                }

        return NULL;
}

/**
 * add_bd_holder - create sysfs symlinks for bd_claim() relationship
 *
 * @bdev:       block device to be bd_claimed
 * @bo:         preallocated and initialized by alloc_bd_holder()
 *
 * Add @bo to @bdev->bd_holder_list, create symlinks.
 *
 * Returns 0 if symlinks are created.
 * Returns -ve if something fails.
 */
static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
{
        int err;

        if (!bo)
                return -EINVAL;

        if (!bd_holder_grab_dirs(bdev, bo))
                return -EBUSY;

        err = add_symlink(bo->sdir, bo->sdev);
        if (err)
                return err;

        err = add_symlink(bo->hdir, bo->hdev);
        if (err) {
                del_symlink(bo->sdir, bo->sdev);
                return err;
        }

        list_add_tail(&bo->list, &bdev->bd_holder_list);
        return 0;
}

/**
 * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
 *
 * @bdev:       block device to be bd_claimed
 * @kobj:       holder's kobject
 *
 * If there is a matching entry with @kobj in @bdev->bd_holder_list
 * and no other bd_claim() from the same kobject,
 * remove the struct bd_holder from the list, delete symlinks for it.
 *
 * Returns a pointer to the struct bd_holder when it's removed from the list
 * and ready to be freed.
 * Returns NULL if a matching claim isn't found or there is another bd_claim()
 * by the same kobject.
 */
static struct bd_holder *del_bd_holder(struct block_device *bdev,
                                        struct kobject *kobj)
{
        struct bd_holder *bo;

        list_for_each_entry(bo, &bdev->bd_holder_list, list) {
                if (bo->sdir == kobj) {
                        bo->count--;
                        BUG_ON(bo->count < 0);
                        if (!bo->count) {
                                list_del(&bo->list);
                                del_symlink(bo->sdir, bo->sdev);
                                del_symlink(bo->hdir, bo->hdev);
                                bd_holder_release_dirs(bo);
                                return bo;
                        }
                        break;
                }
        }

        return NULL;
}

/**
 * bd_claim_by_kobject - bd_claim() with additional kobject signature
 *
 * @bdev:       block device to be claimed
 * @holder:     holder's signature
 * @kobj:       holder's kobject
 *
 * Do bd_claim() and if it succeeds, create sysfs symlinks between
 * the bdev and the holder's kobject.
 * Use bd_release_from_kobject() when releasing the claimed bdev.
 *
 * Returns 0 on success. (same as bd_claim())
 * Returns errno on failure.
 */
static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
                                struct kobject *kobj)
{
        int err;
        struct bd_holder *bo, *found;

        if (!kobj)
                return -EINVAL;

        bo = alloc_bd_holder(kobj);
        if (!bo)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);

        err = bd_claim(bdev, holder);
        if (err)
                goto fail;

        found = find_bd_holder(bdev, bo);
        if (found)
                goto fail;

        err = add_bd_holder(bdev, bo);
        if (err)
                bd_release(bdev);
        else
                bo = NULL;
fail:
        mutex_unlock(&bdev->bd_mutex);
        free_bd_holder(bo);
        return err;
}

/**
 * bd_release_from_kobject - bd_release() with additional kobject signature
 *
 * @bdev:       block device to be released
 * @kobj:       holder's kobject
 *
 * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
 */
static void bd_release_from_kobject(struct block_device *bdev,
                                        struct kobject *kobj)
{
        if (!kobj)
                return;

        mutex_lock(&bdev->bd_mutex);
        bd_release(bdev);
        free_bd_holder(del_bd_holder(bdev, kobj));
        mutex_unlock(&bdev->bd_mutex);
}

/**
 * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
 *
 * @bdev:       block device to be claimed
 * @holder:     holder's signature
 * @disk:       holder's gendisk
 *
 * Call bd_claim_by_kobject() after taking a reference on @disk->slave_dir.
 */
int bd_claim_by_disk(struct block_device *bdev, void *holder,
                        struct gendisk *disk)
{
        return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
}
EXPORT_SYMBOL_GPL(bd_claim_by_disk);

/**
 * bd_release_from_disk - wrapper function for bd_release_from_kobject()
 *
 * @bdev:       block device to be released
 * @disk:       holder's gendisk
 *
 * Call bd_release_from_kobject() and put @disk->slave_dir.
 */
void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
{
        bd_release_from_kobject(bdev, disk->slave_dir);
        kobject_put(disk->slave_dir);
}
EXPORT_SYMBOL_GPL(bd_release_from_disk);
#endif

/*
 * Tries to open block device by device number.  Use it ONLY if you
 * really do not have anything better - i.e. when you are behind a
 * truly sucky interface and all you are given is a device number.  _Never_
 * to be used for internal purposes.  If you ever need it - reconsider
 * your API.
 */
struct block_device *open_by_devnum(dev_t dev, unsigned mode)
{
        struct block_device *bdev = bdget(dev);
        int err = -ENOMEM;
        int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
        if (bdev)
                err = blkdev_get(bdev, mode, flags);
        return err ? ERR_PTR(err) : bdev;
}

EXPORT_SYMBOL(open_by_devnum);

/*
 * This routine checks whether a removable media has been changed,
 * and invalidates all buffer-cache-entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
        struct gendisk *disk = bdev->bd_disk;
        struct block_device_operations * bdops = disk->fops;

        if (!bdops->media_changed)
                return 0;
        if (!bdops->media_changed(bdev->bd_disk))
                return 0;

        if (__invalidate_device(bdev))
                printk("VFS: busy inodes on changed media.\n");

        if (bdops->revalidate_disk)
                bdops->revalidate_disk(bdev->bd_disk);
        if (bdev->bd_disk->minors > 1)
                bdev->bd_invalidated = 1;
        return 1;
}

EXPORT_SYMBOL(check_disk_change);

void bd_set_size(struct block_device *bdev, loff_t size)
{
        unsigned bsize = bdev_hardsect_size(bdev);

        bdev->bd_inode->i_size = size;
        while (bsize < PAGE_CACHE_SIZE) {
                if (size & bsize)
                        break;
                bsize <<= 1;
        }
        bdev->bd_block_size = bsize;
        bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);

static int __blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags,
                        int for_part);
static int __blkdev_put(struct block_device *bdev, int for_part);

/*
 * bd_mutex locking:
 *
 *  mutex_lock(part->bd_mutex)
 *    mutex_lock_nested(whole->bd_mutex, 1)
 */

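/*
 * Common open path for whole devices and partitions.  The first opener
 * binds the gendisk to the bdev, sets its size and (for a partition)
 * opens the containing whole device; later openers just bump bd_openers.
 */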
static int do_open(struct block_device *bdev, struct file *file, int for_part)
{
        struct module *owner = NULL;
        struct gendisk *disk;
        int ret = -ENXIO;
        int part;

        file->f_mapping = bdev->bd_inode->i_mapping;
        lock_kernel();
        disk = get_gendisk(bdev->bd_dev, &part);
        if (!disk) {
                unlock_kernel();
                bdput(bdev);
                return ret;
        }
        owner = disk->fops->owner;

        mutex_lock_nested(&bdev->bd_mutex, for_part);
        if (!bdev->bd_openers) {
                bdev->bd_disk = disk;
                bdev->bd_contains = bdev;
                if (!part) {
                        struct backing_dev_info *bdi;
                        if (disk->fops->open) {
                                ret = disk->fops->open(bdev->bd_inode, file);
                                if (ret)
                                        goto out_first;
                        }
                        if (!bdev->bd_openers) {
                                bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
                                bdi = blk_get_backing_dev_info(bdev);
                                if (bdi == NULL)
                                        bdi = &default_backing_dev_info;
                                bdev->bd_inode->i_data.backing_dev_info = bdi;
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(disk, bdev);
                } else {
                        struct hd_struct *p;
                        struct block_device *whole;
                        whole = bdget_disk(disk, 0);
                        ret = -ENOMEM;
                        if (!whole)
                                goto out_first;
                        BUG_ON(for_part);
                        ret = __blkdev_get(whole, file->f_mode, file->f_flags, 1);
                        if (ret)
                                goto out_first;
                        bdev->bd_contains = whole;
                        p = disk->part[part - 1];
                        bdev->bd_inode->i_data.backing_dev_info =
                           whole->bd_inode->i_data.backing_dev_info;
                        if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
                                ret = -ENXIO;
                                goto out_first;
                        }
                        kobject_get(&p->kobj);
                        bdev->bd_part = p;
                        bd_set_size(bdev, (loff_t) p->nr_sects << 9);
                }
        } else {
                put_disk(disk);
                module_put(owner);
                if (bdev->bd_contains == bdev) {
                        if (bdev->bd_disk->fops->open) {
                                ret = bdev->bd_disk->fops->open(bdev->bd_inode, file);
                                if (ret)
                                        goto out;
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(bdev->bd_disk, bdev);
                }
        }
        bdev->bd_openers++;
        if (for_part)
                bdev->bd_part_count++;
        mutex_unlock(&bdev->bd_mutex);
        unlock_kernel();
        return 0;

out_first:
        bdev->bd_disk = NULL;
        bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
        if (bdev != bdev->bd_contains)
                __blkdev_put(bdev->bd_contains, 1);
        bdev->bd_contains = NULL;
        put_disk(disk);
        module_put(owner);
out:
        mutex_unlock(&bdev->bd_mutex);
        unlock_kernel();
        if (ret)
                bdput(bdev);
        return ret;
}

static int __blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags,
                        int for_part)
{
        /*
         * This crockload is due to bad choice of ->open() type.
         * It will go away.
         * For now, block device ->open() routine must _not_
         * examine anything in 'inode' argument except ->i_rdev.
         */
        struct file fake_file = {};
        struct dentry fake_dentry = {};
        fake_file.f_mode = mode;
        fake_file.f_flags = flags;
        fake_file.f_path.dentry = &fake_dentry;
        fake_dentry.d_inode = bdev->bd_inode;

        return do_open(bdev, &fake_file, for_part);
}

int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
{
        return __blkdev_get(bdev, mode, flags, 0);
}
EXPORT_SYMBOL(blkdev_get);

static int blkdev_open(struct inode * inode, struct file * filp)
{
        struct block_device *bdev;
        int res;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly. Some mkfs
         * binary needs it. We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;

        bdev = bd_acquire(inode);
        if (bdev == NULL)
                return -ENOMEM;

        res = do_open(bdev, filp, 0);
        if (res)
                return res;

        if (!(filp->f_flags & O_EXCL) )
                return 0;

        if (!(res = bd_claim(bdev, filp)))
                return 0;

        blkdev_put(bdev);
        return res;
}

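/*
 * Drop one opener.  The last close syncs and kills the bdev's page cache,
 * releases the gendisk and, for a partition, also drops the containing
 * whole device.
 */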
static int __blkdev_put(struct block_device *bdev, int for_part)
{
        int ret = 0;
        struct inode *bd_inode = bdev->bd_inode;
        struct gendisk *disk = bdev->bd_disk;
        struct block_device *victim = NULL;

        mutex_lock_nested(&bdev->bd_mutex, for_part);
        lock_kernel();
        if (for_part)
                bdev->bd_part_count--;

        if (!--bdev->bd_openers) {
                sync_blockdev(bdev);
                kill_bdev(bdev);
        }
        if (bdev->bd_contains == bdev) {
                if (disk->fops->release)
                        ret = disk->fops->release(bd_inode, NULL);
        }
        if (!bdev->bd_openers) {
                struct module *owner = disk->fops->owner;

                put_disk(disk);
                module_put(owner);

                if (bdev->bd_contains != bdev) {
                        kobject_put(&bdev->bd_part->kobj);
                        bdev->bd_part = NULL;
                }
                bdev->bd_disk = NULL;
                bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
                if (bdev != bdev->bd_contains)
                        victim = bdev->bd_contains;
                bdev->bd_contains = NULL;
        }
        unlock_kernel();
        mutex_unlock(&bdev->bd_mutex);
        bdput(bdev);
        if (victim)
                __blkdev_put(victim, 1);
        return ret;
}

int blkdev_put(struct block_device *bdev)
{
        return __blkdev_put(bdev, 0);
}
EXPORT_SYMBOL(blkdev_put);

static int blkdev_close(struct inode * inode, struct file * filp)
{
        struct block_device *bdev = I_BDEV(filp->f_mapping->host);
        if (bdev->bd_holder == filp)
                bd_release(bdev);
        return blkdev_put(bdev);
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
        return blkdev_ioctl(file->f_mapping->host, file, cmd, arg);
}

const struct address_space_operations def_blk_aops = {
        .readpage       = blkdev_readpage,
        .writepage      = blkdev_writepage,
        .sync_page      = block_sync_page,
        .write_begin    = blkdev_write_begin,
        .write_end      = blkdev_write_end,
        .writepages     = generic_writepages,
        .direct_IO      = blkdev_direct_IO,
};

const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = block_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = generic_file_aio_write_nolock,
        .mmap           = generic_file_mmap,
        .fsync          = block_fsync,
        .unlocked_ioctl = block_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
};

int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
        int res;
        mm_segment_t old_fs = get_fs();
        set_fs(KERNEL_DS);
        res = blkdev_ioctl(bdev->bd_inode, NULL, cmd, arg);
        set_fs(old_fs);
        return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);

/**
 * lookup_bdev  - lookup a struct block_device by name
 *
 * @path:       special file representing the block device
 *
 * Get a reference to the blockdevice at @path in the current
 * namespace if possible and return it.  Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *path)
{
        struct block_device *bdev;
        struct inode *inode;
        struct nameidata nd;
        int error;

        if (!path || !*path)
                return ERR_PTR(-EINVAL);

        error = path_lookup(path, LOOKUP_FOLLOW, &nd);
        if (error)
                return ERR_PTR(error);

        inode = nd.dentry->d_inode;
        error = -ENOTBLK;
        if (!S_ISBLK(inode->i_mode))
                goto fail;
        error = -EACCES;
        if (nd.mnt->mnt_flags & MNT_NODEV)
                goto fail;
        error = -ENOMEM;
        bdev = bd_acquire(inode);
        if (!bdev)
                goto fail;
out:
        path_release(&nd);
        return bdev;
fail:
        bdev = ERR_PTR(error);
        goto out;
}

/**
 * open_bdev_excl  -  open a block device by name and set it up for use
 *
 * @path:       special file representing the block device
 * @flags:      %MS_RDONLY for opening read-only
 * @holder:     owner for exclusion
 *
 * Open the blockdevice described by the special file at @path, claim it
 * for the @holder.
 */
struct block_device *open_bdev_excl(const char *path, int flags, void *holder)
{
        struct block_device *bdev;
        mode_t mode = FMODE_READ;
        int error = 0;

        bdev = lookup_bdev(path);
        if (IS_ERR(bdev))
                return bdev;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;
        error = blkdev_get(bdev, mode, 0);
        if (error)
                return ERR_PTR(error);
        error = -EACCES;
        if (!(flags & MS_RDONLY) && bdev_read_only(bdev))
                goto blkdev_put;
        error = bd_claim(bdev, holder);
        if (error)
                goto blkdev_put;

        return bdev;

blkdev_put:
        blkdev_put(bdev);
        return ERR_PTR(error);
}

EXPORT_SYMBOL(open_bdev_excl);

/**
 * close_bdev_excl  -  release a blockdevice opened by open_bdev_excl()
 *
 * @bdev:       blockdevice to close
 *
 * This is the counterpart to open_bdev_excl().
 */
void close_bdev_excl(struct block_device *bdev)
{
        bd_release(bdev);
        blkdev_put(bdev);
}

EXPORT_SYMBOL(close_bdev_excl);

int __invalidate_device(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        int res = 0;

        if (sb) {
                /*
                 * no need to lock the super, get_super holds the
                 * read mutex so the filesystem cannot go away
                 * under us (->put_super runs with the write lock
                 * held).
                 */
                shrink_dcache_sb(sb);
                res = invalidate_inodes(sb);
                drop_super(sb);
        }
        invalidate_bdev(bdev);
        return res;
}
EXPORT_SYMBOL(__invalidate_device);
