OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [char/] [raw.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 * linux/drivers/char/raw.c
3
 *
4
 * Front-end raw character devices.  These can be bound to any block
5
 * devices to provide genuine Unix raw character device semantics.
6
 *
7
 * We reserve minor number 0 for a control interface.  ioctl()s on this
8
 * device are used to bind the other minor numbers to block devices.
9
 */
10
 
11
#include <linux/fs.h>
12
#include <linux/iobuf.h>
13
#include <linux/major.h>
14
#include <linux/blkdev.h>
15
#include <linux/raw.h>
16
#include <linux/capability.h>
17
#include <linux/smp_lock.h>
18
#include <asm/uaccess.h>
19
 
20
/* Debug printks are compiled out entirely; expands to nothing. */
#define dprintk(x...) 
21
 
22
/*
 * Per-minor state for a raw device.  Minor 0 is the control device
 * (never bound); minors 1-255 may be bound to a block device with
 * the RAW_SETBIND ioctl on the control device.
 */
typedef struct raw_device_data_s {
        struct block_device *binding;   /* bound block device, NULL if unbound */
        int inuse, sector_size, sector_bits;    /* open count; blocksize and its log2 */
        struct semaphore mutex;         /* serialises open/release/bind for this minor */
} raw_device_data_t;

/* One slot per possible minor number (0-255). */
static raw_device_data_t raw_devices[256];
29
 
30
/* Common worker for raw_read()/raw_write(); rw is READ or WRITE. */
static ssize_t rw_raw_dev(int rw, struct file *, char *, size_t, loff_t *);

/* Entry points wired into raw_fops / raw_ctl_fops below. */
ssize_t raw_read(struct file *, char *, size_t, loff_t *);
ssize_t raw_write(struct file *, const char *, size_t, loff_t *);
int     raw_open(struct inode *, struct file *);
int     raw_release(struct inode *, struct file *);
int     raw_ctl_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
int     raw_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
38
 
39
 
40
/*
 * File operations for bound raw devices (minors 1-255).
 * Uses the old GNU "label:" initializer syntax common in 2.4 code.
 */
static struct file_operations raw_fops = {
        read:           raw_read,
        write:          raw_write,
        open:           raw_open,
        release:        raw_release,
        ioctl:          raw_ioctl,
};
47
 
48
/*
 * File operations for the control device (minor 0): bind/unbind via
 * ioctl only.  raw_open() switches an opened minor 0 to these fops.
 */
static struct file_operations raw_ctl_fops = {
        ioctl:          raw_ctl_ioctl,
        open:           raw_open,
};
52
 
53
/*
 * Module initialisation: register the raw char major and initialise
 * the per-minor mutexes.  Bindings are established later via ioctls
 * on the control device (minor 0).
 *
 * Returns 0 on success or the negative errno from register_chrdev().
 */
static int __init raw_init(void)
{
        int i;
        int err;

        /*
         * The original code ignored register_chrdev()'s return value;
         * if the major was already taken the driver would "succeed"
         * silently with no device registered.  Propagate the error.
         */
        err = register_chrdev(RAW_MAJOR, "raw", &raw_fops);
        if (err < 0) {
                printk(KERN_ERR "raw: unable to register major %d\n",
                       RAW_MAJOR);
                return err;
        }

        for (i = 0; i < 256; i++)
                init_MUTEX(&raw_devices[i].mutex);

        return 0;
}

__initcall(raw_init);
65
 
66
/*
67
 * Open/close code for raw IO.
68
 */
69
 
70
/*
 * Open a raw device node.
 *
 * Minor 0 is the control device: just swap in the control fops.
 * Any other minor must already be bound to a block device; we take a
 * reference on it via blkdev_get() and, for the first opener, force
 * the underlying device's blocksize down to its sector size.
 *
 * Returns 0 on success, -ENODEV if the minor is unbound, or the
 * error from alloc_kiovec()/blkdev_get().
 */
int raw_open(struct inode *inode, struct file *filp)
{
        int minor;
        struct block_device * bdev;
        kdev_t rdev;    /* it should eventually go away */
        int err;
        int sector_size;
        int sector_bits;

        minor = MINOR(inode->i_rdev);

        /*
         * Is it the control device?
         */

        if (minor == 0) {
                filp->f_op = &raw_ctl_fops;
                return 0;
        }

        /* Preallocate the per-file kiobuf used by the common-case I/O path. */
        if (!filp->f_iobuf) {
                err = alloc_kiovec(1, &filp->f_iobuf);
                if (err)
                        return err;
        }

        down(&raw_devices[minor].mutex);
        /*
         * No, it is a normal raw device.  All we need to do on open is
         * to check that the device is bound, and force the underlying
         * block device to a sector-size blocksize.
         */

        bdev = raw_devices[minor].binding;
        err = -ENODEV;
        if (!bdev)
                goto out;

        /*
         * Take an extra bd_count reference before blkdev_get();
         * NOTE(review): blkdev_get() appears to consume this reference
         * on failure in 2.4 — confirm against the blkdev_get() source.
         */
        atomic_inc(&bdev->bd_count);
        rdev = to_kdev_t(bdev->bd_dev);
        err = blkdev_get(bdev, filp->f_mode, 0, BDEV_RAW);
        if (err)
                goto out;

        /*
         * Don't change the blocksize if we already have users using
         * this device
         */

        if (raw_devices[minor].inuse++)
                goto out;

        /*
         * Don't interfere with mounted devices: we cannot safely set
         * the blocksize on a device which is already mounted.
         */

        /* Fall back to 512 bytes when no per-device size table exists. */
        sector_size = 512;
        if (is_mounted(rdev)) {
                if (blksize_size[MAJOR(rdev)])
                        sector_size = blksize_size[MAJOR(rdev)][MINOR(rdev)];
        } else {
                if (hardsect_size[MAJOR(rdev)])
                        sector_size = hardsect_size[MAJOR(rdev)][MINOR(rdev)];
        }

        set_blocksize(rdev, sector_size);
        raw_devices[minor].sector_size = sector_size;

        /* Compute log2(sector_size); sector sizes are powers of two. */
        for (sector_bits = 0; !(sector_size & 1); )
                sector_size>>=1, sector_bits++;
        raw_devices[minor].sector_bits = sector_bits;

 out:
        up(&raw_devices[minor].mutex);

        return err;
}
148
 
149
/*
 * Release (last close of this struct file) a raw device.
 *
 * Drops the in-use count under the per-minor mutex, then releases the
 * blkdev_get() reference taken in raw_open().  blkdev_put() is called
 * outside the mutex — presumably to avoid holding the semaphore over
 * the (potentially blocking) block-device teardown; preserve this
 * ordering.  Always returns 0.
 */
int raw_release(struct inode *inode, struct file *filp)
{
        int minor;
        struct block_device *bdev;

        minor = MINOR(inode->i_rdev);
        down(&raw_devices[minor].mutex);
        bdev = raw_devices[minor].binding;
        raw_devices[minor].inuse--;
        up(&raw_devices[minor].mutex);
        blkdev_put(bdev, BDEV_RAW);
        return 0;
}
162
 
163
 
164
 
165
/* Forward ioctls to the underlying block device. */
166
int raw_ioctl(struct inode *inode,
167
                  struct file *flip,
168
                  unsigned int command,
169
                  unsigned long arg)
170
{
171
        int minor = minor(inode->i_rdev), err;
172
        struct block_device *b;
173
        if (minor < 1 || minor > 255)
174
                return -ENODEV;
175
 
176
        b = raw_devices[minor].binding;
177
        err = -EINVAL;
178
        if (b && b->bd_inode && b->bd_op && b->bd_op->ioctl) {
179
                err = b->bd_op->ioctl(b->bd_inode, NULL, command, arg);
180
        }
181
        return err;
182
}
183
 
184
/*
185
 * Deal with ioctls against the raw-device control interface, to bind
186
 * and unbind other raw devices.
187
 */
188
 
189
int raw_ctl_ioctl(struct inode *inode,
190
                  struct file *flip,
191
                  unsigned int command,
192
                  unsigned long arg)
193
{
194
        struct raw_config_request rq;
195
        int err = 0;
196
        int minor;
197
 
198
        switch (command) {
199
        case RAW_SETBIND:
200
        case RAW_GETBIND:
201
 
202
                /* First, find out which raw minor we want */
203
 
204
                err = copy_from_user(&rq, (void *) arg, sizeof(rq));
205
                if (err)
206
                        break;
207
 
208
                minor = rq.raw_minor;
209
                if (minor <= 0 || minor > MINORMASK) {
210
                        err = -EINVAL;
211
                        break;
212
                }
213
 
214
                if (command == RAW_SETBIND) {
215
                        /*
216
                         * This is like making block devices, so demand the
217
                         * same capability
218
                         */
219
                        if (!capable(CAP_SYS_ADMIN)) {
220
                                err = -EPERM;
221
                                break;
222
                        }
223
 
224
                        /*
225
                         * For now, we don't need to check that the underlying
226
                         * block device is present or not: we can do that when
227
                         * the raw device is opened.  Just check that the
228
                         * major/minor numbers make sense.
229
                         */
230
 
231
                        if ((rq.block_major == NODEV &&
232
                             rq.block_minor != NODEV) ||
233
                            rq.block_major > MAX_BLKDEV ||
234
                            rq.block_minor > MINORMASK) {
235
                                err = -EINVAL;
236
                                break;
237
                        }
238
 
239
                        down(&raw_devices[minor].mutex);
240
                        if (raw_devices[minor].inuse) {
241
                                up(&raw_devices[minor].mutex);
242
                                err = -EBUSY;
243
                                break;
244
                        }
245
                        if (raw_devices[minor].binding)
246
                                bdput(raw_devices[minor].binding);
247
                        raw_devices[minor].binding =
248
                                bdget(kdev_t_to_nr(MKDEV(rq.block_major, rq.block_minor)));
249
                        up(&raw_devices[minor].mutex);
250
                } else {
251
                        struct block_device *bdev;
252
                        kdev_t dev;
253
 
254
                        bdev = raw_devices[minor].binding;
255
                        if (bdev) {
256
                                dev = to_kdev_t(bdev->bd_dev);
257
                                rq.block_major = MAJOR(dev);
258
                                rq.block_minor = MINOR(dev);
259
                        } else {
260
                                rq.block_major = rq.block_minor = 0;
261
                        }
262
                        err = copy_to_user((void *) arg, &rq, sizeof(rq));
263
                }
264
                break;
265
 
266
        default:
267
                err = -EINVAL;
268
        }
269
 
270
        return err;
271
}
272
 
273
 
274
 
275
/* read(2) entry point: delegate to the common worker with rw=READ. */
ssize_t raw_read(struct file *filp, char * buf,
                 size_t size, loff_t *offp)
{
        return rw_raw_dev(READ, filp, buf, size, offp);
}
280
 
281
/*
 * write(2) entry point: delegate to the common worker with rw=WRITE.
 * The cast discards const; rw_raw_dev() only reads the buffer for
 * WRITE (it maps the user pages for the device to read from).
 */
ssize_t raw_write(struct file *filp, const char *buf,
                  size_t size, loff_t *offp)
{
        return rw_raw_dev(WRITE, filp, (char *) buf, size, offp);
}
286
 
287
/*
 * Canonical 512-byte sector constants.  NOTE(review): the visible
 * code below uses the per-device sector_size/sector_bits instead;
 * these macros appear unused here.
 */
#define SECTOR_BITS 9
#define SECTOR_SIZE (1U << SECTOR_BITS)
#define SECTOR_MASK (SECTOR_SIZE - 1)
290
 
291
/*
 * Common worker for raw reads and writes.
 *
 * Performs direct (unbuffered) I/O between the user buffer and the
 * bound block device: the user pages are mapped into a kiobuf,
 * submitted with brw_kiovec() in chunks of at most KIO_MAX_SECTORS,
 * then unmapped.  Offset and size must be multiples of the device
 * sector size.
 *
 * Returns the number of bytes transferred, 0 at end-of-device, or a
 * negative errno (-EINVAL on misalignment, -ENXIO past the device
 * end, or the mapping/submission error).
 */
ssize_t rw_raw_dev(int rw, struct file *filp, char *buf,
                   size_t size, loff_t *offp)
{
        struct kiobuf * iobuf;
        int             new_iobuf;
        int             err = 0;
        unsigned long   blocknr, blocks;
        size_t          transferred;
        int             iosize;
        int             i;
        int             minor;
        kdev_t          dev;
        unsigned long   limit;          /* device size in sectors */

        int             sector_size, sector_bits, sector_mask;
        int             max_sectors;

        /*
         * First, a few checks on device size limits
         */

        minor = MINOR(filp->f_dentry->d_inode->i_rdev);

        new_iobuf = 0;
        iobuf = filp->f_iobuf;
        if (test_and_set_bit(0, &filp->f_iobuf_lock)) {
                /*
                 * A parallel read/write is using the preallocated iobuf
                 * so just run slow and allocate a new one.
                 */
                err = alloc_kiovec(1, &iobuf);
                if (err)
                        goto out;
                new_iobuf = 1;
        }

        /* Per-minor geometry captured at raw_open() time. */
        dev = to_kdev_t(raw_devices[minor].binding->bd_dev);
        sector_size = raw_devices[minor].sector_size;
        sector_bits = raw_devices[minor].sector_bits;
        sector_mask = sector_size- 1;
        /* Scale the per-request sector cap to this device's sector size. */
        max_sectors = KIO_MAX_SECTORS >> (sector_bits - 9);

        /* blk_size[] is in 1K units; convert to sectors, or no limit. */
        if (blk_size[MAJOR(dev)])
                limit = (((loff_t) blk_size[MAJOR(dev)][MINOR(dev)]) << BLOCK_SIZE_BITS) >> sector_bits;
        else
                limit = INT_MAX;
        dprintk ("rw_raw_dev: dev %d:%d (+%d)\n",
                 MAJOR(dev), MINOR(dev), limit);

        /* Both offset and length must be sector-aligned. */
        err = -EINVAL;
        if ((*offp & sector_mask) || (size & sector_mask))
                goto out_free;
        /*
         * At or past end-of-device: a zero-length request returns 0,
         * a non-zero one returns -ENXIO (err is pre-seeded here).
         */
        err = 0;
        if (size)
                err = -ENXIO;
        if ((*offp >> sector_bits) >= limit)
                goto out_free;

        /*
         * Split the IO into KIO_MAX_SECTORS chunks, mapping and
         * unmapping the single kiobuf as we go to perform each chunk of
         * IO.
         */

        transferred = 0;
        blocknr = *offp >> sector_bits;
        while (size > 0) {
                /* Chunk = min(remaining, per-request cap, sectors to device end). */
                blocks = size >> sector_bits;
                if (blocks > max_sectors)
                        blocks = max_sectors;
                if (blocks > limit - blocknr)
                        blocks = limit - blocknr;
                if (!blocks)
                        break;

                iosize = blocks << sector_bits;

                err = map_user_kiobuf(rw, iobuf, (unsigned long) buf, iosize);
                if (err)
                        break;

                for (i=0; i < blocks; i++)
                        iobuf->blocks[i] = blocknr++;

                /* brw_kiovec() returns bytes transferred or a negative errno. */
                err = brw_kiovec(rw, 1, &iobuf, dev, iobuf->blocks, sector_size);

                /* A READ wrote into the user pages: mark them dirty. */
                if (rw == READ && err > 0)
                        mark_dirty_kiobuf(iobuf, err);

                if (err >= 0) {
                        transferred += err;
                        size -= err;
                        buf += err;
                }

                unmap_kiobuf(iobuf);

                /* Short or failed transfer ends the loop. */
                if (err != iosize)
                        break;
        }

        /* Partial success wins over a trailing error: report bytes done. */
        if (transferred) {
                *offp += transferred;
                err = transferred;
        }

 out_free:
        if (!new_iobuf)
                clear_bit(0, &filp->f_iobuf_lock);
        else
                free_kiovec(1, &iobuf);
 out:
        return err;
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.